Tiburoncin committed on
Commit
5cbf1e2
1 Parent(s): 4614f25

Upload 768 files

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .gitattributes +3 -0
  2. python310/LICENSE.txt +279 -0
  3. python310/abc.py +188 -0
  4. python310/aifc.py +947 -0
  5. python310/antigravity.py +17 -0
  6. python310/argparse.py +0 -0
  7. python310/ast.py +1709 -0
  8. python310/asynchat.py +315 -0
  9. python310/asyncio/__init__.py +43 -0
  10. python310/asyncio/__main__.py +125 -0
  11. python310/asyncio/__pycache__/__init__.cpython-310.pyc +0 -0
  12. python310/asyncio/__pycache__/__main__.cpython-310.pyc +0 -0
  13. python310/asyncio/__pycache__/base_events.cpython-310.pyc +0 -0
  14. python310/asyncio/__pycache__/base_futures.cpython-310.pyc +0 -0
  15. python310/asyncio/__pycache__/base_subprocess.cpython-310.pyc +0 -0
  16. python310/asyncio/__pycache__/base_tasks.cpython-310.pyc +0 -0
  17. python310/asyncio/__pycache__/constants.cpython-310.pyc +0 -0
  18. python310/asyncio/__pycache__/coroutines.cpython-310.pyc +0 -0
  19. python310/asyncio/__pycache__/events.cpython-310.pyc +0 -0
  20. python310/asyncio/__pycache__/exceptions.cpython-310.pyc +0 -0
  21. python310/asyncio/__pycache__/format_helpers.cpython-310.pyc +0 -0
  22. python310/asyncio/__pycache__/futures.cpython-310.pyc +0 -0
  23. python310/asyncio/__pycache__/locks.cpython-310.pyc +0 -0
  24. python310/asyncio/__pycache__/log.cpython-310.pyc +0 -0
  25. python310/asyncio/__pycache__/mixins.cpython-310.pyc +0 -0
  26. python310/asyncio/__pycache__/proactor_events.cpython-310.pyc +0 -0
  27. python310/asyncio/__pycache__/protocols.cpython-310.pyc +0 -0
  28. python310/asyncio/__pycache__/queues.cpython-310.pyc +0 -0
  29. python310/asyncio/__pycache__/runners.cpython-310.pyc +0 -0
  30. python310/asyncio/__pycache__/selector_events.cpython-310.pyc +0 -0
  31. python310/asyncio/__pycache__/sslproto.cpython-310.pyc +0 -0
  32. python310/asyncio/__pycache__/staggered.cpython-310.pyc +0 -0
  33. python310/asyncio/__pycache__/streams.cpython-310.pyc +0 -0
  34. python310/asyncio/__pycache__/subprocess.cpython-310.pyc +0 -0
  35. python310/asyncio/__pycache__/tasks.cpython-310.pyc +0 -0
  36. python310/asyncio/__pycache__/threads.cpython-310.pyc +0 -0
  37. python310/asyncio/__pycache__/transports.cpython-310.pyc +0 -0
  38. python310/asyncio/__pycache__/trsock.cpython-310.pyc +0 -0
  39. python310/asyncio/__pycache__/unix_events.cpython-310.pyc +0 -0
  40. python310/asyncio/__pycache__/windows_events.cpython-310.pyc +0 -0
  41. python310/asyncio/__pycache__/windows_utils.cpython-310.pyc +0 -0
  42. python310/asyncio/base_events.py +1934 -0
  43. python310/asyncio/base_futures.py +80 -0
  44. python310/asyncio/base_subprocess.py +285 -0
  45. python310/asyncio/base_tasks.py +85 -0
  46. python310/asyncio/constants.py +27 -0
  47. python310/asyncio/coroutines.py +269 -0
  48. python310/asyncio/events.py +819 -0
  49. python310/asyncio/exceptions.py +58 -0
  50. python310/asyncio/format_helpers.py +76 -0
.gitattributes CHANGED
@@ -33,3 +33,6 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ python310/config-3.10-x86_64-linux-gnu/libpython3.10-pic.a filter=lfs diff=lfs merge=lfs -text
37
+ python310/config-3.10-x86_64-linux-gnu/libpython3.10.a filter=lfs diff=lfs merge=lfs -text
38
+ python310/config-3.10-x86_64-linux-gnu/libpython3.10.so filter=lfs diff=lfs merge=lfs -text
python310/LICENSE.txt ADDED
@@ -0,0 +1,279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ A. HISTORY OF THE SOFTWARE
2
+ ==========================
3
+
4
+ Python was created in the early 1990s by Guido van Rossum at Stichting
5
+ Mathematisch Centrum (CWI, see https://www.cwi.nl) in the Netherlands
6
+ as a successor of a language called ABC. Guido remains Python's
7
+ principal author, although it includes many contributions from others.
8
+
9
+ In 1995, Guido continued his work on Python at the Corporation for
10
+ National Research Initiatives (CNRI, see https://www.cnri.reston.va.us)
11
+ in Reston, Virginia where he released several versions of the
12
+ software.
13
+
14
+ In May 2000, Guido and the Python core development team moved to
15
+ BeOpen.com to form the BeOpen PythonLabs team. In October of the same
16
+ year, the PythonLabs team moved to Digital Creations, which became
17
+ Zope Corporation. In 2001, the Python Software Foundation (PSF, see
18
+ https://www.python.org/psf/) was formed, a non-profit organization
19
+ created specifically to own Python-related Intellectual Property.
20
+ Zope Corporation was a sponsoring member of the PSF.
21
+
22
+ All Python releases are Open Source (see https://opensource.org for
23
+ the Open Source Definition). Historically, most, but not all, Python
24
+ releases have also been GPL-compatible; the table below summarizes
25
+ the various releases.
26
+
27
+ Release Derived Year Owner GPL-
28
+ from compatible? (1)
29
+
30
+ 0.9.0 thru 1.2 1991-1995 CWI yes
31
+ 1.3 thru 1.5.2 1.2 1995-1999 CNRI yes
32
+ 1.6 1.5.2 2000 CNRI no
33
+ 2.0 1.6 2000 BeOpen.com no
34
+ 1.6.1 1.6 2001 CNRI yes (2)
35
+ 2.1 2.0+1.6.1 2001 PSF no
36
+ 2.0.1 2.0+1.6.1 2001 PSF yes
37
+ 2.1.1 2.1+2.0.1 2001 PSF yes
38
+ 2.1.2 2.1.1 2002 PSF yes
39
+ 2.1.3 2.1.2 2002 PSF yes
40
+ 2.2 and above 2.1.1 2001-now PSF yes
41
+
42
+ Footnotes:
43
+
44
+ (1) GPL-compatible doesn't mean that we're distributing Python under
45
+ the GPL. All Python licenses, unlike the GPL, let you distribute
46
+ a modified version without making your changes open source. The
47
+ GPL-compatible licenses make it possible to combine Python with
48
+ other software that is released under the GPL; the others don't.
49
+
50
+ (2) According to Richard Stallman, 1.6.1 is not GPL-compatible,
51
+ because its license has a choice of law clause. According to
52
+ CNRI, however, Stallman's lawyer has told CNRI's lawyer that 1.6.1
53
+ is "not incompatible" with the GPL.
54
+
55
+ Thanks to the many outside volunteers who have worked under Guido's
56
+ direction to make these releases possible.
57
+
58
+
59
+ B. TERMS AND CONDITIONS FOR ACCESSING OR OTHERWISE USING PYTHON
60
+ ===============================================================
61
+
62
+ Python software and documentation are licensed under the
63
+ Python Software Foundation License Version 2.
64
+
65
+ Starting with Python 3.8.6, examples, recipes, and other code in
66
+ the documentation are dual licensed under the PSF License Version 2
67
+ and the Zero-Clause BSD license.
68
+
69
+ Some software incorporated into Python is under different licenses.
70
+ The licenses are listed with code falling under that license.
71
+
72
+
73
+ PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
74
+ --------------------------------------------
75
+
76
+ 1. This LICENSE AGREEMENT is between the Python Software Foundation
77
+ ("PSF"), and the Individual or Organization ("Licensee") accessing and
78
+ otherwise using this software ("Python") in source or binary form and
79
+ its associated documentation.
80
+
81
+ 2. Subject to the terms and conditions of this License Agreement, PSF hereby
82
+ grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
83
+ analyze, test, perform and/or display publicly, prepare derivative works,
84
+ distribute, and otherwise use Python alone or in any derivative version,
85
+ provided, however, that PSF's License Agreement and PSF's notice of copyright,
86
+ i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
87
+ 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022, 2023 Python Software Foundation;
88
+ All Rights Reserved" are retained in Python alone or in any derivative version
89
+ prepared by Licensee.
90
+
91
+ 3. In the event Licensee prepares a derivative work that is based on
92
+ or incorporates Python or any part thereof, and wants to make
93
+ the derivative work available to others as provided herein, then
94
+ Licensee hereby agrees to include in any such work a brief summary of
95
+ the changes made to Python.
96
+
97
+ 4. PSF is making Python available to Licensee on an "AS IS"
98
+ basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
99
+ IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
100
+ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
101
+ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
102
+ INFRINGE ANY THIRD PARTY RIGHTS.
103
+
104
+ 5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
105
+ FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
106
+ A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
107
+ OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
108
+
109
+ 6. This License Agreement will automatically terminate upon a material
110
+ breach of its terms and conditions.
111
+
112
+ 7. Nothing in this License Agreement shall be deemed to create any
113
+ relationship of agency, partnership, or joint venture between PSF and
114
+ Licensee. This License Agreement does not grant permission to use PSF
115
+ trademarks or trade name in a trademark sense to endorse or promote
116
+ products or services of Licensee, or any third party.
117
+
118
+ 8. By copying, installing or otherwise using Python, Licensee
119
+ agrees to be bound by the terms and conditions of this License
120
+ Agreement.
121
+
122
+
123
+ BEOPEN.COM LICENSE AGREEMENT FOR PYTHON 2.0
124
+ -------------------------------------------
125
+
126
+ BEOPEN PYTHON OPEN SOURCE LICENSE AGREEMENT VERSION 1
127
+
128
+ 1. This LICENSE AGREEMENT is between BeOpen.com ("BeOpen"), having an
129
+ office at 160 Saratoga Avenue, Santa Clara, CA 95051, and the
130
+ Individual or Organization ("Licensee") accessing and otherwise using
131
+ this software in source or binary form and its associated
132
+ documentation ("the Software").
133
+
134
+ 2. Subject to the terms and conditions of this BeOpen Python License
135
+ Agreement, BeOpen hereby grants Licensee a non-exclusive,
136
+ royalty-free, world-wide license to reproduce, analyze, test, perform
137
+ and/or display publicly, prepare derivative works, distribute, and
138
+ otherwise use the Software alone or in any derivative version,
139
+ provided, however, that the BeOpen Python License is retained in the
140
+ Software, alone or in any derivative version prepared by Licensee.
141
+
142
+ 3. BeOpen is making the Software available to Licensee on an "AS IS"
143
+ basis. BEOPEN MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
144
+ IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, BEOPEN MAKES NO AND
145
+ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
146
+ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF THE SOFTWARE WILL NOT
147
+ INFRINGE ANY THIRD PARTY RIGHTS.
148
+
149
+ 4. BEOPEN SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF THE
150
+ SOFTWARE FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS
151
+ AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THE SOFTWARE, OR ANY
152
+ DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
153
+
154
+ 5. This License Agreement will automatically terminate upon a material
155
+ breach of its terms and conditions.
156
+
157
+ 6. This License Agreement shall be governed by and interpreted in all
158
+ respects by the law of the State of California, excluding conflict of
159
+ law provisions. Nothing in this License Agreement shall be deemed to
160
+ create any relationship of agency, partnership, or joint venture
161
+ between BeOpen and Licensee. This License Agreement does not grant
162
+ permission to use BeOpen trademarks or trade names in a trademark
163
+ sense to endorse or promote products or services of Licensee, or any
164
+ third party. As an exception, the "BeOpen Python" logos available at
165
+ http://www.pythonlabs.com/logos.html may be used according to the
166
+ permissions granted on that web page.
167
+
168
+ 7. By copying, installing or otherwise using the software, Licensee
169
+ agrees to be bound by the terms and conditions of this License
170
+ Agreement.
171
+
172
+
173
+ CNRI LICENSE AGREEMENT FOR PYTHON 1.6.1
174
+ ---------------------------------------
175
+
176
+ 1. This LICENSE AGREEMENT is between the Corporation for National
177
+ Research Initiatives, having an office at 1895 Preston White Drive,
178
+ Reston, VA 20191 ("CNRI"), and the Individual or Organization
179
+ ("Licensee") accessing and otherwise using Python 1.6.1 software in
180
+ source or binary form and its associated documentation.
181
+
182
+ 2. Subject to the terms and conditions of this License Agreement, CNRI
183
+ hereby grants Licensee a nonexclusive, royalty-free, world-wide
184
+ license to reproduce, analyze, test, perform and/or display publicly,
185
+ prepare derivative works, distribute, and otherwise use Python 1.6.1
186
+ alone or in any derivative version, provided, however, that CNRI's
187
+ License Agreement and CNRI's notice of copyright, i.e., "Copyright (c)
188
+ 1995-2001 Corporation for National Research Initiatives; All Rights
189
+ Reserved" are retained in Python 1.6.1 alone or in any derivative
190
+ version prepared by Licensee. Alternately, in lieu of CNRI's License
191
+ Agreement, Licensee may substitute the following text (omitting the
192
+ quotes): "Python 1.6.1 is made available subject to the terms and
193
+ conditions in CNRI's License Agreement. This Agreement together with
194
+ Python 1.6.1 may be located on the internet using the following
195
+ unique, persistent identifier (known as a handle): 1895.22/1013. This
196
+ Agreement may also be obtained from a proxy server on the internet
197
+ using the following URL: http://hdl.handle.net/1895.22/1013".
198
+
199
+ 3. In the event Licensee prepares a derivative work that is based on
200
+ or incorporates Python 1.6.1 or any part thereof, and wants to make
201
+ the derivative work available to others as provided herein, then
202
+ Licensee hereby agrees to include in any such work a brief summary of
203
+ the changes made to Python 1.6.1.
204
+
205
+ 4. CNRI is making Python 1.6.1 available to Licensee on an "AS IS"
206
+ basis. CNRI MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
207
+ IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, CNRI MAKES NO AND
208
+ DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
209
+ FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON 1.6.1 WILL NOT
210
+ INFRINGE ANY THIRD PARTY RIGHTS.
211
+
212
+ 5. CNRI SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
213
+ 1.6.1 FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
214
+ A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 1.6.1,
215
+ OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
216
+
217
+ 6. This License Agreement will automatically terminate upon a material
218
+ breach of its terms and conditions.
219
+
220
+ 7. This License Agreement shall be governed by the federal
221
+ intellectual property law of the United States, including without
222
+ limitation the federal copyright law, and, to the extent such
223
+ U.S. federal law does not apply, by the law of the Commonwealth of
224
+ Virginia, excluding Virginia's conflict of law provisions.
225
+ Notwithstanding the foregoing, with regard to derivative works based
226
+ on Python 1.6.1 that incorporate non-separable material that was
227
+ previously distributed under the GNU General Public License (GPL), the
228
+ law of the Commonwealth of Virginia shall govern this License
229
+ Agreement only as to issues arising under or with respect to
230
+ Paragraphs 4, 5, and 7 of this License Agreement. Nothing in this
231
+ License Agreement shall be deemed to create any relationship of
232
+ agency, partnership, or joint venture between CNRI and Licensee. This
233
+ License Agreement does not grant permission to use CNRI trademarks or
234
+ trade name in a trademark sense to endorse or promote products or
235
+ services of Licensee, or any third party.
236
+
237
+ 8. By clicking on the "ACCEPT" button where indicated, or by copying,
238
+ installing or otherwise using Python 1.6.1, Licensee agrees to be
239
+ bound by the terms and conditions of this License Agreement.
240
+
241
+ ACCEPT
242
+
243
+
244
+ CWI LICENSE AGREEMENT FOR PYTHON 0.9.0 THROUGH 1.2
245
+ --------------------------------------------------
246
+
247
+ Copyright (c) 1991 - 1995, Stichting Mathematisch Centrum Amsterdam,
248
+ The Netherlands. All rights reserved.
249
+
250
+ Permission to use, copy, modify, and distribute this software and its
251
+ documentation for any purpose and without fee is hereby granted,
252
+ provided that the above copyright notice appear in all copies and that
253
+ both that copyright notice and this permission notice appear in
254
+ supporting documentation, and that the name of Stichting Mathematisch
255
+ Centrum or CWI not be used in advertising or publicity pertaining to
256
+ distribution of the software without specific, written prior
257
+ permission.
258
+
259
+ STICHTING MATHEMATISCH CENTRUM DISCLAIMS ALL WARRANTIES WITH REGARD TO
260
+ THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
261
+ FITNESS, IN NO EVENT SHALL STICHTING MATHEMATISCH CENTRUM BE LIABLE
262
+ FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
263
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
264
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
265
+ OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
266
+
267
+ ZERO-CLAUSE BSD LICENSE FOR CODE IN THE PYTHON DOCUMENTATION
268
+ ----------------------------------------------------------------------
269
+
270
+ Permission to use, copy, modify, and/or distribute this software for any
271
+ purpose with or without fee is hereby granted.
272
+
273
+ THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
274
+ REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
275
+ AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
276
+ INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
277
+ LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
278
+ OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
279
+ PERFORMANCE OF THIS SOFTWARE.
python310/abc.py ADDED
@@ -0,0 +1,188 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2007 Google, Inc. All Rights Reserved.
2
+ # Licensed to PSF under a Contributor Agreement.
3
+
4
+ """Abstract Base Classes (ABCs) according to PEP 3119."""
5
+
6
+
7
def abstractmethod(funcobj):
    """Decorator marking *funcobj* as an abstract method.

    Only meaningful on classes whose metaclass is ABCMeta (or derived
    from it): such a class cannot be instantiated until every abstract
    method has been overridden.  The marked method remains callable via
    the normal 'super' mechanisms, and the decorator also works on
    functions used for properties and descriptors.

    Usage:

        class C(metaclass=ABCMeta):
            @abstractmethod
            def my_abstract_method(self, ...):
                ...
    """
    # The only contract: flag the object and hand it back unchanged.
    setattr(funcobj, '__isabstractmethod__', True)
    return funcobj
26
+
27
+
28
class abstractclassmethod(classmethod):
    """Deprecated descriptor combining classmethod with abstractmethod.

    Prefer stacking the two builtin decorators instead:

        class C(ABC):
            @classmethod
            @abstractmethod
            def my_abstract_classmethod(cls, ...):
                ...
    """

    # The descriptor itself advertises abstractness...
    __isabstractmethod__ = True

    def __init__(self, callable):
        # ...and so does the wrapped callable, before delegating to the
        # ordinary classmethod machinery.
        callable.__isabstractmethod__ = True
        super().__init__(callable)
46
+
47
+
48
class abstractstaticmethod(staticmethod):
    """Deprecated descriptor combining staticmethod with abstractmethod.

    Prefer stacking the two builtin decorators instead:

        class C(ABC):
            @staticmethod
            @abstractmethod
            def my_abstract_staticmethod(...):
                ...
    """

    # The descriptor itself advertises abstractness...
    __isabstractmethod__ = True

    def __init__(self, callable):
        # ...and so does the wrapped callable, before delegating to the
        # ordinary staticmethod machinery.
        callable.__isabstractmethod__ = True
        super().__init__(callable)
66
+
67
+
68
class abstractproperty(property):
    """Deprecated descriptor combining property with abstractmethod.

    Prefer stacking the two builtin decorators instead:

        class C(ABC):
            @property
            @abstractmethod
            def my_abstract_property(self):
                ...
    """

    # Unlike the classmethod/staticmethod variants, only the descriptor
    # needs the flag -- property already owns the wrapped functions.
    __isabstractmethod__ = True
82
+
83
+
84
# Prefer the C-accelerated ABC machinery (_abc); fall back to the pure
# Python implementation (_py_abc) when the extension module is missing.
try:
    from _abc import (get_cache_token, _abc_init, _abc_register,
                      _abc_instancecheck, _abc_subclasscheck, _get_dump,
                      _reset_registry, _reset_caches)
except ImportError:
    # Pure-Python fallback already defines ABCMeta; re-brand it so it
    # presents as abc.ABCMeta to introspection.
    from _py_abc import ABCMeta, get_cache_token
    ABCMeta.__module__ = 'abc'
else:
    class ABCMeta(type):
        """Metaclass for defining Abstract Base Classes (ABCs).

        Use this metaclass to create an ABC.  An ABC can be subclassed
        directly, and then acts as a mix-in class.  You can also register
        unrelated concrete classes (even built-in classes) and unrelated
        ABCs as 'virtual subclasses' -- these and their descendants will
        be considered subclasses of the registering ABC by the built-in
        issubclass() function, but the registering ABC won't show up in
        their MRO (Method Resolution Order) nor will method
        implementations defined by the registering ABC be callable (not
        even via super()).
        """
        def __new__(mcls, name, bases, namespace, **kwargs):
            cls = super().__new__(mcls, name, bases, namespace, **kwargs)
            # Initialize the C-level ABC bookkeeping for the new class.
            _abc_init(cls)
            return cls

        def register(cls, subclass):
            """Register a virtual subclass of an ABC.

            Returns the subclass, to allow usage as a class decorator.
            """
            return _abc_register(cls, subclass)

        def __instancecheck__(cls, instance):
            """Override for isinstance(instance, cls)."""
            return _abc_instancecheck(cls, instance)

        def __subclasscheck__(cls, subclass):
            """Override for issubclass(subclass, cls)."""
            return _abc_subclasscheck(cls, subclass)

        def _dump_registry(cls, file=None):
            """Debug helper to print the ABC registry."""
            print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file)
            print(f"Inv. counter: {get_cache_token()}", file=file)
            # _get_dump returns the four internal caches kept per class.
            (_abc_registry, _abc_cache, _abc_negative_cache,
             _abc_negative_cache_version) = _get_dump(cls)
            print(f"_abc_registry: {_abc_registry!r}", file=file)
            print(f"_abc_cache: {_abc_cache!r}", file=file)
            print(f"_abc_negative_cache: {_abc_negative_cache!r}", file=file)
            print(f"_abc_negative_cache_version: {_abc_negative_cache_version!r}",
                  file=file)

        def _abc_registry_clear(cls):
            """Clear the registry (for debugging or testing)."""
            _reset_registry(cls)

        def _abc_caches_clear(cls):
            """Clear the caches (for debugging or testing)."""
            _reset_caches(cls)
144
+
145
+
146
def update_abstractmethods(cls):
    """Recalculate the set of abstract methods of an abstract class.

    If a class has had one of its abstract methods implemented after the
    class was created, the method will not be considered implemented until
    this function is called.  Likewise, a newly added abstract method only
    counts as abstract after this function runs.

    Call this before the class is used, typically from class decorators
    that add methods to the subject class.

    Returns cls, to allow usage as a class decorator.

    If cls is not an instance of ABCMeta, does nothing.
    """
    if not hasattr(cls, '__abstractmethods__'):
        # No __abstractmethods__ attribute means cls was not built by an
        # ABC metaclass (C or pure-Python flavor) -- nothing to do.
        return cls

    def _is_abstract(obj):
        return getattr(obj, '__isabstractmethod__', False)

    # Inherited abstract names that cls still leaves unimplemented.
    remaining = {
        name
        for base in cls.__bases__
        for name in getattr(base, '__abstractmethods__', ())
        if _is_abstract(getattr(cls, name, None))
    }
    # Plus any abstract methods declared directly on cls itself.
    remaining.update(
        name for name, obj in cls.__dict__.items() if _is_abstract(obj)
    )
    cls.__abstractmethods__ = frozenset(remaining)
    return cls
182
+
183
+
184
class ABC(metaclass=ABCMeta):
    """Convenience base class: inherit from ABC instead of spelling out
    metaclass=ABCMeta by hand.
    """
    # No per-instance __dict__; keeps ABC itself layout-neutral for
    # subclasses that declare their own __slots__.
    __slots__ = ()
python310/aifc.py ADDED
@@ -0,0 +1,947 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Stuff to parse AIFF-C and AIFF files.
2
+
3
+ Unless explicitly stated otherwise, the description below is true
4
+ both for AIFF-C files and AIFF files.
5
+
6
+ An AIFF-C file has the following structure.
7
+
8
+ +-----------------+
9
+ | FORM |
10
+ +-----------------+
11
+ | <size> |
12
+ +----+------------+
13
+ | | AIFC |
14
+ | +------------+
15
+ | | <chunks> |
16
+ | | . |
17
+ | | . |
18
+ | | . |
19
+ +----+------------+
20
+
21
+ An AIFF file has the string "AIFF" instead of "AIFC".
22
+
23
+ A chunk consists of an identifier (4 bytes) followed by a size (4 bytes,
24
+ big endian order), followed by the data. The size field does not include
25
+ the size of the 8 byte header.
26
+
27
+ The following chunk types are recognized.
28
+
29
+ FVER
30
+ <version number of AIFF-C defining document> (AIFF-C only).
31
+ MARK
32
+ <# of markers> (2 bytes)
33
+ list of markers:
34
+ <marker ID> (2 bytes, must be > 0)
35
+ <position> (4 bytes)
36
+ <marker name> ("pstring")
37
+ COMM
38
+ <# of channels> (2 bytes)
39
+ <# of sound frames> (4 bytes)
40
+ <size of the samples> (2 bytes)
41
+ <sampling frequency> (10 bytes, IEEE 80-bit extended
42
+ floating point)
43
+ in AIFF-C files only:
44
+ <compression type> (4 bytes)
45
+ <human-readable version of compression type> ("pstring")
46
+ SSND
47
+ <offset> (4 bytes, not used by this program)
48
+ <blocksize> (4 bytes, not used by this program)
49
+ <sound data>
50
+
51
+ A pstring consists of 1 byte length, a string of characters, and 0 or 1
52
+ byte pad to make the total length even.
53
+
54
+ Usage.
55
+
56
+ Reading AIFF files:
57
+ f = aifc.open(file, 'r')
58
+ where file is either the name of a file or an open file pointer.
59
+ The open file pointer must have methods read(), seek(), and close().
60
+ In some types of audio files, if the setpos() method is not used,
61
+ the seek() method is not necessary.
62
+
63
+ This returns an instance of a class with the following public methods:
64
+ getnchannels() -- returns number of audio channels (1 for
65
+ mono, 2 for stereo)
66
+ getsampwidth() -- returns sample width in bytes
67
+ getframerate() -- returns sampling frequency
68
+ getnframes() -- returns number of audio frames
69
+ getcomptype() -- returns compression type ('NONE' for AIFF files)
70
+ getcompname() -- returns human-readable version of
71
+ compression type ('not compressed' for AIFF files)
72
+ getparams() -- returns a namedtuple consisting of all of the
73
+ above in the above order
74
+ getmarkers() -- get the list of marks in the audio file or None
75
+ if there are no marks
76
+ getmark(id) -- get mark with the specified id (raises an error
77
+ if the mark does not exist)
78
+ readframes(n) -- returns at most n frames of audio
79
+ rewind() -- rewind to the beginning of the audio stream
80
+ setpos(pos) -- seek to the specified position
81
+ tell() -- return the current position
82
+ close() -- close the instance (make it unusable)
83
+ The position returned by tell(), the position given to setpos() and
84
+ the position of marks are all compatible and have nothing to do with
85
+ the actual position in the file.
86
+ The close() method is called automatically when the class instance
87
+ is destroyed.
88
+
89
+ Writing AIFF files:
90
+ f = aifc.open(file, 'w')
91
+ where file is either the name of a file or an open file pointer.
92
+ The open file pointer must have methods write(), tell(), seek(), and
93
+ close().
94
+
95
+ This returns an instance of a class with the following public methods:
96
+ aiff() -- create an AIFF file (AIFF-C default)
97
+ aifc() -- create an AIFF-C file
98
+ setnchannels(n) -- set the number of channels
99
+ setsampwidth(n) -- set the sample width
100
+ setframerate(n) -- set the frame rate
101
+ setnframes(n) -- set the number of frames
102
+ setcomptype(type, name)
103
+ -- set the compression type and the
104
+ human-readable compression type
105
+ setparams(tuple)
106
+ -- set all parameters at once
107
+ setmark(id, pos, name)
108
+ -- add specified mark to the list of marks
109
+ tell() -- return current position in output file (useful
110
+ in combination with setmark())
111
+ writeframesraw(data)
112
+ -- write audio frames without patching up the
113
+ file header
114
+ writeframes(data)
115
+ -- write audio frames and patch up the file header
116
+ close() -- patch up the file header and close the
117
+ output file
118
+ You should set the parameters before the first writeframesraw or
119
+ writeframes. The total number of frames does not need to be set,
120
+ but when it is set to the correct value, the header does not have to
121
+ be patched up.
122
+ It is best to first set all parameters, perhaps possibly the
123
+ compression type, and then write audio frames using writeframesraw.
124
+ When all frames have been written, either call writeframes(b'') or
125
+ close() to patch up the sizes in the header.
126
+ Marks can be added anytime. If there are any marks, you must call
127
+ close() after all frames have been written.
128
+ The close() method is called automatically when the class instance
129
+ is destroyed.
130
+
131
+ When a file is opened with the extension '.aiff', an AIFF file is
132
+ written, otherwise an AIFF-C file is written. This default can be
133
+ changed by calling aiff() or aifc() before the first writeframes or
134
+ writeframesraw.
135
+ """
136
+
137
+ import struct
138
+ import builtins
139
+ import warnings
140
+
141
+ __all__ = ["Error", "open"]
142
+
143
class Error(Exception):
    # Raised for all aifc-specific failures: malformed headers, unsupported
    # compression types, parameters changed after writing has started, etc.
    pass
145
+
146
# Magic timestamp value written into the FVER chunk of AIFF-C files.
_AIFC_version = 0xA2805140     # Version 1 of AIFF-C
147
+
148
+ def _read_long(file):
149
+ try:
150
+ return struct.unpack('>l', file.read(4))[0]
151
+ except struct.error:
152
+ raise EOFError from None
153
+
154
+ def _read_ulong(file):
155
+ try:
156
+ return struct.unpack('>L', file.read(4))[0]
157
+ except struct.error:
158
+ raise EOFError from None
159
+
160
+ def _read_short(file):
161
+ try:
162
+ return struct.unpack('>h', file.read(2))[0]
163
+ except struct.error:
164
+ raise EOFError from None
165
+
166
+ def _read_ushort(file):
167
+ try:
168
+ return struct.unpack('>H', file.read(2))[0]
169
+ except struct.error:
170
+ raise EOFError from None
171
+
172
+ def _read_string(file):
173
+ length = ord(file.read(1))
174
+ if length == 0:
175
+ data = b''
176
+ else:
177
+ data = file.read(length)
178
+ if length & 1 == 0:
179
+ dummy = file.read(1)
180
+ return data
181
+
182
+ _HUGE_VAL = 1.79769313486231e+308 # See <limits.h>
183
+
184
+ def _read_float(f): # 10 bytes
185
+ expon = _read_short(f) # 2 bytes
186
+ sign = 1
187
+ if expon < 0:
188
+ sign = -1
189
+ expon = expon + 0x8000
190
+ himant = _read_ulong(f) # 4 bytes
191
+ lomant = _read_ulong(f) # 4 bytes
192
+ if expon == himant == lomant == 0:
193
+ f = 0.0
194
+ elif expon == 0x7FFF:
195
+ f = _HUGE_VAL
196
+ else:
197
+ expon = expon - 16383
198
+ f = (himant * 0x100000000 + lomant) * pow(2.0, expon - 63)
199
+ return sign * f
200
+
201
+ def _write_short(f, x):
202
+ f.write(struct.pack('>h', x))
203
+
204
+ def _write_ushort(f, x):
205
+ f.write(struct.pack('>H', x))
206
+
207
+ def _write_long(f, x):
208
+ f.write(struct.pack('>l', x))
209
+
210
+ def _write_ulong(f, x):
211
+ f.write(struct.pack('>L', x))
212
+
213
+ def _write_string(f, s):
214
+ if len(s) > 255:
215
+ raise ValueError("string exceeds maximum pstring length")
216
+ f.write(struct.pack('B', len(s)))
217
+ f.write(s)
218
+ if len(s) & 1 == 0:
219
+ f.write(b'\x00')
220
+
221
+ def _write_float(f, x):
222
+ import math
223
+ if x < 0:
224
+ sign = 0x8000
225
+ x = x * -1
226
+ else:
227
+ sign = 0
228
+ if x == 0:
229
+ expon = 0
230
+ himant = 0
231
+ lomant = 0
232
+ else:
233
+ fmant, expon = math.frexp(x)
234
+ if expon > 16384 or fmant >= 1 or fmant != fmant: # Infinity or NaN
235
+ expon = sign|0x7FFF
236
+ himant = 0
237
+ lomant = 0
238
+ else: # Finite
239
+ expon = expon + 16382
240
+ if expon < 0: # denormalized
241
+ fmant = math.ldexp(fmant, expon)
242
+ expon = 0
243
+ expon = expon | sign
244
+ fmant = math.ldexp(fmant, 32)
245
+ fsmant = math.floor(fmant)
246
+ himant = int(fsmant)
247
+ fmant = math.ldexp(fmant - fsmant, 32)
248
+ fsmant = math.floor(fmant)
249
+ lomant = int(fsmant)
250
+ _write_ushort(f, expon)
251
+ _write_ulong(f, himant)
252
+ _write_ulong(f, lomant)
253
+
254
+ from chunk import Chunk
255
+ from collections import namedtuple
256
+
257
# Immutable record of the six AIFF parameters, in the order returned by
# getparams() and accepted by setparams().
_aifc_params = namedtuple('_aifc_params',
                          'nchannels sampwidth framerate nframes comptype compname')

_aifc_params.nchannels.__doc__ = 'Number of audio channels (1 for mono, 2 for stereo)'
_aifc_params.sampwidth.__doc__ = 'Sample width in bytes'
_aifc_params.framerate.__doc__ = 'Sampling frequency'
_aifc_params.nframes.__doc__ = 'Number of audio frames'
_aifc_params.comptype.__doc__ = 'Compression type ("NONE" for AIFF files)'
_aifc_params.compname.__doc__ = ("""\
A human-readable version of the compression type
('not compressed' for AIFF files)""")
268
+
269
+
270
class Aifc_read:
    """Reader for AIFF / AIFF-C audio files; see the module docstring."""
    # Variables used in this class:
    #
    # These variables are available to the user though appropriate
    # methods of this class:
    # _file -- the open file with methods read(), close(), and seek()
    #       set through the __init__() method
    # _nchannels -- the number of audio channels
    #       available through the getnchannels() method
    # _nframes -- the number of audio frames
    #       available through the getnframes() method
    # _sampwidth -- the number of bytes per audio sample
    #       available through the getsampwidth() method
    # _framerate -- the sampling frequency
    #       available through the getframerate() method
    # _comptype -- the AIFF-C compression type ('NONE' if AIFF)
    #       available through the getcomptype() method
    # _compname -- the human-readable AIFF-C compression type
    #       available through the getcomptype() method
    # _markers -- the marks in the audio file
    #       available through the getmarkers() and getmark()
    #       methods
    # _soundpos -- the position in the audio stream
    #       available through the tell() method, set through the
    #       setpos() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _decomp -- the decompressor from builtin module cl
    # _comm_chunk_read -- 1 iff the COMM chunk has been read
    # _aifc -- 1 iff reading an AIFF-C file
    # _ssnd_seek_needed -- 1 iff positioned correctly in audio
    #       file for readframes()
    # _ssnd_chunk -- instantiation of a chunk class for the SSND chunk
    # _framesize -- size of one frame in the file

    _file = None  # Set here since __del__ checks it

    def initfp(self, file):
        """Parse the FORM container from *file* and initialize all state.

        Raises Error when the file is not a valid AIFF/AIFF-C container
        or lacks the mandatory COMM/SSND chunks.
        """
        self._version = 0
        self._convert = None
        self._markers = []
        self._soundpos = 0
        self._file = file
        chunk = Chunk(file)
        if chunk.getname() != b'FORM':
            raise Error('file does not start with FORM id')
        formdata = chunk.read(4)
        if formdata == b'AIFF':
            self._aifc = 0
        elif formdata == b'AIFC':
            self._aifc = 1
        else:
            raise Error('not an AIFF or AIFF-C file')
        self._comm_chunk_read = 0
        self._ssnd_chunk = None
        while 1:
            self._ssnd_seek_needed = 1
            try:
                chunk = Chunk(self._file)
            except EOFError:
                break
            chunkname = chunk.getname()
            if chunkname == b'COMM':
                self._read_comm_chunk(chunk)
                self._comm_chunk_read = 1
            elif chunkname == b'SSND':
                self._ssnd_chunk = chunk
                dummy = chunk.read(8)  # skip offset and blocksize fields
                self._ssnd_seek_needed = 0
            elif chunkname == b'FVER':
                self._version = _read_ulong(chunk)
            elif chunkname == b'MARK':
                self._readmark(chunk)
            chunk.skip()
        if not self._comm_chunk_read or not self._ssnd_chunk:
            raise Error('COMM chunk and/or SSND chunk missing')

    def __init__(self, f):
        if isinstance(f, str):
            file_object = builtins.open(f, 'rb')
            try:
                self.initfp(file_object)
            except:
                file_object.close()
                raise
        else:
            # assume it is an open file object already
            self.initfp(f)

    def __del__(self):
        # Restored to match Aifc_write: the `_file = None` class attribute
        # above exists precisely so this destructor can call close() safely
        # even when __init__ never ran to completion.
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    #
    # User visible methods.
    #
    def getfp(self):
        """Return the underlying file object."""
        return self._file

    def rewind(self):
        """Rewind to the beginning of the audio stream."""
        self._ssnd_seek_needed = 1
        self._soundpos = 0

    def close(self):
        """Close the underlying file; further use is undefined."""
        file = self._file
        if file is not None:
            self._file = None
            file.close()

    def tell(self):
        """Return the current frame position in the audio stream."""
        return self._soundpos

    def getnchannels(self):
        """Return the number of audio channels (1 mono, 2 stereo)."""
        return self._nchannels

    def getnframes(self):
        """Return the number of audio frames."""
        return self._nframes

    def getsampwidth(self):
        """Return the sample width in bytes."""
        return self._sampwidth

    def getframerate(self):
        """Return the sampling frequency."""
        return self._framerate

    def getcomptype(self):
        """Return the compression type (b'NONE' for AIFF files)."""
        return self._comptype

    def getcompname(self):
        """Return the human-readable compression type."""
        return self._compname

##  def getversion(self):
##      return self._version

    def getparams(self):
        """Return all six parameters as an _aifc_params namedtuple."""
        return _aifc_params(self.getnchannels(), self.getsampwidth(),
                            self.getframerate(), self.getnframes(),
                            self.getcomptype(), self.getcompname())

    def getmarkers(self):
        """Return the list of (id, pos, name) marks, or None if empty."""
        if len(self._markers) == 0:
            return None
        return self._markers

    def getmark(self, id):
        """Return the mark with the given *id*; raise Error if absent."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))

    def setpos(self, pos):
        """Seek to frame position *pos*; raise Error when out of range."""
        if pos < 0 or pos > self._nframes:
            raise Error('position not in range')
        self._soundpos = pos
        self._ssnd_seek_needed = 1

    def readframes(self, nframes):
        """Return at most *nframes* frames of (decompressed) audio data."""
        if self._ssnd_seek_needed:
            self._ssnd_chunk.seek(0)
            dummy = self._ssnd_chunk.read(8)
            pos = self._soundpos * self._framesize
            if pos:
                self._ssnd_chunk.seek(pos + 8)
            self._ssnd_seek_needed = 0
        if nframes == 0:
            return b''
        data = self._ssnd_chunk.read(nframes * self._framesize)
        if self._convert and data:
            data = self._convert(data)
        self._soundpos = self._soundpos + len(data) // (self._nchannels
                                                        * self._sampwidth)
        return data

    #
    # Internal methods.
    #

    def _alaw2lin(self, data):
        import audioop
        return audioop.alaw2lin(data, 2)

    def _ulaw2lin(self, data):
        import audioop
        return audioop.ulaw2lin(data, 2)

    def _adpcm2lin(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            # first time
            self._adpcmstate = None
        data, self._adpcmstate = audioop.adpcm2lin(data, 2, self._adpcmstate)
        return data

    def _read_comm_chunk(self, chunk):
        """Parse the COMM chunk and set up any decompression converter."""
        self._nchannels = _read_short(chunk)
        self._nframes = _read_long(chunk)
        self._sampwidth = (_read_short(chunk) + 7) // 8
        self._framerate = int(_read_float(chunk))
        if self._sampwidth <= 0:
            raise Error('bad sample width')
        if self._nchannels <= 0:
            raise Error('bad # of channels')
        self._framesize = self._nchannels * self._sampwidth
        if self._aifc:
            #DEBUG: SGI's soundeditor produces a bad size :-(
            kludge = 0
            if chunk.chunksize == 18:
                kludge = 1
                warnings.warn('Warning: bad COMM chunk size')
                chunk.chunksize = 23
            #DEBUG end
            self._comptype = chunk.read(4)
            #DEBUG start
            if kludge:
                length = ord(chunk.file.read(1))
                if length & 1 == 0:
                    length = length + 1
                chunk.chunksize = chunk.chunksize + length
                chunk.file.seek(-1, 1)
            #DEBUG end
            self._compname = _read_string(chunk)
            if self._comptype != b'NONE':
                if self._comptype == b'G722':
                    self._convert = self._adpcm2lin
                elif self._comptype in (b'ulaw', b'ULAW'):
                    self._convert = self._ulaw2lin
                elif self._comptype in (b'alaw', b'ALAW'):
                    self._convert = self._alaw2lin
                else:
                    raise Error('unsupported compression type')
                # all supported codecs decode to 16-bit linear samples
                self._sampwidth = 2
        else:
            self._comptype = b'NONE'
            self._compname = b'not compressed'

    def _readmark(self, chunk):
        """Parse a MARK chunk into self._markers, tolerating bad counts."""
        nmarkers = _read_short(chunk)
        # Some files appear to contain invalid counts.
        # Cope with this by testing for EOF.
        try:
            for i in range(nmarkers):
                id = _read_short(chunk)
                pos = _read_long(chunk)
                name = _read_string(chunk)
                if pos or name:
                    # some files appear to have
                    # dummy markers consisting of
                    # a position 0 and name ''
                    self._markers.append((id, pos, name))
        except EOFError:
            w = ('Warning: MARK chunk contains only %s marker%s instead of %s' %
                 (len(self._markers), '' if len(self._markers) == 1 else 's',
                  nmarkers))
            warnings.warn(w)
526
+
527
class Aifc_write:
    """Writer for AIFF / AIFF-C audio files; see the module docstring."""
    # Variables used in this class:
    #
    # These variables are user settable through appropriate methods
    # of this class:
    # _file -- the open file with methods write(), close(), tell(), seek()
    #       set through the __init__() method
    # _comptype -- the AIFF-C compression type ('NONE' in AIFF)
    #       set through the setcomptype() or setparams() method
    # _compname -- the human-readable AIFF-C compression type
    #       set through the setcomptype() or setparams() method
    # _nchannels -- the number of audio channels
    #       set through the setnchannels() or setparams() method
    # _sampwidth -- the number of bytes per audio sample
    #       set through the setsampwidth() or setparams() method
    # _framerate -- the sampling frequency
    #       set through the setframerate() or setparams() method
    # _nframes -- the number of audio frames written to the header
    #       set through the setnframes() or setparams() method
    # _aifc -- whether we're writing an AIFF-C file or an AIFF file
    #       set through the aifc() method, reset through the
    #       aiff() method
    #
    # These variables are used internally only:
    # _version -- the AIFF-C version number
    # _comp -- the compressor from builtin module cl
    # _nframeswritten -- the number of audio frames actually written
    # _datalength -- the size of the audio samples written to the header
    # _datawritten -- the size of the audio samples actually written

    _file = None  # Set here since __del__ checks it

    def __init__(self, f):
        # Accept either a filename or an already-open writable file object.
        if isinstance(f, str):
            file_object = builtins.open(f, 'wb')
            try:
                self.initfp(file_object)
            except:
                file_object.close()
                raise

            # treat .aiff file extensions as non-compressed audio
            if f.endswith('.aiff'):
                self._aifc = 0
        else:
            # assume it is an open file object already
            self.initfp(f)

    def initfp(self, file):
        """Reset all writer state for a fresh output *file*."""
        self._file = file
        self._version = _AIFC_version
        self._comptype = b'NONE'
        self._compname = b'not compressed'
        self._convert = None
        self._nchannels = 0
        self._sampwidth = 0
        self._framerate = 0
        self._nframes = 0
        self._nframeswritten = 0
        self._datawritten = 0
        self._datalength = 0
        self._markers = []
        self._marklength = 0
        self._aifc = 1      # AIFF-C is default

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    #
    # User visible methods.
    #
    def aiff(self):
        """Switch to plain AIFF output; must precede any writing."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 0

    def aifc(self):
        """Switch to AIFF-C output (the default); must precede writing."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._aifc = 1

    def setnchannels(self, nchannels):
        """Set the number of channels; must precede any writing."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if nchannels < 1:
            raise Error('bad # of channels')
        self._nchannels = nchannels

    def getnchannels(self):
        """Return the channel count; raise Error if not yet set."""
        if not self._nchannels:
            raise Error('number of channels not set')
        return self._nchannels

    def setsampwidth(self, sampwidth):
        """Set the sample width in bytes (1-4); must precede writing."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if sampwidth < 1 or sampwidth > 4:
            raise Error('bad sample width')
        self._sampwidth = sampwidth

    def getsampwidth(self):
        """Return the sample width; raise Error if not yet set."""
        if not self._sampwidth:
            raise Error('sample width not set')
        return self._sampwidth

    def setframerate(self, framerate):
        """Set the sampling frequency; must precede any writing."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if framerate <= 0:
            raise Error('bad frame rate')
        self._framerate = framerate

    def getframerate(self):
        """Return the sampling frequency; raise Error if not yet set."""
        if not self._framerate:
            raise Error('frame rate not set')
        return self._framerate

    def setnframes(self, nframes):
        """Pre-declare the frame count so the header need not be patched."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        self._nframes = nframes

    def getnframes(self):
        """Return the number of frames actually written so far."""
        return self._nframeswritten

    def setcomptype(self, comptype, compname):
        """Set compression type and its human-readable name."""
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self._comptype = comptype
        self._compname = compname

    def getcomptype(self):
        """Return the compression type."""
        return self._comptype

    def getcompname(self):
        """Return the human-readable compression type."""
        return self._compname

##  def setversion(self, version):
##      if self._nframeswritten:
##          raise Error, 'cannot change parameters after starting to write'
##      self._version = version

    def setparams(self, params):
        """Set all six parameters at once from a tuple/namedtuple."""
        nchannels, sampwidth, framerate, nframes, comptype, compname = params
        if self._nframeswritten:
            raise Error('cannot change parameters after starting to write')
        if comptype not in (b'NONE', b'ulaw', b'ULAW',
                            b'alaw', b'ALAW', b'G722'):
            raise Error('unsupported compression type')
        self.setnchannels(nchannels)
        self.setsampwidth(sampwidth)
        self.setframerate(framerate)
        self.setnframes(nframes)
        self.setcomptype(comptype, compname)

    def getparams(self):
        """Return all parameters; raise Error if any mandatory one unset."""
        if not self._nchannels or not self._sampwidth or not self._framerate:
            raise Error('not all parameters set')
        return _aifc_params(self._nchannels, self._sampwidth, self._framerate,
                            self._nframes, self._comptype, self._compname)

    def setmark(self, id, pos, name):
        """Add (or replace) the mark with the given positive *id*."""
        if id <= 0:
            raise Error('marker ID must be > 0')
        if pos < 0:
            raise Error('marker position must be >= 0')
        if not isinstance(name, bytes):
            raise Error('marker name must be bytes')
        for i in range(len(self._markers)):
            if id == self._markers[i][0]:
                self._markers[i] = id, pos, name
                return
        self._markers.append((id, pos, name))

    def getmark(self, id):
        """Return the mark with the given *id*; raise Error if absent."""
        for marker in self._markers:
            if id == marker[0]:
                return marker
        raise Error('marker {0!r} does not exist'.format(id))

    def getmarkers(self):
        """Return the list of marks, or None if there are none."""
        if len(self._markers) == 0:
            return None
        return self._markers

    def tell(self):
        """Return the current position (frames written so far)."""
        return self._nframeswritten

    def writeframesraw(self, data):
        """Write audio frames without updating the file header."""
        if not isinstance(data, (bytes, bytearray)):
            data = memoryview(data).cast('B')
        self._ensure_header_written(len(data))
        nframes = len(data) // (self._sampwidth * self._nchannels)
        if self._convert:
            data = self._convert(data)
        self._file.write(data)
        self._nframeswritten = self._nframeswritten + nframes
        self._datawritten = self._datawritten + len(data)

    def writeframes(self, data):
        """Write audio frames and patch the header if sizes changed."""
        self.writeframesraw(data)
        if self._nframeswritten != self._nframes or \
                self._datalength != self._datawritten:
            self._patchheader()

    def close(self):
        """Flush markers, patch the header, and close the output file."""
        if self._file is None:
            return
        try:
            self._ensure_header_written(0)
            if self._datawritten & 1:
                # quick pad to even size
                self._file.write(b'\x00')
                self._datawritten = self._datawritten + 1
            self._writemarkers()
            if self._nframeswritten != self._nframes or \
                    self._datalength != self._datawritten or \
                    self._marklength:
                self._patchheader()
        finally:
            # Prevent ref cycles
            self._convert = None
            f = self._file
            self._file = None
            f.close()

    #
    # Internal methods.
    #

    def _lin2alaw(self, data):
        import audioop
        return audioop.lin2alaw(data, 2)

    def _lin2ulaw(self, data):
        import audioop
        return audioop.lin2ulaw(data, 2)

    def _lin2adpcm(self, data):
        import audioop
        if not hasattr(self, '_adpcmstate'):
            self._adpcmstate = None
        data, self._adpcmstate = audioop.lin2adpcm(data, 2, self._adpcmstate)
        return data

    def _ensure_header_written(self, datasize):
        # Lazily emit the header on the first write, after validating that
        # all mandatory parameters have been supplied.
        if not self._nframeswritten:
            if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
                if not self._sampwidth:
                    self._sampwidth = 2
                if self._sampwidth != 2:
                    raise Error('sample width must be 2 when compressing '
                                'with ulaw/ULAW, alaw/ALAW or G7.22 (ADPCM)')
            if not self._nchannels:
                raise Error('# channels not specified')
            if not self._sampwidth:
                raise Error('sample width not specified')
            if not self._framerate:
                raise Error('sampling rate not specified')
            self._write_header(datasize)

    def _init_compression(self):
        # Choose the linear -> compressed converter for the chosen codec.
        if self._comptype == b'G722':
            self._convert = self._lin2adpcm
        elif self._comptype in (b'ulaw', b'ULAW'):
            self._convert = self._lin2ulaw
        elif self._comptype in (b'alaw', b'ALAW'):
            self._convert = self._lin2alaw

    def _write_header(self, initlength):
        # Emit FORM/FVER/COMM/SSND headers, remembering seekable positions
        # so _patchheader() can fix sizes later.
        if self._aifc and self._comptype != b'NONE':
            self._init_compression()
        self._file.write(b'FORM')
        if not self._nframes:
            self._nframes = initlength // (self._nchannels * self._sampwidth)
        self._datalength = self._nframes * self._nchannels * self._sampwidth
        if self._datalength & 1:
            self._datalength = self._datalength + 1
        if self._aifc:
            if self._comptype in (b'ulaw', b'ULAW', b'alaw', b'ALAW'):
                self._datalength = self._datalength // 2
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
            elif self._comptype == b'G722':
                self._datalength = (self._datalength + 3) // 4
                if self._datalength & 1:
                    self._datalength = self._datalength + 1
        try:
            self._form_length_pos = self._file.tell()
        except (AttributeError, OSError):
            # unseekable stream: header patching will be impossible
            self._form_length_pos = None
        commlength = self._write_form_length(self._datalength)
        if self._aifc:
            self._file.write(b'AIFC')
            self._file.write(b'FVER')
            _write_ulong(self._file, 4)
            _write_ulong(self._file, self._version)
        else:
            self._file.write(b'AIFF')
        self._file.write(b'COMM')
        _write_ulong(self._file, commlength)
        _write_short(self._file, self._nchannels)
        if self._form_length_pos is not None:
            self._nframes_pos = self._file.tell()
        _write_ulong(self._file, self._nframes)
        if self._comptype in (b'ULAW', b'ulaw', b'ALAW', b'alaw', b'G722'):
            _write_short(self._file, 8)
        else:
            _write_short(self._file, self._sampwidth * 8)
        _write_float(self._file, self._framerate)
        if self._aifc:
            self._file.write(self._comptype)
            _write_string(self._file, self._compname)
        self._file.write(b'SSND')
        if self._form_length_pos is not None:
            self._ssnd_length_pos = self._file.tell()
        _write_ulong(self._file, self._datalength + 8)
        _write_ulong(self._file, 0)
        _write_ulong(self._file, 0)

    def _write_form_length(self, datalength):
        # Write the total FORM size and return the COMM chunk length.
        if self._aifc:
            commlength = 18 + 5 + len(self._compname)
            if commlength & 1:
                commlength = commlength + 1
            verslength = 12
        else:
            commlength = 18
            verslength = 0
        _write_ulong(self._file, 4 + verslength + self._marklength + \
                     8 + commlength + 16 + datalength)
        return commlength

    def _patchheader(self):
        # Seek back and rewrite FORM size, frame count and SSND size to
        # match what was actually written, then restore the position.
        curpos = self._file.tell()
        if self._datawritten & 1:
            datalength = self._datawritten + 1
            self._file.write(b'\x00')
        else:
            datalength = self._datawritten
        if datalength == self._datalength and \
                self._nframes == self._nframeswritten and \
                self._marklength == 0:
            self._file.seek(curpos, 0)
            return
        self._file.seek(self._form_length_pos, 0)
        dummy = self._write_form_length(datalength)
        self._file.seek(self._nframes_pos, 0)
        _write_ulong(self._file, self._nframeswritten)
        self._file.seek(self._ssnd_length_pos, 0)
        _write_ulong(self._file, datalength + 8)
        self._file.seek(curpos, 0)
        self._nframes = self._nframeswritten
        self._datalength = datalength

    def _writemarkers(self):
        # Emit the MARK chunk (if any marks were set) and record its size
        # so _patchheader() can account for it in the FORM length.
        if len(self._markers) == 0:
            return
        self._file.write(b'MARK')
        length = 2
        for marker in self._markers:
            id, pos, name = marker
            length = length + len(name) + 1 + 6
            if len(name) & 1 == 0:
                length = length + 1
        _write_ulong(self._file, length)
        self._marklength = length + 8
        _write_short(self._file, len(self._markers))
        for marker in self._markers:
            id, pos, name = marker
            _write_short(self._file, id)
            _write_ulong(self._file, pos)
            _write_string(self._file, name)
909
+
910
def open(f, mode=None):
    """Open an AIFF/AIFF-C file for reading or writing.

    *f* is a filename or an open file object.  *mode* is 'r'/'rb' for an
    Aifc_read, 'w'/'wb' for an Aifc_write; when omitted it is taken from
    f.mode if present, defaulting to 'rb'.
    """
    if mode is None:
        mode = getattr(f, 'mode', 'rb')
    if mode in ('r', 'rb'):
        return Aifc_read(f)
    if mode in ('w', 'wb'):
        return Aifc_write(f)
    raise Error("mode must be 'r', 'rb', 'w', or 'wb'")
922
+
923
+
924
if __name__ == '__main__':
    # Minimal command-line demo: print the parameters of an AIFF file and
    # optionally copy its audio to a second file (argv[2]).
    import sys
    if not sys.argv[1:]:
        sys.argv.append('/usr/demos/data/audio/bach.aiff')
    fn = sys.argv[1]
    # NOTE: `open` here is this module's open(), not the builtin.
    with open(fn, 'r') as f:
        print("Reading", fn)
        print("nchannels =", f.getnchannels())
        print("nframes   =", f.getnframes())
        print("sampwidth =", f.getsampwidth())
        print("framerate =", f.getframerate())
        print("comptype  =", f.getcomptype())
        print("compname  =", f.getcompname())
        if sys.argv[2:]:
            gn = sys.argv[2]
            print("Writing", gn)
            with open(gn, 'w') as g:
                g.setparams(f.getparams())
                # Copy the audio in 1024-frame chunks until exhausted.
                while 1:
                    data = f.readframes(1024)
                    if not data:
                        break
                    g.writeframes(data)
                print("Done.")
python310/antigravity.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import webbrowser
3
+ import hashlib
4
+
5
+ webbrowser.open("https://xkcd.com/353/")
6
+
7
def geohash(latitude, longitude, datedow):
    '''Compute geohash() using the Munroe algorithm.

    >>> geohash(37.421542, -122.085589, b'2005-05-26-10458.68')
    37.857713 -122.544543

    '''
    # https://xkcd.com/426/
    digest = hashlib.md5(datedow, usedforsecurity=False).hexdigest()
    # Each 16-hex-digit half of the digest becomes a fractional offset.
    fractions = []
    for half in (digest[:16], digest[16:32]):
        fractions.append('%f' % float.fromhex('0.' + half))
    lat_frac, lon_frac = fractions
    # Splice the fractional digits onto the integer part of each coordinate.
    print('%d%s %d%s' % (latitude, lat_frac[1:], longitude, lon_frac[1:]))
python310/argparse.py ADDED
The diff for this file is too large to render. See raw diff
 
python310/ast.py ADDED
@@ -0,0 +1,1709 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ ast
3
+ ~~~
4
+
5
+ The `ast` module helps Python applications to process trees of the Python
6
+ abstract syntax grammar. The abstract syntax itself might change with
7
+ each Python release; this module helps to find out programmatically what
8
+ the current grammar looks like and allows modifications of it.
9
+
10
+ An abstract syntax tree can be generated by passing `ast.PyCF_ONLY_AST` as
11
+ a flag to the `compile()` builtin function or by using the `parse()`
12
+ function from this module. The result will be a tree of objects whose
13
+ classes all inherit from `ast.AST`.
14
+
15
+ A modified abstract syntax tree can be compiled into a Python code object
16
+ using the built-in `compile()` function.
17
+
18
+ Additionally various helper functions are provided that make working with
19
+ the trees simpler. The main intention of the helper functions and this
20
+ module in general is to provide an easy to use interface for libraries
21
+ that work tightly with the python syntax (template engines for example).
22
+
23
+
24
+ :copyright: Copyright 2008 by Armin Ronacher.
25
+ :license: Python License.
26
+ """
27
+ import sys
28
+ from _ast import *
29
+ from contextlib import contextmanager, nullcontext
30
+ from enum import IntEnum, auto
31
+
32
+
33
def parse(source, filename='<unknown>', mode='exec', *,
          type_comments=False, feature_version=None):
    """
    Parse the source into an AST node.
    Equivalent to compile(source, filename, mode, PyCF_ONLY_AST).
    Pass type_comments=True to get back type comments where the syntax allows.
    """
    # Start from the bare "give me an AST" flag and opt into type comments.
    compile_flags = PyCF_ONLY_AST
    if type_comments:
        compile_flags |= PyCF_TYPE_COMMENTS
    # Normalize feature_version: a (3, minor) tuple collapses to its minor
    # part, None means "no pinned version" (signalled to compile() as -1),
    # and a bare int is passed through as the 3.x minor version.
    if isinstance(feature_version, tuple):
        major, minor = feature_version  # Should be a 2-tuple.
        assert major == 3
        feature_version = minor
    elif feature_version is None:
        feature_version = -1
    return compile(source, filename, mode, compile_flags,
                   _feature_version=feature_version)
52
+
53
+
54
def literal_eval(node_or_string):
    """
    Evaluate an expression node or a string containing only a Python
    expression.  The string or node provided may only consist of the
    following Python literal structures: strings, bytes, numbers, tuples,
    lists, dicts, sets, booleans, and None.

    Caution: A complex expression can overflow the C stack and cause a crash.
    """
    if isinstance(node_or_string, str):
        node_or_string = parse(node_or_string.lstrip(" \t"), mode='eval')
    if isinstance(node_or_string, Expression):
        node_or_string = node_or_string.body

    def _bad_node(node):
        # Uniform error for anything outside the literal subset.
        message = "malformed node or string"
        if lineno := getattr(node, 'lineno', None):
            message += f' on line {lineno}'
        raise ValueError(f'{message}: {node!r}')

    def _as_number(node):
        # Only plain numeric Constants qualify; the exact-type check keeps
        # bools (a subclass of int) out.
        if not isinstance(node, Constant) or type(node.value) not in (int, float, complex):
            _bad_node(node)
        return node.value

    def _as_signed_number(node):
        # A number optionally wrapped in a single unary +/-.
        if isinstance(node, UnaryOp) and isinstance(node.op, (UAdd, USub)):
            operand = _as_number(node.operand)
            return +operand if isinstance(node.op, UAdd) else -operand
        return _as_number(node)

    def _build(node):
        if isinstance(node, Constant):
            return node.value
        if isinstance(node, Tuple):
            return tuple(_build(elt) for elt in node.elts)
        if isinstance(node, List):
            return [_build(elt) for elt in node.elts]
        if isinstance(node, Set):
            return {_build(elt) for elt in node.elts}
        if (isinstance(node, Call) and isinstance(node.func, Name)
                and node.func.id == 'set' and node.args == node.keywords == []):
            # The empty set has no literal syntax; allow a bare set() call.
            return set()
        if isinstance(node, Dict):
            if len(node.keys) != len(node.values):
                _bad_node(node)
            return {_build(key): _build(value)
                    for key, value in zip(node.keys, node.values)}
        if isinstance(node, BinOp) and isinstance(node.op, (Add, Sub)):
            # Support complex literals written as real +/- imaginary parts
            # (e.g. 1 + 2j); anything else falls through to the error path.
            left = _as_signed_number(node.left)
            right = _as_number(node.right)
            if isinstance(left, (int, float)) and isinstance(right, complex):
                return left + right if isinstance(node.op, Add) else left - right
        return _as_signed_number(node)

    return _build(node_or_string)
111
+
112
+
113
def dump(node, annotate_fields=True, include_attributes=False, *, indent=None):
    """
    Return a formatted dump of the tree in node.  This is mainly useful for
    debugging purposes.  If annotate_fields is true (by default),
    the returned string will show the names and the values for fields.
    If annotate_fields is false, the result string will be more compact by
    omitting unambiguous field names.  Attributes such as line
    numbers and column offsets are not dumped by default.  If this is wanted,
    include_attributes can be set to true.  If indent is a non-negative
    integer or string, then the tree will be pretty-printed with that indent
    level. None (the default) selects the single line representation.
    """
    def _format(node, level=0):
        # Returns a (text, simple) pair; "simple" marks subtrees short
        # enough to stay on one line even when pretty-printing.
        if indent is not None:
            level += 1
            prefix = '\n' + indent * level
            sep = ',\n' + indent * level
        else:
            prefix = ''
            sep = ', '
        if isinstance(node, AST):
            cls = type(node)
            args = []
            allsimple = True
            keywords = annotate_fields
            for name in node._fields:
                try:
                    value = getattr(node, name)
                except AttributeError:
                    # A missing field forces keyword form for the remaining
                    # fields so positional values are not misinterpreted.
                    keywords = True
                    continue
                if value is None and getattr(cls, name, ...) is None:
                    # Skip fields that equal their class-level None default.
                    keywords = True
                    continue
                value, simple = _format(value, level)
                allsimple = allsimple and simple
                if keywords:
                    args.append('%s=%s' % (name, value))
                else:
                    args.append(value)
            if include_attributes and node._attributes:
                for name in node._attributes:
                    try:
                        value = getattr(node, name)
                    except AttributeError:
                        continue
                    if value is None and getattr(cls, name, ...) is None:
                        continue
                    value, simple = _format(value, level)
                    allsimple = allsimple and simple
                    args.append('%s=%s' % (name, value))
            if allsimple and len(args) <= 3:
                # Short leaf-ish nodes stay on one line even in indent mode.
                return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args
            return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False
        elif isinstance(node, list):
            if not node:
                return '[]', True
            return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False
        return repr(node), True

    if not isinstance(node, AST):
        raise TypeError('expected AST, got %r' % node.__class__.__name__)
    if indent is not None and not isinstance(indent, str):
        # A numeric indent means "that many spaces" per level.
        indent = ' ' * indent
    return _format(node)[0]
178
+
179
+
180
def copy_location(new_node, old_node):
    """
    Copy source location (`lineno`, `col_offset`, `end_lineno`, and
    `end_col_offset` attributes) from *old_node* to *new_node* if possible,
    and return *new_node*.
    """
    for attr in ('lineno', 'col_offset', 'end_lineno', 'end_col_offset'):
        # Only copy attributes that both node types actually support.
        if attr not in old_node._attributes or attr not in new_node._attributes:
            continue
        value = getattr(old_node, attr, None)
        # end_lineno and end_col_offset are optional attributes, and they
        # should be copied whether the value is None or not.
        if value is not None or (
            hasattr(old_node, attr) and attr.startswith("end_")
        ):
            setattr(new_node, attr, value)
    return new_node
195
+
196
+
197
def fix_missing_locations(node):
    """
    When you compile a node tree with compile(), the compiler expects lineno
    and col_offset attributes for every node that supports them.  This is
    rather tedious to fill in for generated nodes, so this helper adds these
    attributes recursively where not already set, by setting them to the
    values of the parent node.  It works recursively starting at *node*.
    """
    def _propagate(n, lineno, col_offset, end_lineno, end_col_offset):
        # lineno/col_offset: inherit only when the attribute is missing
        # entirely; otherwise the node's own value becomes the new default
        # for its children.
        if 'lineno' in n._attributes:
            if not hasattr(n, 'lineno'):
                n.lineno = lineno
            else:
                lineno = n.lineno
        # end_* attributes are optional and may legitimately be None, so a
        # None value is also treated as "missing" and filled in.
        if 'end_lineno' in n._attributes:
            if getattr(n, 'end_lineno', None) is None:
                n.end_lineno = end_lineno
            else:
                end_lineno = n.end_lineno
        if 'col_offset' in n._attributes:
            if not hasattr(n, 'col_offset'):
                n.col_offset = col_offset
            else:
                col_offset = n.col_offset
        if 'end_col_offset' in n._attributes:
            if getattr(n, 'end_col_offset', None) is None:
                n.end_col_offset = end_col_offset
            else:
                end_col_offset = n.end_col_offset
        for child in iter_child_nodes(n):
            _propagate(child, lineno, col_offset, end_lineno, end_col_offset)

    _propagate(node, 1, 0, 1, 0)
    return node
230
+
231
+
232
def increment_lineno(node, n=1):
    """
    Increment the line number and end line number of each node in the tree
    starting at *node* by *n*.  This is useful to "move code" to a different
    location in a file.
    """
    for child in walk(node):
        # TypeIgnore is a special case where lineno is not an attribute
        # but rather a field of the node itself.
        if isinstance(child, TypeIgnore):
            child.lineno = getattr(child, 'lineno', 0) + n
            continue
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
        if 'end_lineno' in child._attributes:
            current_end = getattr(child, 'end_lineno', 0)
            # end_lineno may be None on synthetic nodes; leave those alone.
            if current_end is not None:
                child.end_lineno = current_end + n
    return node
253
+
254
+
255
def iter_fields(node):
    """
    Yield a tuple of ``(fieldname, value)`` for each field in
    ``node._fields`` that is present on *node*.
    """
    for field in node._fields:
        # Fields may legitimately be absent (e.g. on manually constructed
        # nodes); skip those silently.
        if hasattr(node, field):
            yield field, getattr(node, field)
265
+
266
+
267
def iter_child_nodes(node):
    """
    Yield all direct child nodes of *node*, that is, all fields that are
    nodes and all items of fields that are lists of nodes.
    """
    for _, value in iter_fields(node):
        if isinstance(value, AST):
            yield value
            continue
        if isinstance(value, list):
            # List fields can mix AST nodes with plain values (identifiers,
            # constants); yield only the nodes.
            yield from (item for item in value if isinstance(item, AST))
279
+
280
+
281
def get_docstring(node, clean=True):
    """
    Return the docstring for the given node or None if no docstring can
    be found.  If the node provided does not have docstrings a TypeError
    will be raised.

    If *clean* is `True`, all tabs are expanded to spaces and any whitespace
    that can be uniformly removed from the second line onwards is removed.
    """
    if not isinstance(node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)):
        raise TypeError("%r can't have docstrings" % node.__class__.__name__)
    if not (node.body and isinstance(node.body[0], Expr)):
        return None
    node = node.body[0].value
    if isinstance(node, Str):
        # Deprecated Str nodes may still show up in pre-3.8 style trees.
        text = node.s
    elif isinstance(node, Constant) and isinstance(node.value, str):
        text = node.value
    else:
        return None
    if not clean:
        return text
    import inspect
    return inspect.cleandoc(text)
305
+
306
+
307
+ def _splitlines_no_ff(source):
308
+ """Split a string into lines ignoring form feed and other chars.
309
+
310
+ This mimics how the Python parser splits source code.
311
+ """
312
+ idx = 0
313
+ lines = []
314
+ next_line = ''
315
+ while idx < len(source):
316
+ c = source[idx]
317
+ next_line += c
318
+ idx += 1
319
+ # Keep \r\n together
320
+ if c == '\r' and idx < len(source) and source[idx] == '\n':
321
+ next_line += '\n'
322
+ idx += 1
323
+ if c in '\r\n':
324
+ lines.append(next_line)
325
+ next_line = ''
326
+
327
+ if next_line:
328
+ lines.append(next_line)
329
+ return lines
330
+
331
+
332
+ def _pad_whitespace(source):
333
+ r"""Replace all chars except '\f\t' in a line with spaces."""
334
+ result = ''
335
+ for c in source:
336
+ if c in '\f\t':
337
+ result += c
338
+ else:
339
+ result += ' '
340
+ return result
341
+
342
+
343
def get_source_segment(source, node, *, padded=False):
    """Get source code segment of the *source* that generated *node*.

    If some location information (`lineno`, `end_lineno`, `col_offset`,
    or `end_col_offset`) is missing, return None.

    If *padded* is `True`, the first line of a multi-line statement will
    be padded with spaces to match its original position.
    """
    try:
        if node.end_lineno is None or node.end_col_offset is None:
            return None
        # Line numbers are 1-based; convert to 0-based list indices.
        lineno = node.lineno - 1
        end_lineno = node.end_lineno - 1
        col_offset = node.col_offset
        end_col_offset = node.end_col_offset
    except AttributeError:
        return None

    lines = _splitlines_no_ff(source)
    if end_lineno == lineno:
        # Column offsets count bytes of the UTF-8 encoding of the line,
        # so slice in bytes and decode back to text.
        return lines[lineno].encode()[col_offset:end_col_offset].decode()

    if padded:
        padding = _pad_whitespace(lines[lineno].encode()[:col_offset].decode())
    else:
        padding = ''

    first = padding + lines[lineno].encode()[col_offset:].decode()
    last = lines[end_lineno].encode()[:end_col_offset].decode()
    lines = lines[lineno+1:end_lineno]

    lines.insert(0, first)
    lines.append(last)
    return ''.join(lines)
378
+
379
+
380
def walk(node):
    """
    Recursively yield all descendant nodes in the tree starting at *node*
    (including *node* itself), in no specified order.  This is useful if you
    only want to modify nodes in place and don't care about the context.
    """
    from collections import deque
    # Breadth-first traversal via an explicit queue.
    pending = deque([node])
    while pending:
        current = pending.popleft()
        pending.extend(iter_child_nodes(current))
        yield current
392
+
393
+
394
class NodeVisitor(object):
    """
    A node visitor base class that walks the abstract syntax tree and calls a
    visitor function for every node found.  This function may return a value
    which is forwarded by the `visit` method.

    This class is meant to be subclassed, with the subclass adding visitor
    methods.

    Per default the visitor functions for the nodes are ``'visit_'`` +
    class name of the node.  So a `TryFinally` node visit function would
    be `visit_TryFinally`.  This behavior can be changed by overriding
    the `visit` method.  If no visitor function exists for a node
    (return value `None`) the `generic_visit` visitor is used instead.

    Don't use the `NodeVisitor` if you want to apply changes to nodes during
    traversing.  For this a special visitor exists (`NodeTransformer`) that
    allows modifications.
    """

    def visit(self, node):
        """Visit a node."""
        # Dispatch on the node's class name; fall back to generic_visit
        # when the subclass defines no matching visit_<ClassName> method.
        method = 'visit_' + node.__class__.__name__
        visitor = getattr(self, method, self.generic_visit)
        return visitor(node)

    def generic_visit(self, node):
        """Called if no explicit visitor function exists for a node."""
        for field, value in iter_fields(node):
            if isinstance(value, list):
                for item in value:
                    if isinstance(item, AST):
                        self.visit(item)
            elif isinstance(value, AST):
                self.visit(value)

    def visit_Constant(self, node):
        # Backward compatibility: route Constant nodes to a legacy
        # visit_Num/visit_Str/visit_Bytes/visit_NameConstant/visit_Ellipsis
        # method if the subclass still defines one, with a deprecation
        # warning; otherwise fall through to generic_visit.
        value = node.value
        type_name = _const_node_type_names.get(type(value))
        if type_name is None:
            # Exact-type lookup missed; try subclass matching in the
            # mapping's declaration order (bool is listed before int there).
            for cls, name in _const_node_type_names.items():
                if isinstance(value, cls):
                    type_name = name
                    break
        if type_name is not None:
            method = 'visit_' + type_name
            try:
                visitor = getattr(self, method)
            except AttributeError:
                pass
            else:
                import warnings
                warnings.warn(f"{method} is deprecated; add visit_Constant",
                              DeprecationWarning, 2)
                return visitor(node)
        return self.generic_visit(node)
450
+
451
+
452
class NodeTransformer(NodeVisitor):
    """
    A :class:`NodeVisitor` subclass that walks the abstract syntax tree and
    allows modification of nodes.

    The `NodeTransformer` will walk the AST and use the return value of the
    visitor methods to replace or remove the old node.  If the return value of
    the visitor method is ``None``, the node will be removed from its location,
    otherwise it is replaced with the return value.  The return value may be the
    original node in which case no replacement takes place.

    Here is an example transformer that rewrites all occurrences of name lookups
    (``foo``) to ``data['foo']``::

       class RewriteName(NodeTransformer):

           def visit_Name(self, node):
               return Subscript(
                   value=Name(id='data', ctx=Load()),
                   slice=Constant(value=node.id),
                   ctx=node.ctx
               )

    Keep in mind that if the node you're operating on has child nodes you must
    either transform the child nodes yourself or call the :meth:`generic_visit`
    method for the node first.

    For nodes that were part of a collection of statements (that applies to all
    statement nodes), the visitor may also return a list of nodes rather than
    just a single node.

    Usually you use the transformer like this::

       node = YourTransformer().visit(node)
    """

    def generic_visit(self, node):
        # Rebuild each child field from the visitors' return values.
        for field, old_value in iter_fields(node):
            if isinstance(old_value, list):
                new_values = []
                for value in old_value:
                    if isinstance(value, AST):
                        value = self.visit(value)
                        if value is None:
                            # None removes the node from the list.
                            continue
                        elif not isinstance(value, AST):
                            # A visitor may return a list of nodes to splice
                            # in place of the original node.
                            new_values.extend(value)
                            continue
                    # Non-AST items (identifiers, constants) pass through.
                    new_values.append(value)
                # Mutate the list in place so the node keeps the same object.
                old_value[:] = new_values
            elif isinstance(old_value, AST):
                new_node = self.visit(old_value)
                if new_node is None:
                    delattr(node, field)
                else:
                    setattr(node, field, new_node)
        return node
509
+
510
+
511
# If the ast module is loaded more than once, only add deprecated methods once
if not hasattr(Constant, 'n'):
    # The following code is for backward compatibility.
    # It will be removed in future.

    def _getter(self):
        """Deprecated. Use value instead."""
        return self.value

    def _setter(self, value):
        self.value = value

    # Legacy Num.n / Str.s access is forwarded to Constant.value.
    Constant.n = property(_getter, _setter)
    Constant.s = property(_getter, _setter)

class _ABC(type):
    # Metaclass for the deprecated constant node classes: makes
    # isinstance(node, Num/Str/...) answer based on the type of the
    # Constant's value rather than the actual class.

    def __init__(cls, *args):
        cls.__doc__ = """Deprecated AST node class. Use ast.Constant instead"""

    def __instancecheck__(cls, inst):
        if not isinstance(inst, Constant):
            return False
        if cls in _const_types:
            try:
                value = inst.value
            except AttributeError:
                return False
            else:
                return (
                    isinstance(value, _const_types[cls]) and
                    not isinstance(value, _const_types_not.get(cls, ()))
                )
        return type.__instancecheck__(cls, inst)

def _new(cls, *args, **kwargs):
    # Shared __new__ for the deprecated classes: constructing e.g. Num(3)
    # actually produces a Constant(3).
    for key in kwargs:
        if key not in cls._fields:
            # arbitrary keyword arguments are accepted
            continue
        pos = cls._fields.index(key)
        if pos < len(args):
            raise TypeError(f"{cls.__name__} got multiple values for argument {key!r}")
    if cls in _const_types:
        return Constant(*args, **kwargs)
    return Constant.__new__(cls, *args, **kwargs)

class Num(Constant, metaclass=_ABC):
    _fields = ('n',)
    __new__ = _new

class Str(Constant, metaclass=_ABC):
    _fields = ('s',)
    __new__ = _new

class Bytes(Constant, metaclass=_ABC):
    _fields = ('s',)
    __new__ = _new

class NameConstant(Constant, metaclass=_ABC):
    __new__ = _new

class Ellipsis(Constant, metaclass=_ABC):
    _fields = ()

    def __new__(cls, *args, **kwargs):
        # Ellipsis() itself becomes Constant(...); subclasses fall through.
        if cls is Ellipsis:
            return Constant(..., *args, **kwargs)
        return Constant.__new__(cls, *args, **kwargs)

# Value types each deprecated class stands for (used by _ABC and _new).
_const_types = {
    Num: (int, float, complex),
    Str: (str,),
    Bytes: (bytes,),
    NameConstant: (type(None), bool),
    Ellipsis: (type(...),),
}
# Exclusions: bool is an int subclass but was never a Num.
_const_types_not = {
    Num: (bool,),
}

# Maps a constant's value type to the legacy visitor-method suffix.
_const_node_type_names = {
    bool: 'NameConstant',  # should be before int
    type(None): 'NameConstant',
    int: 'Num',
    float: 'Num',
    complex: 'Num',
    str: 'Str',
    bytes: 'Bytes',
    type(...): 'Ellipsis',
}

class slice(AST):
    """Deprecated AST node class."""

class Index(slice):
    """Deprecated AST node class. Use the index value directly instead."""
    def __new__(cls, value, **kwargs):
        return value

class ExtSlice(slice):
    """Deprecated AST node class. Use ast.Tuple instead."""
    def __new__(cls, dims=(), **kwargs):
        return Tuple(list(dims), Load(), **kwargs)

# If the ast module is loaded more than once, only add deprecated methods once
if not hasattr(Tuple, 'dims'):
    # The following code is for backward compatibility.
    # It will be removed in future.

    def _dims_getter(self):
        """Deprecated. Use elts instead."""
        return self.elts

    def _dims_setter(self, value):
        self.elts = value

    Tuple.dims = property(_dims_getter, _dims_setter)

class Suite(mod):
    """Deprecated AST node class.  Unused in Python 3."""

class AugLoad(expr_context):
    """Deprecated AST node class.  Unused in Python 3."""

class AugStore(expr_context):
    """Deprecated AST node class.  Unused in Python 3."""

class Param(expr_context):
    """Deprecated AST node class.  Unused in Python 3."""
641
+
642
+
643
# Large float and imaginary literals get turned into infinities in the AST.
# We unparse those infinities to INFSTR: a literal one power of ten beyond
# the largest representable float, which re-parses back to infinity.
_INFSTR = "1e" + repr(sys.float_info.max_10_exp + 1)
646
+
647
class _Precedence(IntEnum):
    """Precedence table that originated from python grammar.

    Levels are ordered from loosest- to tightest-binding; the unparser
    compares them to decide where parentheses are required.
    """

    TUPLE = auto()
    YIELD = auto()           # 'yield', 'yield from'
    TEST = auto()            # 'if'-'else', 'lambda'
    OR = auto()              # 'or'
    AND = auto()             # 'and'
    NOT = auto()             # 'not'
    CMP = auto()             # '<', '>', '==', '>=', '<=', '!=',
                             # 'in', 'not in', 'is', 'is not'
    EXPR = auto()
    BOR = EXPR               # '|'  (alias: bitwise-or is the generic "expr" level)
    BXOR = auto()            # '^'
    BAND = auto()            # '&'
    SHIFT = auto()           # '<<', '>>'
    ARITH = auto()           # '+', '-'
    TERM = auto()            # '*', '@', '/', '%', '//'
    FACTOR = auto()          # unary '+', '-', '~'
    POWER = auto()           # '**'
    AWAIT = auto()           # 'await'
    ATOM = auto()

    def next(self):
        # Return the next-tighter level, saturating at the top
        # (ATOM.next() is ATOM).
        try:
            return self.__class__(self + 1)
        except ValueError:
            return self
675
+
676
+
677
+ _SINGLE_QUOTES = ("'", '"')
678
+ _MULTI_QUOTES = ('"""', "'''")
679
+ _ALL_QUOTES = (*_SINGLE_QUOTES, *_MULTI_QUOTES)
680
+
681
+ class _Unparser(NodeVisitor):
682
+ """Methods in this class recursively traverse an AST and
683
+ output source code for the abstract syntax; original formatting
684
+ is disregarded."""
685
+
686
+ def __init__(self, *, _avoid_backslashes=False):
687
+ self._source = []
688
+ self._buffer = []
689
+ self._precedences = {}
690
+ self._type_ignores = {}
691
+ self._indent = 0
692
+ self._avoid_backslashes = _avoid_backslashes
693
+
694
+ def interleave(self, inter, f, seq):
695
+ """Call f on each item in seq, calling inter() in between."""
696
+ seq = iter(seq)
697
+ try:
698
+ f(next(seq))
699
+ except StopIteration:
700
+ pass
701
+ else:
702
+ for x in seq:
703
+ inter()
704
+ f(x)
705
+
706
+ def items_view(self, traverser, items):
707
+ """Traverse and separate the given *items* with a comma and append it to
708
+ the buffer. If *items* is a single item sequence, a trailing comma
709
+ will be added."""
710
+ if len(items) == 1:
711
+ traverser(items[0])
712
+ self.write(",")
713
+ else:
714
+ self.interleave(lambda: self.write(", "), traverser, items)
715
+
716
+ def maybe_newline(self):
717
+ """Adds a newline if it isn't the start of generated source"""
718
+ if self._source:
719
+ self.write("\n")
720
+
721
+ def fill(self, text=""):
722
+ """Indent a piece of text and append it, according to the current
723
+ indentation level"""
724
+ self.maybe_newline()
725
+ self.write(" " * self._indent + text)
726
+
727
+ def write(self, text):
728
+ """Append a piece of text"""
729
+ self._source.append(text)
730
+
731
+ def buffer_writer(self, text):
732
+ self._buffer.append(text)
733
+
734
+ @property
735
+ def buffer(self):
736
+ value = "".join(self._buffer)
737
+ self._buffer.clear()
738
+ return value
739
+
740
+ @contextmanager
741
+ def block(self, *, extra = None):
742
+ """A context manager for preparing the source for blocks. It adds
743
+ the character':', increases the indentation on enter and decreases
744
+ the indentation on exit. If *extra* is given, it will be directly
745
+ appended after the colon character.
746
+ """
747
+ self.write(":")
748
+ if extra:
749
+ self.write(extra)
750
+ self._indent += 1
751
+ yield
752
+ self._indent -= 1
753
+
754
+ @contextmanager
755
+ def delimit(self, start, end):
756
+ """A context manager for preparing the source for expressions. It adds
757
+ *start* to the buffer and enters, after exit it adds *end*."""
758
+
759
+ self.write(start)
760
+ yield
761
+ self.write(end)
762
+
763
+ def delimit_if(self, start, end, condition):
764
+ if condition:
765
+ return self.delimit(start, end)
766
+ else:
767
+ return nullcontext()
768
+
769
+ def require_parens(self, precedence, node):
770
+ """Shortcut to adding precedence related parens"""
771
+ return self.delimit_if("(", ")", self.get_precedence(node) > precedence)
772
+
773
+ def get_precedence(self, node):
774
+ return self._precedences.get(node, _Precedence.TEST)
775
+
776
+ def set_precedence(self, precedence, *nodes):
777
+ for node in nodes:
778
+ self._precedences[node] = precedence
779
+
780
+ def get_raw_docstring(self, node):
781
+ """If a docstring node is found in the body of the *node* parameter,
782
+ return that docstring node, None otherwise.
783
+
784
+ Logic mirrored from ``_PyAST_GetDocString``."""
785
+ if not isinstance(
786
+ node, (AsyncFunctionDef, FunctionDef, ClassDef, Module)
787
+ ) or len(node.body) < 1:
788
+ return None
789
+ node = node.body[0]
790
+ if not isinstance(node, Expr):
791
+ return None
792
+ node = node.value
793
+ if isinstance(node, Constant) and isinstance(node.value, str):
794
+ return node
795
+
796
+ def get_type_comment(self, node):
797
+ comment = self._type_ignores.get(node.lineno) or node.type_comment
798
+ if comment is not None:
799
+ return f" # type: {comment}"
800
+
801
+ def traverse(self, node):
802
+ if isinstance(node, list):
803
+ for item in node:
804
+ self.traverse(item)
805
+ else:
806
+ super().visit(node)
807
+
808
+ # Note: as visit() resets the output text, do NOT rely on
809
+ # NodeVisitor.generic_visit to handle any nodes (as it calls back in to
810
+ # the subclass visit() method, which resets self._source to an empty list)
811
+ def visit(self, node):
812
+ """Outputs a source code string that, if converted back to an ast
813
+ (using ast.parse) will generate an AST equivalent to *node*"""
814
+ self._source = []
815
+ self.traverse(node)
816
+ return "".join(self._source)
817
+
818
+ def _write_docstring_and_traverse_body(self, node):
819
+ if (docstring := self.get_raw_docstring(node)):
820
+ self._write_docstring(docstring)
821
+ self.traverse(node.body[1:])
822
+ else:
823
+ self.traverse(node.body)
824
+
825
+ def visit_Module(self, node):
826
+ self._type_ignores = {
827
+ ignore.lineno: f"ignore{ignore.tag}"
828
+ for ignore in node.type_ignores
829
+ }
830
+ self._write_docstring_and_traverse_body(node)
831
+ self._type_ignores.clear()
832
+
833
+ def visit_FunctionType(self, node):
834
+ with self.delimit("(", ")"):
835
+ self.interleave(
836
+ lambda: self.write(", "), self.traverse, node.argtypes
837
+ )
838
+
839
+ self.write(" -> ")
840
+ self.traverse(node.returns)
841
+
842
+ def visit_Expr(self, node):
843
+ self.fill()
844
+ self.set_precedence(_Precedence.YIELD, node.value)
845
+ self.traverse(node.value)
846
+
847
+ def visit_NamedExpr(self, node):
848
+ with self.require_parens(_Precedence.TUPLE, node):
849
+ self.set_precedence(_Precedence.ATOM, node.target, node.value)
850
+ self.traverse(node.target)
851
+ self.write(" := ")
852
+ self.traverse(node.value)
853
+
854
+ def visit_Import(self, node):
855
+ self.fill("import ")
856
+ self.interleave(lambda: self.write(", "), self.traverse, node.names)
857
+
858
+ def visit_ImportFrom(self, node):
859
+ self.fill("from ")
860
+ self.write("." * (node.level or 0))
861
+ if node.module:
862
+ self.write(node.module)
863
+ self.write(" import ")
864
+ self.interleave(lambda: self.write(", "), self.traverse, node.names)
865
+
866
+ def visit_Assign(self, node):
867
+ self.fill()
868
+ for target in node.targets:
869
+ self.traverse(target)
870
+ self.write(" = ")
871
+ self.traverse(node.value)
872
+ if type_comment := self.get_type_comment(node):
873
+ self.write(type_comment)
874
+
875
+ def visit_AugAssign(self, node):
876
+ self.fill()
877
+ self.traverse(node.target)
878
+ self.write(" " + self.binop[node.op.__class__.__name__] + "= ")
879
+ self.traverse(node.value)
880
+
881
+ def visit_AnnAssign(self, node):
882
+ self.fill()
883
+ with self.delimit_if("(", ")", not node.simple and isinstance(node.target, Name)):
884
+ self.traverse(node.target)
885
+ self.write(": ")
886
+ self.traverse(node.annotation)
887
+ if node.value:
888
+ self.write(" = ")
889
+ self.traverse(node.value)
890
+
891
+ def visit_Return(self, node):
892
+ self.fill("return")
893
+ if node.value:
894
+ self.write(" ")
895
+ self.traverse(node.value)
896
+
897
+ def visit_Pass(self, node):
898
+ self.fill("pass")
899
+
900
+ def visit_Break(self, node):
901
+ self.fill("break")
902
+
903
+ def visit_Continue(self, node):
904
+ self.fill("continue")
905
+
906
+ def visit_Delete(self, node):
907
+ self.fill("del ")
908
+ self.interleave(lambda: self.write(", "), self.traverse, node.targets)
909
+
910
+ def visit_Assert(self, node):
911
+ self.fill("assert ")
912
+ self.traverse(node.test)
913
+ if node.msg:
914
+ self.write(", ")
915
+ self.traverse(node.msg)
916
+
917
+ def visit_Global(self, node):
918
+ self.fill("global ")
919
+ self.interleave(lambda: self.write(", "), self.write, node.names)
920
+
921
+ def visit_Nonlocal(self, node):
922
+ self.fill("nonlocal ")
923
+ self.interleave(lambda: self.write(", "), self.write, node.names)
924
+
925
+ def visit_Await(self, node):
926
+ with self.require_parens(_Precedence.AWAIT, node):
927
+ self.write("await")
928
+ if node.value:
929
+ self.write(" ")
930
+ self.set_precedence(_Precedence.ATOM, node.value)
931
+ self.traverse(node.value)
932
+
933
+ def visit_Yield(self, node):
934
+ with self.require_parens(_Precedence.YIELD, node):
935
+ self.write("yield")
936
+ if node.value:
937
+ self.write(" ")
938
+ self.set_precedence(_Precedence.ATOM, node.value)
939
+ self.traverse(node.value)
940
+
941
+ def visit_YieldFrom(self, node):
942
+ with self.require_parens(_Precedence.YIELD, node):
943
+ self.write("yield from ")
944
+ if not node.value:
945
+ raise ValueError("Node can't be used without a value attribute.")
946
+ self.set_precedence(_Precedence.ATOM, node.value)
947
+ self.traverse(node.value)
948
+
949
+ def visit_Raise(self, node):
950
+ self.fill("raise")
951
+ if not node.exc:
952
+ if node.cause:
953
+ raise ValueError(f"Node can't use cause without an exception.")
954
+ return
955
+ self.write(" ")
956
+ self.traverse(node.exc)
957
+ if node.cause:
958
+ self.write(" from ")
959
+ self.traverse(node.cause)
960
+
961
+ def visit_Try(self, node):
962
+ self.fill("try")
963
+ with self.block():
964
+ self.traverse(node.body)
965
+ for ex in node.handlers:
966
+ self.traverse(ex)
967
+ if node.orelse:
968
+ self.fill("else")
969
+ with self.block():
970
+ self.traverse(node.orelse)
971
+ if node.finalbody:
972
+ self.fill("finally")
973
+ with self.block():
974
+ self.traverse(node.finalbody)
975
+
976
+ def visit_ExceptHandler(self, node):
977
+ self.fill("except")
978
+ if node.type:
979
+ self.write(" ")
980
+ self.traverse(node.type)
981
+ if node.name:
982
+ self.write(" as ")
983
+ self.write(node.name)
984
+ with self.block():
985
+ self.traverse(node.body)
986
+
987
+ def visit_ClassDef(self, node):
988
+ self.maybe_newline()
989
+ for deco in node.decorator_list:
990
+ self.fill("@")
991
+ self.traverse(deco)
992
+ self.fill("class " + node.name)
993
+ with self.delimit_if("(", ")", condition = node.bases or node.keywords):
994
+ comma = False
995
+ for e in node.bases:
996
+ if comma:
997
+ self.write(", ")
998
+ else:
999
+ comma = True
1000
+ self.traverse(e)
1001
+ for e in node.keywords:
1002
+ if comma:
1003
+ self.write(", ")
1004
+ else:
1005
+ comma = True
1006
+ self.traverse(e)
1007
+
1008
+ with self.block():
1009
+ self._write_docstring_and_traverse_body(node)
1010
+
1011
+ def visit_FunctionDef(self, node):
1012
+ self._function_helper(node, "def")
1013
+
1014
+ def visit_AsyncFunctionDef(self, node):
1015
+ self._function_helper(node, "async def")
1016
+
1017
+ def _function_helper(self, node, fill_suffix):
1018
+ self.maybe_newline()
1019
+ for deco in node.decorator_list:
1020
+ self.fill("@")
1021
+ self.traverse(deco)
1022
+ def_str = fill_suffix + " " + node.name
1023
+ self.fill(def_str)
1024
+ with self.delimit("(", ")"):
1025
+ self.traverse(node.args)
1026
+ if node.returns:
1027
+ self.write(" -> ")
1028
+ self.traverse(node.returns)
1029
+ with self.block(extra=self.get_type_comment(node)):
1030
+ self._write_docstring_and_traverse_body(node)
1031
+
1032
+ def visit_For(self, node):
1033
+ self._for_helper("for ", node)
1034
+
1035
+ def visit_AsyncFor(self, node):
1036
+ self._for_helper("async for ", node)
1037
+
1038
+ def _for_helper(self, fill, node):
1039
+ self.fill(fill)
1040
+ self.traverse(node.target)
1041
+ self.write(" in ")
1042
+ self.traverse(node.iter)
1043
+ with self.block(extra=self.get_type_comment(node)):
1044
+ self.traverse(node.body)
1045
+ if node.orelse:
1046
+ self.fill("else")
1047
+ with self.block():
1048
+ self.traverse(node.orelse)
1049
+
1050
    def visit_If(self, node):
        """Write an if statement, folding nested single-`if` else bodies
        into ``elif`` clauses so round-tripped source stays flat."""
        self.fill("if ")
        self.traverse(node.test)
        with self.block():
            self.traverse(node.body)
        # collapse nested ifs into equivalent elifs: an `orelse` that is
        # exactly one If node is what the parser produces for `elif`.
        while node.orelse and len(node.orelse) == 1 and isinstance(node.orelse[0], If):
            node = node.orelse[0]
            self.fill("elif ")
            self.traverse(node.test)
            with self.block():
                self.traverse(node.body)
        # final else (anything left that wasn't a lone nested If)
        if node.orelse:
            self.fill("else")
            with self.block():
                self.traverse(node.orelse)
1067
+
1068
+ def visit_While(self, node):
1069
+ self.fill("while ")
1070
+ self.traverse(node.test)
1071
+ with self.block():
1072
+ self.traverse(node.body)
1073
+ if node.orelse:
1074
+ self.fill("else")
1075
+ with self.block():
1076
+ self.traverse(node.orelse)
1077
+
1078
+ def visit_With(self, node):
1079
+ self.fill("with ")
1080
+ self.interleave(lambda: self.write(", "), self.traverse, node.items)
1081
+ with self.block(extra=self.get_type_comment(node)):
1082
+ self.traverse(node.body)
1083
+
1084
+ def visit_AsyncWith(self, node):
1085
+ self.fill("async with ")
1086
+ self.interleave(lambda: self.write(", "), self.traverse, node.items)
1087
+ with self.block(extra=self.get_type_comment(node)):
1088
+ self.traverse(node.body)
1089
+
1090
    def _str_literal_helper(
        self, string, *, quote_types=_ALL_QUOTES, escape_special_whitespace=False
    ):
        """Helper for writing string literals, minimizing escapes.
        Returns the tuple (string literal to write, possible quote types).

        The candidate quote list is progressively narrowed: multi-line
        content forces triple quotes, and any quote character appearing
        in the text rules that quote style out.
        """
        def escape_char(c):
            # \n and \t are non-printable, but we only escape them if
            # escape_special_whitespace is True
            if not escape_special_whitespace and c in "\n\t":
                return c
            # Always escape backslashes and other non-printable characters
            if c == "\\" or not c.isprintable():
                return c.encode("unicode_escape").decode("ascii")
            return c

        escaped_string = "".join(map(escape_char, string))
        possible_quotes = quote_types
        if "\n" in escaped_string:
            # An unescaped newline can only live inside triple quotes.
            possible_quotes = [q for q in possible_quotes if q in _MULTI_QUOTES]
        possible_quotes = [q for q in possible_quotes if q not in escaped_string]
        if not possible_quotes:
            # If there aren't any possible_quotes, fallback to using repr
            # on the original string. Try to use a quote from quote_types,
            # e.g., so that we use triple quotes for docstrings.
            string = repr(string)
            quote = next((q for q in quote_types if string[0] in q), string[0])
            return string[1:-1], [quote]
        if escaped_string:
            # Sort so that we prefer '''"''' over """\""""
            possible_quotes.sort(key=lambda q: q[0] == escaped_string[-1])
            # If we're using triple quotes and we'd need to escape a final
            # quote, escape it
            if possible_quotes[0][0] == escaped_string[-1]:
                assert len(possible_quotes[0]) == 3
                escaped_string = escaped_string[:-1] + "\\" + escaped_string[-1]
        return escaped_string, possible_quotes
1127
+
1128
+ def _write_str_avoiding_backslashes(self, string, *, quote_types=_ALL_QUOTES):
1129
+ """Write string literal value with a best effort attempt to avoid backslashes."""
1130
+ string, quote_types = self._str_literal_helper(string, quote_types=quote_types)
1131
+ quote_type = quote_types[0]
1132
+ self.write(f"{quote_type}{string}{quote_type}")
1133
+
1134
    def visit_JoinedStr(self, node):
        """Write an f-string literal (a JoinedStr node)."""
        self.write("f")
        if self._avoid_backslashes:
            # This unparser instance must not emit backslashes at all
            # (set via _avoid_backslashes), so render the entire literal
            # through the escape-free writer.
            self._fstring_JoinedStr(node, self.buffer_writer)
            self._write_str_avoiding_backslashes(self.buffer)
            return

        # If we don't need to avoid backslashes globally (i.e., we only need
        # to avoid them inside FormattedValues), it's cosmetically preferred
        # to use escaped whitespace. That is, it's preferred to use backslashes
        # for cases like: f"{x}\n". To accomplish this, we keep track of what
        # in our buffer corresponds to FormattedValues and what corresponds to
        # Constant parts of the f-string, and allow escapes accordingly.
        buffer = []
        for value in node.values:
            # Dispatch on the node class name: _fstring_Constant,
            # _fstring_FormattedValue, _fstring_JoinedStr.
            meth = getattr(self, "_fstring_" + type(value).__name__)
            meth(value, self.buffer_writer)
            buffer.append((self.buffer, isinstance(value, Constant)))
        new_buffer = []
        quote_types = _ALL_QUOTES
        for value, is_constant in buffer:
            # Repeatedly narrow down the list of possible quote_types
            value, quote_types = self._str_literal_helper(
                value, quote_types=quote_types,
                escape_special_whitespace=is_constant
            )
            new_buffer.append(value)
        value = "".join(new_buffer)
        quote_type = quote_types[0]
        self.write(f"{quote_type}{value}{quote_type}")
1164
+
1165
+ def visit_FormattedValue(self, node):
1166
+ self.write("f")
1167
+ self._fstring_FormattedValue(node, self.buffer_writer)
1168
+ self._write_str_avoiding_backslashes(self.buffer)
1169
+
1170
+ def _fstring_JoinedStr(self, node, write):
1171
+ for value in node.values:
1172
+ meth = getattr(self, "_fstring_" + type(value).__name__)
1173
+ meth(value, write)
1174
+
1175
+ def _fstring_Constant(self, node, write):
1176
+ if not isinstance(node.value, str):
1177
+ raise ValueError("Constants inside JoinedStr should be a string.")
1178
+ value = node.value.replace("{", "{{").replace("}", "}}")
1179
+ write(value)
1180
+
1181
    def _fstring_FormattedValue(self, node, write):
        """Write one ``{...}`` replacement field of an f-string."""
        write("{")
        # Unparse the inner expression with a fresh unparser that is
        # forbidden from emitting backslashes (they are rejected below).
        unparser = type(self)(_avoid_backslashes=True)
        unparser.set_precedence(_Precedence.TEST.next(), node.value)
        expr = unparser.visit(node.value)
        if expr.startswith("{"):
            write(" ")  # Separate pair of opening brackets as "{ {"
        if "\\" in expr:
            raise ValueError("Unable to avoid backslash in f-string expression part")
        write(expr)
        if node.conversion != -1:
            # conversion holds the ord of "s", "r" or "a", or -1 if absent.
            conversion = chr(node.conversion)
            if conversion not in "sra":
                raise ValueError("Unknown f-string conversion.")
            write(f"!{conversion}")
        if node.format_spec:
            # The format spec is itself a JoinedStr; dispatch by class name.
            write(":")
            meth = getattr(self, "_fstring_" + type(node.format_spec).__name__)
            meth(node.format_spec, write)
        write("}")
1201
+
1202
+ def visit_Name(self, node):
1203
+ self.write(node.id)
1204
+
1205
+ def _write_docstring(self, node):
1206
+ self.fill()
1207
+ if node.kind == "u":
1208
+ self.write("u")
1209
+ self._write_str_avoiding_backslashes(node.value, quote_types=_MULTI_QUOTES)
1210
+
1211
    def _write_constant(self, value):
        """Write the source form of a bare constant *value*.

        Floats and complex numbers need special handling because an AST
        can carry infinities/NaNs, which have no literal spelling.
        """
        if isinstance(value, (float, complex)):
            # Substitute overflowing decimal literal for AST infinities,
            # and inf - inf for NaNs.
            self.write(
                repr(value)
                .replace("inf", _INFSTR)
                .replace("nan", f"({_INFSTR}-{_INFSTR})")
            )
        elif self._avoid_backslashes and isinstance(value, str):
            # In backslash-free mode, pick quotes that avoid escapes
            # instead of relying on repr().
            self._write_str_avoiding_backslashes(value)
        else:
            self.write(repr(value))
1224
+
1225
    def visit_Constant(self, node):
        """Write a Constant node, special-casing tuples and Ellipsis."""
        value = node.value
        if isinstance(value, tuple):
            # Constant-folded tuples are written element by element.
            with self.delimit("(", ")"):
                self.items_view(self._write_constant, value)
        elif value is ...:
            self.write("...")
        else:
            # kind == "u" marks a u"..." literal; preserve the prefix.
            if node.kind == "u":
                self.write("u")
            self._write_constant(node.value)
1236
+
1237
+ def visit_List(self, node):
1238
+ with self.delimit("[", "]"):
1239
+ self.interleave(lambda: self.write(", "), self.traverse, node.elts)
1240
+
1241
+ def visit_ListComp(self, node):
1242
+ with self.delimit("[", "]"):
1243
+ self.traverse(node.elt)
1244
+ for gen in node.generators:
1245
+ self.traverse(gen)
1246
+
1247
+ def visit_GeneratorExp(self, node):
1248
+ with self.delimit("(", ")"):
1249
+ self.traverse(node.elt)
1250
+ for gen in node.generators:
1251
+ self.traverse(gen)
1252
+
1253
+ def visit_SetComp(self, node):
1254
+ with self.delimit("{", "}"):
1255
+ self.traverse(node.elt)
1256
+ for gen in node.generators:
1257
+ self.traverse(gen)
1258
+
1259
+ def visit_DictComp(self, node):
1260
+ with self.delimit("{", "}"):
1261
+ self.traverse(node.key)
1262
+ self.write(": ")
1263
+ self.traverse(node.value)
1264
+ for gen in node.generators:
1265
+ self.traverse(gen)
1266
+
1267
+ def visit_comprehension(self, node):
1268
+ if node.is_async:
1269
+ self.write(" async for ")
1270
+ else:
1271
+ self.write(" for ")
1272
+ self.set_precedence(_Precedence.TUPLE, node.target)
1273
+ self.traverse(node.target)
1274
+ self.write(" in ")
1275
+ self.set_precedence(_Precedence.TEST.next(), node.iter, *node.ifs)
1276
+ self.traverse(node.iter)
1277
+ for if_clause in node.ifs:
1278
+ self.write(" if ")
1279
+ self.traverse(if_clause)
1280
+
1281
+ def visit_IfExp(self, node):
1282
+ with self.require_parens(_Precedence.TEST, node):
1283
+ self.set_precedence(_Precedence.TEST.next(), node.body, node.test)
1284
+ self.traverse(node.body)
1285
+ self.write(" if ")
1286
+ self.traverse(node.test)
1287
+ self.write(" else ")
1288
+ self.set_precedence(_Precedence.TEST, node.orelse)
1289
+ self.traverse(node.orelse)
1290
+
1291
+ def visit_Set(self, node):
1292
+ if node.elts:
1293
+ with self.delimit("{", "}"):
1294
+ self.interleave(lambda: self.write(", "), self.traverse, node.elts)
1295
+ else:
1296
+ # `{}` would be interpreted as a dictionary literal, and
1297
+ # `set` might be shadowed. Thus:
1298
+ self.write('{*()}')
1299
+
1300
+ def visit_Dict(self, node):
1301
+ def write_key_value_pair(k, v):
1302
+ self.traverse(k)
1303
+ self.write(": ")
1304
+ self.traverse(v)
1305
+
1306
+ def write_item(item):
1307
+ k, v = item
1308
+ if k is None:
1309
+ # for dictionary unpacking operator in dicts {**{'y': 2}}
1310
+ # see PEP 448 for details
1311
+ self.write("**")
1312
+ self.set_precedence(_Precedence.EXPR, v)
1313
+ self.traverse(v)
1314
+ else:
1315
+ write_key_value_pair(k, v)
1316
+
1317
+ with self.delimit("{", "}"):
1318
+ self.interleave(
1319
+ lambda: self.write(", "), write_item, zip(node.keys, node.values)
1320
+ )
1321
+
1322
+ def visit_Tuple(self, node):
1323
+ with self.delimit("(", ")"):
1324
+ self.items_view(self.traverse, node.elts)
1325
+
1326
+ unop = {"Invert": "~", "Not": "not", "UAdd": "+", "USub": "-"}
1327
+ unop_precedence = {
1328
+ "not": _Precedence.NOT,
1329
+ "~": _Precedence.FACTOR,
1330
+ "+": _Precedence.FACTOR,
1331
+ "-": _Precedence.FACTOR,
1332
+ }
1333
+
1334
+ def visit_UnaryOp(self, node):
1335
+ operator = self.unop[node.op.__class__.__name__]
1336
+ operator_precedence = self.unop_precedence[operator]
1337
+ with self.require_parens(operator_precedence, node):
1338
+ self.write(operator)
1339
+ # factor prefixes (+, -, ~) shouldn't be seperated
1340
+ # from the value they belong, (e.g: +1 instead of + 1)
1341
+ if operator_precedence is not _Precedence.FACTOR:
1342
+ self.write(" ")
1343
+ self.set_precedence(operator_precedence, node.operand)
1344
+ self.traverse(node.operand)
1345
+
1346
+ binop = {
1347
+ "Add": "+",
1348
+ "Sub": "-",
1349
+ "Mult": "*",
1350
+ "MatMult": "@",
1351
+ "Div": "/",
1352
+ "Mod": "%",
1353
+ "LShift": "<<",
1354
+ "RShift": ">>",
1355
+ "BitOr": "|",
1356
+ "BitXor": "^",
1357
+ "BitAnd": "&",
1358
+ "FloorDiv": "//",
1359
+ "Pow": "**",
1360
+ }
1361
+
1362
+ binop_precedence = {
1363
+ "+": _Precedence.ARITH,
1364
+ "-": _Precedence.ARITH,
1365
+ "*": _Precedence.TERM,
1366
+ "@": _Precedence.TERM,
1367
+ "/": _Precedence.TERM,
1368
+ "%": _Precedence.TERM,
1369
+ "<<": _Precedence.SHIFT,
1370
+ ">>": _Precedence.SHIFT,
1371
+ "|": _Precedence.BOR,
1372
+ "^": _Precedence.BXOR,
1373
+ "&": _Precedence.BAND,
1374
+ "//": _Precedence.TERM,
1375
+ "**": _Precedence.POWER,
1376
+ }
1377
+
1378
+ binop_rassoc = frozenset(("**",))
1379
+ def visit_BinOp(self, node):
1380
+ operator = self.binop[node.op.__class__.__name__]
1381
+ operator_precedence = self.binop_precedence[operator]
1382
+ with self.require_parens(operator_precedence, node):
1383
+ if operator in self.binop_rassoc:
1384
+ left_precedence = operator_precedence.next()
1385
+ right_precedence = operator_precedence
1386
+ else:
1387
+ left_precedence = operator_precedence
1388
+ right_precedence = operator_precedence.next()
1389
+
1390
+ self.set_precedence(left_precedence, node.left)
1391
+ self.traverse(node.left)
1392
+ self.write(f" {operator} ")
1393
+ self.set_precedence(right_precedence, node.right)
1394
+ self.traverse(node.right)
1395
+
1396
+ cmpops = {
1397
+ "Eq": "==",
1398
+ "NotEq": "!=",
1399
+ "Lt": "<",
1400
+ "LtE": "<=",
1401
+ "Gt": ">",
1402
+ "GtE": ">=",
1403
+ "Is": "is",
1404
+ "IsNot": "is not",
1405
+ "In": "in",
1406
+ "NotIn": "not in",
1407
+ }
1408
+
1409
+ def visit_Compare(self, node):
1410
+ with self.require_parens(_Precedence.CMP, node):
1411
+ self.set_precedence(_Precedence.CMP.next(), node.left, *node.comparators)
1412
+ self.traverse(node.left)
1413
+ for o, e in zip(node.ops, node.comparators):
1414
+ self.write(" " + self.cmpops[o.__class__.__name__] + " ")
1415
+ self.traverse(e)
1416
+
1417
+ boolops = {"And": "and", "Or": "or"}
1418
+ boolop_precedence = {"and": _Precedence.AND, "or": _Precedence.OR}
1419
+
1420
+ def visit_BoolOp(self, node):
1421
+ operator = self.boolops[node.op.__class__.__name__]
1422
+ operator_precedence = self.boolop_precedence[operator]
1423
+
1424
+ def increasing_level_traverse(node):
1425
+ nonlocal operator_precedence
1426
+ operator_precedence = operator_precedence.next()
1427
+ self.set_precedence(operator_precedence, node)
1428
+ self.traverse(node)
1429
+
1430
+ with self.require_parens(operator_precedence, node):
1431
+ s = f" {operator} "
1432
+ self.interleave(lambda: self.write(s), increasing_level_traverse, node.values)
1433
+
1434
+ def visit_Attribute(self, node):
1435
+ self.set_precedence(_Precedence.ATOM, node.value)
1436
+ self.traverse(node.value)
1437
+ # Special case: 3.__abs__() is a syntax error, so if node.value
1438
+ # is an integer literal then we need to either parenthesize
1439
+ # it or add an extra space to get 3 .__abs__().
1440
+ if isinstance(node.value, Constant) and isinstance(node.value.value, int):
1441
+ self.write(" ")
1442
+ self.write(".")
1443
+ self.write(node.attr)
1444
+
1445
+ def visit_Call(self, node):
1446
+ self.set_precedence(_Precedence.ATOM, node.func)
1447
+ self.traverse(node.func)
1448
+ with self.delimit("(", ")"):
1449
+ comma = False
1450
+ for e in node.args:
1451
+ if comma:
1452
+ self.write(", ")
1453
+ else:
1454
+ comma = True
1455
+ self.traverse(e)
1456
+ for e in node.keywords:
1457
+ if comma:
1458
+ self.write(", ")
1459
+ else:
1460
+ comma = True
1461
+ self.traverse(e)
1462
+
1463
+ def visit_Subscript(self, node):
1464
+ def is_simple_tuple(slice_value):
1465
+ # when unparsing a non-empty tuple, the parentheses can be safely
1466
+ # omitted if there aren't any elements that explicitly requires
1467
+ # parentheses (such as starred expressions).
1468
+ return (
1469
+ isinstance(slice_value, Tuple)
1470
+ and slice_value.elts
1471
+ and not any(isinstance(elt, Starred) for elt in slice_value.elts)
1472
+ )
1473
+
1474
+ self.set_precedence(_Precedence.ATOM, node.value)
1475
+ self.traverse(node.value)
1476
+ with self.delimit("[", "]"):
1477
+ if is_simple_tuple(node.slice):
1478
+ self.items_view(self.traverse, node.slice.elts)
1479
+ else:
1480
+ self.traverse(node.slice)
1481
+
1482
+ def visit_Starred(self, node):
1483
+ self.write("*")
1484
+ self.set_precedence(_Precedence.EXPR, node.value)
1485
+ self.traverse(node.value)
1486
+
1487
+ def visit_Ellipsis(self, node):
1488
+ self.write("...")
1489
+
1490
+ def visit_Slice(self, node):
1491
+ if node.lower:
1492
+ self.traverse(node.lower)
1493
+ self.write(":")
1494
+ if node.upper:
1495
+ self.traverse(node.upper)
1496
+ if node.step:
1497
+ self.write(":")
1498
+ self.traverse(node.step)
1499
+
1500
+ def visit_Match(self, node):
1501
+ self.fill("match ")
1502
+ self.traverse(node.subject)
1503
+ with self.block():
1504
+ for case in node.cases:
1505
+ self.traverse(case)
1506
+
1507
+ def visit_arg(self, node):
1508
+ self.write(node.arg)
1509
+ if node.annotation:
1510
+ self.write(": ")
1511
+ self.traverse(node.annotation)
1512
+
1513
    def visit_arguments(self, node):
        """Write a complete parameter list: positional-only parameters,
        normal parameters, ``*args`` (or a bare ``*``), keyword-only
        parameters, and ``**kwargs``, with defaults and annotations."""
        first = True
        # normal arguments
        all_args = node.posonlyargs + node.args
        # Left-pad defaults with None: only the *last* len(node.defaults)
        # positional parameters carry default values.
        defaults = [None] * (len(all_args) - len(node.defaults)) + node.defaults
        for index, elements in enumerate(zip(all_args, defaults), 1):
            a, d = elements
            if first:
                first = False
            else:
                self.write(", ")
            self.traverse(a)
            if d:
                self.write("=")
                self.traverse(d)
            # The `/` marker closes the positional-only section.
            if index == len(node.posonlyargs):
                self.write(", /")

        # varargs, or bare '*' if no varargs but keyword-only arguments present
        if node.vararg or node.kwonlyargs:
            if first:
                first = False
            else:
                self.write(", ")
            self.write("*")
            if node.vararg:
                self.write(node.vararg.arg)
                if node.vararg.annotation:
                    self.write(": ")
                    self.traverse(node.vararg.annotation)

        # keyword-only arguments (kw_defaults is parallel, None = no default)
        if node.kwonlyargs:
            for a, d in zip(node.kwonlyargs, node.kw_defaults):
                self.write(", ")
                self.traverse(a)
                if d:
                    self.write("=")
                    self.traverse(d)

        # kwargs
        if node.kwarg:
            if first:
                first = False
            else:
                self.write(", ")
            self.write("**" + node.kwarg.arg)
            if node.kwarg.annotation:
                self.write(": ")
                self.traverse(node.kwarg.annotation)
1563
+
1564
+ def visit_keyword(self, node):
1565
+ if node.arg is None:
1566
+ self.write("**")
1567
+ else:
1568
+ self.write(node.arg)
1569
+ self.write("=")
1570
+ self.traverse(node.value)
1571
+
1572
+ def visit_Lambda(self, node):
1573
+ with self.require_parens(_Precedence.TEST, node):
1574
+ self.write("lambda ")
1575
+ self.traverse(node.args)
1576
+ self.write(": ")
1577
+ self.set_precedence(_Precedence.TEST, node.body)
1578
+ self.traverse(node.body)
1579
+
1580
+ def visit_alias(self, node):
1581
+ self.write(node.name)
1582
+ if node.asname:
1583
+ self.write(" as " + node.asname)
1584
+
1585
+ def visit_withitem(self, node):
1586
+ self.traverse(node.context_expr)
1587
+ if node.optional_vars:
1588
+ self.write(" as ")
1589
+ self.traverse(node.optional_vars)
1590
+
1591
+ def visit_match_case(self, node):
1592
+ self.fill("case ")
1593
+ self.traverse(node.pattern)
1594
+ if node.guard:
1595
+ self.write(" if ")
1596
+ self.traverse(node.guard)
1597
+ with self.block():
1598
+ self.traverse(node.body)
1599
+
1600
+ def visit_MatchValue(self, node):
1601
+ self.traverse(node.value)
1602
+
1603
+ def visit_MatchSingleton(self, node):
1604
+ self._write_constant(node.value)
1605
+
1606
+ def visit_MatchSequence(self, node):
1607
+ with self.delimit("[", "]"):
1608
+ self.interleave(
1609
+ lambda: self.write(", "), self.traverse, node.patterns
1610
+ )
1611
+
1612
+ def visit_MatchStar(self, node):
1613
+ name = node.name
1614
+ if name is None:
1615
+ name = "_"
1616
+ self.write(f"*{name}")
1617
+
1618
+ def visit_MatchMapping(self, node):
1619
+ def write_key_pattern_pair(pair):
1620
+ k, p = pair
1621
+ self.traverse(k)
1622
+ self.write(": ")
1623
+ self.traverse(p)
1624
+
1625
+ with self.delimit("{", "}"):
1626
+ keys = node.keys
1627
+ self.interleave(
1628
+ lambda: self.write(", "),
1629
+ write_key_pattern_pair,
1630
+ zip(keys, node.patterns, strict=True),
1631
+ )
1632
+ rest = node.rest
1633
+ if rest is not None:
1634
+ if keys:
1635
+ self.write(", ")
1636
+ self.write(f"**{rest}")
1637
+
1638
+ def visit_MatchClass(self, node):
1639
+ self.set_precedence(_Precedence.ATOM, node.cls)
1640
+ self.traverse(node.cls)
1641
+ with self.delimit("(", ")"):
1642
+ patterns = node.patterns
1643
+ self.interleave(
1644
+ lambda: self.write(", "), self.traverse, patterns
1645
+ )
1646
+ attrs = node.kwd_attrs
1647
+ if attrs:
1648
+ def write_attr_pattern(pair):
1649
+ attr, pattern = pair
1650
+ self.write(f"{attr}=")
1651
+ self.traverse(pattern)
1652
+
1653
+ if patterns:
1654
+ self.write(", ")
1655
+ self.interleave(
1656
+ lambda: self.write(", "),
1657
+ write_attr_pattern,
1658
+ zip(attrs, node.kwd_patterns, strict=True),
1659
+ )
1660
+
1661
    def visit_MatchAs(self, node):
        """Write a match capture pattern.

        Three forms: the wildcard ``_`` (no name, no sub-pattern), a bare
        capture name, and ``<pattern> as <name>``.
        """
        name = node.name
        pattern = node.pattern
        if name is None:
            self.write("_")
        elif pattern is None:
            self.write(node.name)
        else:
            with self.require_parens(_Precedence.TEST, node):
                # The sub-pattern is rendered at BOR precedence so that
                # an or-pattern inside `as` gets parenthesized correctly.
                self.set_precedence(_Precedence.BOR, node.pattern)
                self.traverse(node.pattern)
                self.write(f" as {node.name}")
1673
+
1674
+ def visit_MatchOr(self, node):
1675
+ with self.require_parens(_Precedence.BOR, node):
1676
+ self.set_precedence(_Precedence.BOR.next(), *node.patterns)
1677
+ self.interleave(lambda: self.write(" | "), self.traverse, node.patterns)
1678
+
1679
def unparse(ast_obj):
    """Unparse *ast_obj* back into a string of equivalent Python source."""
    unparser = _Unparser()
    return unparser.visit(ast_obj)
1682
+
1683
+
1684
def main():
    """Command-line entry point for ``python -m ast``: parse a file (or
    stdin) and print the dumped AST."""
    import argparse

    parser = argparse.ArgumentParser(prog='python -m ast')
    parser.add_argument('infile', type=argparse.FileType(mode='rb'), nargs='?',
                        default='-',
                        help='the file to parse; defaults to stdin')
    parser.add_argument('-m', '--mode', default='exec',
                        choices=('exec', 'single', 'eval', 'func_type'),
                        help='specify what kind of code must be parsed')
    # store_false with default=True: type comments are *on* unless the
    # flag is given, and args.no_type_comments is then passed straight
    # through as the type_comments= argument below.
    parser.add_argument('--no-type-comments', default=True, action='store_false',
                        help="don't add information about type comments")
    parser.add_argument('-a', '--include-attributes', action='store_true',
                        help='include attributes such as line numbers and '
                             'column offsets')
    parser.add_argument('-i', '--indent', type=int, default=3,
                        help='indentation of nodes (number of spaces)')
    args = parser.parse_args()

    with args.infile as infile:
        source = infile.read()
    tree = parse(source, args.infile.name, args.mode, type_comments=args.no_type_comments)
    print(dump(tree, include_attributes=args.include_attributes, indent=args.indent))
1707
+
1708
+ if __name__ == '__main__':
1709
+ main()
python310/asynchat.py ADDED
@@ -0,0 +1,315 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # -*- Mode: Python; tab-width: 4 -*-
2
+ # Id: asynchat.py,v 2.26 2000/09/07 22:29:26 rushing Exp
3
+ # Author: Sam Rushing <rushing@nightmare.com>
4
+
5
+ # ======================================================================
6
+ # Copyright 1996 by Sam Rushing
7
+ #
8
+ # All Rights Reserved
9
+ #
10
+ # Permission to use, copy, modify, and distribute this software and
11
+ # its documentation for any purpose and without fee is hereby
12
+ # granted, provided that the above copyright notice appear in all
13
+ # copies and that both that copyright notice and this permission
14
+ # notice appear in supporting documentation, and that the name of Sam
15
+ # Rushing not be used in advertising or publicity pertaining to
16
+ # distribution of the software without specific, written prior
17
+ # permission.
18
+ #
19
+ # SAM RUSHING DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
20
+ # INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
21
+ # NO EVENT SHALL SAM RUSHING BE LIABLE FOR ANY SPECIAL, INDIRECT OR
22
+ # CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
23
+ # OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
24
+ # NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
25
+ # CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
26
+ # ======================================================================
27
+
28
+ r"""A class supporting chat-style (command/response) protocols.
29
+
30
+ This class adds support for 'chat' style protocols - where one side
31
+ sends a 'command', and the other sends a response (examples would be
32
+ the common internet protocols - smtp, nntp, ftp, etc..).
33
+
34
+ The handle_read() method looks at the input stream for the current
35
+ 'terminator' (usually '\r\n' for single-line responses, '\r\n.\r\n'
36
+ for multi-line output), calling self.found_terminator() on its
37
+ receipt.
38
+
39
+ for example:
40
+ Say you build an async nntp client using this class. At the start
41
+ of the connection, you'll have self.terminator set to '\r\n', in
42
+ order to process the single-line greeting. Just before issuing a
43
+ 'LIST' command you'll set it to '\r\n.\r\n'. The output of the LIST
44
+ command will be accumulated (using your own 'collect_incoming_data'
45
+ method) up to the terminator, and then control will be returned to
46
+ you - by calling your self.found_terminator() method.
47
+ """
48
+ import asyncore
49
+ from collections import deque
50
+
51
+ from warnings import warn
52
+ warn(
53
+ 'The asynchat module is deprecated and will be removed in Python 3.12. '
54
+ 'The recommended replacement is asyncio',
55
+ DeprecationWarning,
56
+ stacklevel=2)
57
+
58
+
59
+
60
class async_chat(asyncore.dispatcher):
    """This is an abstract class.  You must derive from this class, and add
    the two methods collect_incoming_data() and found_terminator()"""

    # these are overridable defaults

    ac_in_buffer_size = 65536
    ac_out_buffer_size = 65536

    # we don't want to enable the use of encoding by default, because that is a
    # sign of an application bug that we don't want to pass silently

    use_encoding = 0
    encoding = 'latin-1'

    def __init__(self, sock=None, map=None):
        # for string terminator matching
        self.ac_in_buffer = b''

        # we use a list here rather than io.BytesIO for a few reasons...
        # del lst[:] is faster than bio.truncate(0)
        # lst = [] is faster than bio.truncate(0)
        self.incoming = []

        # we toss the use of the "simple producer" and replace it with
        # a pure deque, which the original fifo was a wrapping of
        self.producer_fifo = deque()
        asyncore.dispatcher.__init__(self, sock, map)

    def collect_incoming_data(self, data):
        raise NotImplementedError("must be implemented in subclass")

    def _collect_incoming_data(self, data):
        # Default collector: accumulate chunks for later joining.
        self.incoming.append(data)

    def _get_data(self):
        # Join and reset the accumulated incoming chunks.
        d = b''.join(self.incoming)
        del self.incoming[:]
        return d

    def found_terminator(self):
        raise NotImplementedError("must be implemented in subclass")

    def set_terminator(self, term):
        """Set the input delimiter.

        Can be a fixed string of any length, an integer, or None.
        """
        if isinstance(term, str) and self.use_encoding:
            term = bytes(term, self.encoding)
        elif isinstance(term, int) and term < 0:
            raise ValueError('the number of received bytes must be positive')
        self.terminator = term

    def get_terminator(self):
        return self.terminator

    # grab some more data from the socket,
    # throw it to the collector method,
    # check for the terminator,
    # if found, transition to the next state.

    def handle_read(self):

        try:
            data = self.recv(self.ac_in_buffer_size)
        except BlockingIOError:
            return
        except OSError:
            self.handle_error()
            return

        if isinstance(data, str) and self.use_encoding:
            # Bug fix: this previously called bytes(str, self.encoding),
            # passing the built-in `str` type instead of the data read,
            # which raised TypeError whenever this branch was hit.
            data = bytes(data, self.encoding)
        self.ac_in_buffer = self.ac_in_buffer + data

        # Continue to search for self.terminator in self.ac_in_buffer,
        # while calling self.collect_incoming_data. The while loop
        # is necessary because we might read several data+terminator
        # combos with a single recv(4096).

        while self.ac_in_buffer:
            lb = len(self.ac_in_buffer)
            terminator = self.get_terminator()
            if not terminator:
                # no terminator, collect it all
                self.collect_incoming_data(self.ac_in_buffer)
                self.ac_in_buffer = b''
            elif isinstance(terminator, int):
                # numeric terminator
                n = terminator
                if lb < n:
                    self.collect_incoming_data(self.ac_in_buffer)
                    self.ac_in_buffer = b''
                    self.terminator = self.terminator - lb
                else:
                    self.collect_incoming_data(self.ac_in_buffer[:n])
                    self.ac_in_buffer = self.ac_in_buffer[n:]
                    self.terminator = 0
                    self.found_terminator()
            else:
                # 3 cases:
                # 1) end of buffer matches terminator exactly:
                #    collect data, transition
                # 2) end of buffer matches some prefix:
                #    collect data to the prefix
                # 3) end of buffer does not match any prefix:
                #    collect data
                terminator_len = len(terminator)
                index = self.ac_in_buffer.find(terminator)
                if index != -1:
                    # we found the terminator
                    if index > 0:
                        # don't bother reporting the empty string
                        # (source of subtle bugs)
                        self.collect_incoming_data(self.ac_in_buffer[:index])
                    self.ac_in_buffer = self.ac_in_buffer[index+terminator_len:]
                    # This does the Right Thing if the terminator
                    # is changed here.
                    self.found_terminator()
                else:
                    # check for a prefix of the terminator
                    index = find_prefix_at_end(self.ac_in_buffer, terminator)
                    if index:
                        if index != lb:
                            # we found a prefix, collect up to the prefix
                            self.collect_incoming_data(self.ac_in_buffer[:-index])
                            self.ac_in_buffer = self.ac_in_buffer[-index:]
                        break
                    else:
                        # no prefix, collect it all
                        self.collect_incoming_data(self.ac_in_buffer)
                        self.ac_in_buffer = b''

    def handle_write(self):
        self.initiate_send()

    def handle_close(self):
        self.close()

    def push(self, data):
        """Queue *data* for sending, splitting it into output-buffer-sized
        chunks, and kick off a send attempt."""
        if not isinstance(data, (bytes, bytearray, memoryview)):
            # Bug fix: the %r placeholder was previously never
            # interpolated (the type object was passed as a second
            # TypeError argument instead of being formatted in).
            raise TypeError('data argument must be byte-ish (%r)'
                            % type(data))
        sabs = self.ac_out_buffer_size
        if len(data) > sabs:
            for i in range(0, len(data), sabs):
                self.producer_fifo.append(data[i:i+sabs])
        else:
            self.producer_fifo.append(data)
        self.initiate_send()

    def push_with_producer(self, producer):
        self.producer_fifo.append(producer)
        self.initiate_send()

    def readable(self):
        "predicate for inclusion in the readable for select()"
        # cannot use the old predicate, it violates the claim of the
        # set_terminator method.

        # return (len(self.ac_in_buffer) <= self.ac_in_buffer_size)
        return 1

    def writable(self):
        "predicate for inclusion in the writable for select()"
        return self.producer_fifo or (not self.connected)

    def close_when_done(self):
        "automatically close this channel once the outgoing queue is empty"
        self.producer_fifo.append(None)

    def initiate_send(self):
        # Drain the producer fifo into the socket as far as possible.
        while self.producer_fifo and self.connected:
            first = self.producer_fifo[0]
            # handle empty string/buffer or None entry
            if not first:
                del self.producer_fifo[0]
                if first is None:
                    self.handle_close()
                    return

            # handle classic producer behavior
            obs = self.ac_out_buffer_size
            try:
                data = first[:obs]
            except TypeError:
                # `first` is a producer object, not a buffer: pull from it.
                data = first.more()
                if data:
                    self.producer_fifo.appendleft(data)
                else:
                    del self.producer_fifo[0]
                continue

            if isinstance(data, str) and self.use_encoding:
                data = bytes(data, self.encoding)

            # send the data
            try:
                num_sent = self.send(data)
            except OSError:
                self.handle_error()
                return

            if num_sent:
                if num_sent < len(data) or obs < len(first):
                    self.producer_fifo[0] = first[num_sent:]
                else:
                    del self.producer_fifo[0]
            # we tried to send some actual data
            return

    def discard_buffers(self):
        # Emergencies only!
        self.ac_in_buffer = b''
        del self.incoming[:]
        self.producer_fifo.clear()
277
+
278
+
279
class simple_producer:
    """Producer that serves a bytes payload in fixed-size slices."""

    def __init__(self, data, buffer_size=512):
        self.data = data
        self.buffer_size = buffer_size

    def more(self):
        """Return the next chunk of at most *buffer_size* bytes.

        Returns an empty bytes object once the payload is exhausted.
        """
        chunk = self.data[:self.buffer_size]
        remainder = self.data[self.buffer_size:]
        self.data = remainder if remainder else b''
        return chunk
294
+
295
+
296
+ # Given 'haystack', see if any prefix of 'needle' is at its end. This
297
+ # assumes an exact match has already been checked. Return the number of
298
+ # characters matched.
299
+ # for example:
300
+ # f_p_a_e("qwerty\r", "\r\n") => 1
301
+ # f_p_a_e("qwertydkjf", "\r\n") => 0
302
+ # f_p_a_e("qwerty\r\n", "\r\n") => <undefined>
303
+
304
+ # this could maybe be made faster with a computed regex?
305
+ # [answer: no; circa Python-2.0, Jan 2001]
306
+ # new python: 28961/s
307
+ # old python: 18307/s
308
+ # re: 12820/s
309
+ # regex: 14035/s
310
+
311
def find_prefix_at_end(haystack, needle):
    """Return the length of the longest proper prefix of *needle* found
    at the end of *haystack* (0 if no prefix matches).

    Assumes an exact full match has already been ruled out by the caller.
    """
    size = len(needle) - 1
    # Try candidate prefix lengths from longest to shortest; stop at the
    # first one that matches the tail of the haystack.
    while size:
        if haystack.endswith(needle[:size]):
            break
        size -= 1
    return size
python310/asyncio/__init__.py ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """The asyncio package, tracking PEP 3156."""
2
+
3
+ # flake8: noqa
4
+
5
+ import sys
6
+
7
+ # This relies on each of the submodules having an __all__ variable.
8
+ from .base_events import *
9
+ from .coroutines import *
10
+ from .events import *
11
+ from .exceptions import *
12
+ from .futures import *
13
+ from .locks import *
14
+ from .protocols import *
15
+ from .runners import *
16
+ from .queues import *
17
+ from .streams import *
18
+ from .subprocess import *
19
+ from .tasks import *
20
+ from .threads import *
21
+ from .transports import *
22
+
23
+ __all__ = (base_events.__all__ +
24
+ coroutines.__all__ +
25
+ events.__all__ +
26
+ exceptions.__all__ +
27
+ futures.__all__ +
28
+ locks.__all__ +
29
+ protocols.__all__ +
30
+ runners.__all__ +
31
+ queues.__all__ +
32
+ streams.__all__ +
33
+ subprocess.__all__ +
34
+ tasks.__all__ +
35
+ threads.__all__ +
36
+ transports.__all__)
37
+
38
+ if sys.platform == 'win32': # pragma: no cover
39
+ from .windows_events import *
40
+ __all__ += windows_events.__all__
41
+ else:
42
+ from .unix_events import * # pragma: no cover
43
+ __all__ += unix_events.__all__
python310/asyncio/__main__.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import ast
2
+ import asyncio
3
+ import code
4
+ import concurrent.futures
5
+ import inspect
6
+ import sys
7
+ import threading
8
+ import types
9
+ import warnings
10
+
11
+ from . import futures
12
+
13
+
14
class AsyncIOInteractiveConsole(code.InteractiveConsole):
    """Interactive console whose statements execute on an asyncio loop.

    Input is compiled with PyCF_ALLOW_TOP_LEVEL_AWAIT so users may write
    ``await ...`` at the prompt.  The console itself runs in a separate
    thread (REPLThread below), while *loop* runs in the main thread;
    runcode() hands each statement across the thread boundary.
    """

    def __init__(self, locals, loop):
        super().__init__(locals)
        # Permit `await` outside a function in user input.
        self.compile.compiler.flags |= ast.PyCF_ALLOW_TOP_LEVEL_AWAIT

        self.loop = loop

    def runcode(self, code):
        """Execute *code* on the event loop thread; block for its result.

        Blocks the REPL thread on a concurrent.futures.Future until the
        loop thread has run the statement (or the coroutine it produced,
        when the user used top-level await).
        """
        future = concurrent.futures.Future()

        def callback():
            # Runs on the event loop thread (scheduled below via
            # call_soon_threadsafe); publishes state through the
            # module-level globals shared with the __main__ block.
            global repl_future
            global repl_future_interrupted

            repl_future = None
            repl_future_interrupted = False

            # Wrap the compiled code object in a function sharing the
            # console namespace; with top-level await allowed, calling
            # it may return a coroutine instead of running to completion.
            func = types.FunctionType(code, self.locals)
            try:
                coro = func()
            except SystemExit:
                raise
            except KeyboardInterrupt as ex:
                repl_future_interrupted = True
                future.set_exception(ex)
                return
            except BaseException as ex:
                future.set_exception(ex)
                return

            if not inspect.iscoroutine(coro):
                # Ordinary statement: its result is already available.
                future.set_result(coro)
                return

            try:
                # Schedule the coroutine and mirror its outcome into the
                # thread-safe future the REPL thread is blocked on.
                repl_future = self.loop.create_task(coro)
                futures._chain_future(repl_future, future)
            except BaseException as exc:
                future.set_exception(exc)

        loop.call_soon_threadsafe(callback)

        try:
            return future.result()
        except SystemExit:
            raise
        except BaseException:
            if repl_future_interrupted:
                self.write("\nKeyboardInterrupt\n")
            else:
                self.showtraceback()
67
+
68
class REPLThread(threading.Thread):
    """Background thread hosting the interactive console.

    The main thread keeps running the asyncio event loop; when the user
    exits the console, this thread asks the loop to stop.
    """

    def run(self):
        try:
            banner = (
                f'asyncio REPL {sys.version} on {sys.platform}\n'
                f'Use "await" directly instead of "asyncio.run()".\n'
                f'Type "help", "copyright", "credits" or "license" '
                f'for more information.\n'
                f'{getattr(sys, "ps1", ">>> ")}import asyncio'
            )

            # Blocks until the user exits the console.
            console.interact(
                banner=banner,
                exitmsg='exiting asyncio REPL...')
        finally:
            # Suppress "never awaited" warnings for coroutines the user
            # typed but abandoned, now that the REPL is shutting down.
            warnings.filterwarnings(
                'ignore',
                message=r'^coroutine .* was never awaited$',
                category=RuntimeWarning)

            loop.call_soon_threadsafe(loop.stop)
90
+
91
+
92
if __name__ == '__main__':
    loop = asyncio.new_event_loop()
    asyncio.set_event_loop(loop)

    # Seed the console namespace with asyncio plus this module's dunder
    # attributes so it behaves like a normal interactive session.
    repl_locals = {'asyncio': asyncio}
    for key in {'__name__', '__package__',
                '__loader__', '__spec__',
                '__builtins__', '__file__'}:
        repl_locals[key] = locals()[key]

    console = AsyncIOInteractiveConsole(repl_locals, loop)

    # State shared between the REPL thread and the loop thread: the task
    # currently running user code, and whether it was Ctrl-C'd.
    repl_future = None
    repl_future_interrupted = False

    # Line editing/history support, when available.
    try:
        import readline  # NoQA
    except ImportError:
        pass

    repl_thread = REPLThread()
    repl_thread.daemon = True
    repl_thread.start()

    # Run the event loop in the main thread; Ctrl-C cancels the current
    # user task (if any) rather than killing the whole REPL.
    while True:
        try:
            loop.run_forever()
        except KeyboardInterrupt:
            if repl_future and not repl_future.done():
                repl_future.cancel()
                repl_future_interrupted = True
            continue
        else:
            break
python310/asyncio/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (744 Bytes). View file
 
python310/asyncio/__pycache__/__main__.cpython-310.pyc ADDED
Binary file (3.17 kB). View file
 
python310/asyncio/__pycache__/base_events.cpython-310.pyc ADDED
Binary file (51.9 kB). View file
 
python310/asyncio/__pycache__/base_futures.cpython-310.pyc ADDED
Binary file (1.91 kB). View file
 
python310/asyncio/__pycache__/base_subprocess.cpython-310.pyc ADDED
Binary file (9.39 kB). View file
 
python310/asyncio/__pycache__/base_tasks.cpython-310.pyc ADDED
Binary file (1.98 kB). View file
 
python310/asyncio/__pycache__/constants.cpython-310.pyc ADDED
Binary file (586 Bytes). View file
 
python310/asyncio/__pycache__/coroutines.cpython-310.pyc ADDED
Binary file (6.62 kB). View file
 
python310/asyncio/__pycache__/events.cpython-310.pyc ADDED
Binary file (28.3 kB). View file
 
python310/asyncio/__pycache__/exceptions.cpython-310.pyc ADDED
Binary file (2.47 kB). View file
 
python310/asyncio/__pycache__/format_helpers.cpython-310.pyc ADDED
Binary file (2.34 kB). View file
 
python310/asyncio/__pycache__/futures.cpython-310.pyc ADDED
Binary file (11.9 kB). View file
 
python310/asyncio/__pycache__/locks.cpython-310.pyc ADDED
Binary file (14.2 kB). View file
 
python310/asyncio/__pycache__/log.cpython-310.pyc ADDED
Binary file (229 Bytes). View file
 
python310/asyncio/__pycache__/mixins.cpython-310.pyc ADDED
Binary file (1.08 kB). View file
 
python310/asyncio/__pycache__/proactor_events.cpython-310.pyc ADDED
Binary file (24.7 kB). View file
 
python310/asyncio/__pycache__/protocols.cpython-310.pyc ADDED
Binary file (8.3 kB). View file
 
python310/asyncio/__pycache__/queues.cpython-310.pyc ADDED
Binary file (8.3 kB). View file
 
python310/asyncio/__pycache__/runners.cpython-310.pyc ADDED
Binary file (2.09 kB). View file
 
python310/asyncio/__pycache__/selector_events.cpython-310.pyc ADDED
Binary file (29.6 kB). View file
 
python310/asyncio/__pycache__/sslproto.cpython-310.pyc ADDED
Binary file (22 kB). View file
 
python310/asyncio/__pycache__/staggered.cpython-310.pyc ADDED
Binary file (4.18 kB). View file
 
python310/asyncio/__pycache__/streams.cpython-310.pyc ADDED
Binary file (20.4 kB). View file
 
python310/asyncio/__pycache__/subprocess.cpython-310.pyc ADDED
Binary file (7.06 kB). View file
 
python310/asyncio/__pycache__/tasks.cpython-310.pyc ADDED
Binary file (24 kB). View file
 
python310/asyncio/__pycache__/threads.cpython-310.pyc ADDED
Binary file (985 Bytes). View file
 
python310/asyncio/__pycache__/transports.cpython-310.pyc ADDED
Binary file (12.3 kB). View file
 
python310/asyncio/__pycache__/trsock.cpython-310.pyc ADDED
Binary file (7.84 kB). View file
 
python310/asyncio/__pycache__/unix_events.cpython-310.pyc ADDED
Binary file (41.7 kB). View file
 
python310/asyncio/__pycache__/windows_events.cpython-310.pyc ADDED
Binary file (24.1 kB). View file
 
python310/asyncio/__pycache__/windows_utils.cpython-310.pyc ADDED
Binary file (4.5 kB). View file
 
python310/asyncio/base_events.py ADDED
@@ -0,0 +1,1934 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Base implementation of event loop.
2
+
3
+ The event loop can be broken up into a multiplexer (the part
4
+ responsible for notifying us of I/O events) and the event loop proper,
5
+ which wraps a multiplexer with functionality for scheduling callbacks,
6
+ immediately or at a given time in the future.
7
+
8
+ Whenever a public API takes a callback, subsequent positional
9
+ arguments will be passed to the callback if/when it is called. This
10
+ avoids the proliferation of trivial lambdas implementing closures.
11
+ Keyword arguments for the callback are not supported; this is a
12
+ conscious design decision, leaving the door open for keyword arguments
13
+ to modify the meaning of the API call itself.
14
+ """
15
+
16
+ import collections
17
+ import collections.abc
18
+ import concurrent.futures
19
+ import functools
20
+ import heapq
21
+ import itertools
22
+ import os
23
+ import socket
24
+ import stat
25
+ import subprocess
26
+ import threading
27
+ import time
28
+ import traceback
29
+ import sys
30
+ import warnings
31
+ import weakref
32
+
33
+ try:
34
+ import ssl
35
+ except ImportError: # pragma: no cover
36
+ ssl = None
37
+
38
+ from . import constants
39
+ from . import coroutines
40
+ from . import events
41
+ from . import exceptions
42
+ from . import futures
43
+ from . import protocols
44
+ from . import sslproto
45
+ from . import staggered
46
+ from . import tasks
47
+ from . import transports
48
+ from . import trsock
49
+ from .log import logger
50
+
51
+
52
# Public names exported by this module (a tuple via the trailing comma).
__all__ = 'BaseEventLoop','Server',


# Minimum number of _scheduled timer handles before cleanup of
# cancelled handles is performed.
_MIN_SCHEDULED_TIMER_HANDLES = 100

# Minimum fraction of _scheduled timer handles that are cancelled
# before cleanup of cancelled handles is performed.
_MIN_CANCELLED_TIMER_HANDLES_FRACTION = 0.5


# Whether the platform's socket module was built with IPv6 support.
_HAS_IPv6 = hasattr(socket, 'AF_INET6')

# Maximum timeout passed to select to avoid OS limitations
MAXIMUM_SELECT_TIMEOUT = 24 * 3600

# Used for deprecation and removal of `loop.create_datagram_endpoint()`'s
# *reuse_address* parameter
_unset = object()
72
+
73
+
74
def _format_handle(handle):
    """Return a human-readable description of *handle* for logging.

    When the handle's callback is a bound method of a Task, show the
    task's repr instead of the raw handle.
    """
    callback = handle._callback
    owner = getattr(callback, '__self__', None)
    if isinstance(owner, tasks.Task):
        # format the task
        return repr(owner)
    return str(handle)
81
+
82
+
83
+ def _format_pipe(fd):
84
+ if fd == subprocess.PIPE:
85
+ return '<pipe>'
86
+ elif fd == subprocess.STDOUT:
87
+ return '<stdout>'
88
+ else:
89
+ return repr(fd)
90
+
91
+
92
+ def _set_reuseport(sock):
93
+ if not hasattr(socket, 'SO_REUSEPORT'):
94
+ raise ValueError('reuse_port not supported by socket module')
95
+ else:
96
+ try:
97
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
98
+ except OSError:
99
+ raise ValueError('reuse_port not supported by socket module, '
100
+ 'SO_REUSEPORT defined but not implemented.')
101
+
102
+
103
def _ipaddr_info(host, port, family, type, proto, flowinfo=0, scopeid=0):
    """Return a getaddrinfo()-style tuple if *host* is already numeric.

    On success returns (family, type, proto, canonname, sockaddr);
    returning None means the caller must fall back to a real
    getaddrinfo() lookup.
    """
    # Try to skip getaddrinfo if "host" is already an IP. Users might have
    # handled name resolution in their own code and pass in resolved IPs.
    if not hasattr(socket, 'inet_pton'):
        return

    if proto not in {0, socket.IPPROTO_TCP, socket.IPPROTO_UDP} or \
            host is None:
        return None

    # Normalize the protocol from the socket type; anything other than
    # plain stream/datagram sockets is left to getaddrinfo.
    if type == socket.SOCK_STREAM:
        proto = socket.IPPROTO_TCP
    elif type == socket.SOCK_DGRAM:
        proto = socket.IPPROTO_UDP
    else:
        return None

    if port is None:
        port = 0
    elif isinstance(port, bytes) and port == b'':
        port = 0
    elif isinstance(port, str) and port == '':
        port = 0
    else:
        # If port's a service name like "http", don't skip getaddrinfo.
        try:
            port = int(port)
        except (TypeError, ValueError):
            return None

    # Candidate address families to try parsing the host against.
    if family == socket.AF_UNSPEC:
        afs = [socket.AF_INET]
        if _HAS_IPv6:
            afs.append(socket.AF_INET6)
    else:
        afs = [family]

    if isinstance(host, bytes):
        host = host.decode('idna')
    if '%' in host:
        # Linux's inet_pton doesn't accept an IPv6 zone index after host,
        # like '::1%lo0'.
        return None

    for af in afs:
        try:
            socket.inet_pton(af, host)
            # The host has already been resolved.
            if _HAS_IPv6 and af == socket.AF_INET6:
                return af, type, proto, '', (host, port, flowinfo, scopeid)
            else:
                return af, type, proto, '', (host, port)
        except OSError:
            # Not a valid literal for this family; try the next one.
            pass

    # "host" is not an IP address.
    return None
160
+
161
+
162
+ def _interleave_addrinfos(addrinfos, first_address_family_count=1):
163
+ """Interleave list of addrinfo tuples by family."""
164
+ # Group addresses by family
165
+ addrinfos_by_family = collections.OrderedDict()
166
+ for addr in addrinfos:
167
+ family = addr[0]
168
+ if family not in addrinfos_by_family:
169
+ addrinfos_by_family[family] = []
170
+ addrinfos_by_family[family].append(addr)
171
+ addrinfos_lists = list(addrinfos_by_family.values())
172
+
173
+ reordered = []
174
+ if first_address_family_count > 1:
175
+ reordered.extend(addrinfos_lists[0][:first_address_family_count - 1])
176
+ del addrinfos_lists[0][:first_address_family_count - 1]
177
+ reordered.extend(
178
+ a for a in itertools.chain.from_iterable(
179
+ itertools.zip_longest(*addrinfos_lists)
180
+ ) if a is not None)
181
+ return reordered
182
+
183
+
184
def _run_until_complete_cb(fut):
    """Done-callback installed by run_until_complete() to stop the loop.

    If the future finished with SystemExit/KeyboardInterrupt, the loop
    has already stopped (issue #22429), so nothing is done then.
    """
    if fut.cancelled():
        futures._get_loop(fut).stop()
        return
    if isinstance(fut.exception(), (SystemExit, KeyboardInterrupt)):
        # Issue #22429: run_forever() already finished, no need to
        # stop it.
        return
    futures._get_loop(fut).stop()
192
+
193
+
194
+ if hasattr(socket, 'TCP_NODELAY'):
195
+ def _set_nodelay(sock):
196
+ if (sock.family in {socket.AF_INET, socket.AF_INET6} and
197
+ sock.type == socket.SOCK_STREAM and
198
+ sock.proto == socket.IPPROTO_TCP):
199
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
200
+ else:
201
+ def _set_nodelay(sock):
202
+ pass
203
+
204
+
205
+ def _check_ssl_socket(sock):
206
+ if ssl is not None and isinstance(sock, ssl.SSLSocket):
207
+ raise TypeError("Socket cannot be of type SSLSocket")
208
+
209
+
210
class _SendfileFallbackProtocol(protocols.Protocol):
    """Protocol that temporarily replaces a transport's real protocol.

    Judging by its name, used by the loop's sendfile fallback path
    (confirm against the caller): it pauses reading, swaps itself in,
    lets the sender await flow control via drain(), and restore() swaps
    the original protocol (and its paused/reading state) back.
    """

    def __init__(self, transp):
        if not isinstance(transp, transports._FlowControlMixin):
            raise TypeError("transport should be _FlowControlMixin instance")
        self._transport = transp
        # Original protocol and its state, so restore() can reinstate it.
        self._proto = transp.get_protocol()
        self._should_resume_reading = transp.is_reading()
        self._should_resume_writing = transp._protocol_paused
        transp.pause_reading()
        transp.set_protocol(self)
        if self._should_resume_writing:
            # Writing is already paused: drain() must wait on this future
            # until resume_writing() is called.
            self._write_ready_fut = self._transport._loop.create_future()
        else:
            self._write_ready_fut = None

    async def drain(self):
        """Wait until the transport is ready to accept more data."""
        if self._transport.is_closing():
            raise ConnectionError("Connection closed by peer")
        fut = self._write_ready_fut
        if fut is None:
            return
        await fut

    def connection_made(self, transport):
        # The connection was established before this protocol existed.
        raise RuntimeError("Invalid state: "
                           "connection should have been established already.")

    def connection_lost(self, exc):
        if self._write_ready_fut is not None:
            # Never happens if peer disconnects after sending the whole content
            # Thus disconnection is always an exception from user perspective
            if exc is None:
                self._write_ready_fut.set_exception(
                    ConnectionError("Connection is closed by peer"))
            else:
                self._write_ready_fut.set_exception(exc)
        # Forward the notification to the original protocol.
        self._proto.connection_lost(exc)

    def pause_writing(self):
        if self._write_ready_fut is not None:
            return
        self._write_ready_fut = self._transport._loop.create_future()

    def resume_writing(self):
        if self._write_ready_fut is None:
            return
        self._write_ready_fut.set_result(False)
        self._write_ready_fut = None

    def data_received(self, data):
        # Reading was paused in __init__; no data should arrive.
        raise RuntimeError("Invalid state: reading should be paused")

    def eof_received(self):
        raise RuntimeError("Invalid state: reading should be paused")

    async def restore(self):
        """Reinstall the original protocol and its saved state."""
        self._transport.set_protocol(self._proto)
        if self._should_resume_reading:
            self._transport.resume_reading()
        if self._write_ready_fut is not None:
            # Cancel the future.
            # Basically it has no effect because protocol is switched back,
            # no code should wait for it anymore.
            self._write_ready_fut.cancel()
        if self._should_resume_writing:
            self._proto.resume_writing()
276
+
277
+
278
class Server(events.AbstractServer):
    """Listening-server object returned by loop.create_server().

    Tracks the listening sockets, whether the server is currently
    accepting connections, and the number of active ("attached")
    transports so wait_closed() can wait for in-flight connections.
    """

    def __init__(self, loop, sockets, protocol_factory, ssl_context, backlog,
                 ssl_handshake_timeout):
        self._loop = loop
        # Listening sockets; set to None by close().
        self._sockets = sockets
        # Number of transports currently using this server.
        self._active_count = 0
        # Futures resolved by _wakeup() when the server is fully closed.
        self._waiters = []
        self._protocol_factory = protocol_factory
        self._backlog = backlog
        self._ssl_context = ssl_context
        self._ssl_handshake_timeout = ssl_handshake_timeout
        self._serving = False
        self._serving_forever_fut = None

    def __repr__(self):
        return f'<{self.__class__.__name__} sockets={self.sockets!r}>'

    def _attach(self):
        # A new transport started using this server.
        assert self._sockets is not None
        self._active_count += 1

    def _detach(self):
        # A transport finished; if the server is already closed and this
        # was the last transport, release wait_closed() callers.
        assert self._active_count > 0
        self._active_count -= 1
        if self._active_count == 0 and self._sockets is None:
            self._wakeup()

    def _wakeup(self):
        # Resolve all wait_closed() futures.  Nulling out self._waiters
        # first makes repeated calls harmless.
        waiters = self._waiters
        self._waiters = None
        for waiter in waiters:
            if not waiter.done():
                waiter.set_result(waiter)

    def _start_serving(self):
        if self._serving:
            return
        self._serving = True
        for sock in self._sockets:
            sock.listen(self._backlog)
            self._loop._start_serving(
                self._protocol_factory, sock, self._ssl_context,
                self, self._backlog, self._ssl_handshake_timeout)

    def get_loop(self):
        """Return the event loop this server is attached to."""
        return self._loop

    def is_serving(self):
        """Return True if the server is accepting connections."""
        return self._serving

    @property
    def sockets(self):
        # Sockets are wrapped in TransportSocket so callers cannot
        # mutate loop-managed state; () once the server is closed.
        if self._sockets is None:
            return ()
        return tuple(trsock.TransportSocket(s) for s in self._sockets)

    def close(self):
        """Stop listening; does not wait for active connections."""
        sockets = self._sockets
        if sockets is None:
            # Already closed.
            return
        self._sockets = None

        for sock in sockets:
            self._loop._stop_serving(sock)

        self._serving = False

        if (self._serving_forever_fut is not None and
                not self._serving_forever_fut.done()):
            self._serving_forever_fut.cancel()
            self._serving_forever_fut = None

        if self._active_count == 0:
            self._wakeup()

    async def start_serving(self):
        """Start accepting connections without blocking."""
        self._start_serving()
        # Skip one loop iteration so that all 'loop.add_reader'
        # go through.
        await tasks.sleep(0)

    async def serve_forever(self):
        """Start accepting connections and block until cancelled."""
        if self._serving_forever_fut is not None:
            raise RuntimeError(
                f'server {self!r} is already being awaited on serve_forever()')
        if self._sockets is None:
            raise RuntimeError(f'server {self!r} is closed')

        self._start_serving()
        self._serving_forever_fut = self._loop.create_future()

        try:
            await self._serving_forever_fut
        except exceptions.CancelledError:
            # On cancellation, shut the server down cleanly before
            # re-raising to the caller.
            try:
                self.close()
                await self.wait_closed()
            finally:
                raise
        finally:
            self._serving_forever_fut = None

    async def wait_closed(self):
        """Wait until the server is closed and no connections remain."""
        if self._sockets is None or self._waiters is None:
            # Either never closed yet (per the check order) or already
            # fully shut down; nothing to wait for.
            return
        waiter = self._loop.create_future()
        self._waiters.append(waiter)
        await waiter
387
+
388
+
389
+ class BaseEventLoop(events.AbstractEventLoop):
390
+
391
+ def __init__(self):
392
+ self._timer_cancelled_count = 0
393
+ self._closed = False
394
+ self._stopping = False
395
+ self._ready = collections.deque()
396
+ self._scheduled = []
397
+ self._default_executor = None
398
+ self._internal_fds = 0
399
+ # Identifier of the thread running the event loop, or None if the
400
+ # event loop is not running
401
+ self._thread_id = None
402
+ self._clock_resolution = time.get_clock_info('monotonic').resolution
403
+ self._exception_handler = None
404
+ self.set_debug(coroutines._is_debug_mode())
405
+ # In debug mode, if the execution of a callback or a step of a task
406
+ # exceed this duration in seconds, the slow callback/task is logged.
407
+ self.slow_callback_duration = 0.1
408
+ self._current_handle = None
409
+ self._task_factory = None
410
+ self._coroutine_origin_tracking_enabled = False
411
+ self._coroutine_origin_tracking_saved_depth = None
412
+
413
+ # A weak set of all asynchronous generators that are
414
+ # being iterated by the loop.
415
+ self._asyncgens = weakref.WeakSet()
416
+ # Set to True when `loop.shutdown_asyncgens` is called.
417
+ self._asyncgens_shutdown_called = False
418
+ # Set to True when `loop.shutdown_default_executor` is called.
419
+ self._executor_shutdown_called = False
420
+
421
+ def __repr__(self):
422
+ return (
423
+ f'<{self.__class__.__name__} running={self.is_running()} '
424
+ f'closed={self.is_closed()} debug={self.get_debug()}>'
425
+ )
426
+
427
+ def create_future(self):
428
+ """Create a Future object attached to the loop."""
429
+ return futures.Future(loop=self)
430
+
431
+ def create_task(self, coro, *, name=None):
432
+ """Schedule a coroutine object.
433
+
434
+ Return a task object.
435
+ """
436
+ self._check_closed()
437
+ if self._task_factory is None:
438
+ task = tasks.Task(coro, loop=self, name=name)
439
+ if task._source_traceback:
440
+ del task._source_traceback[-1]
441
+ else:
442
+ task = self._task_factory(self, coro)
443
+ tasks._set_task_name(task, name)
444
+
445
+ return task
446
+
447
+ def set_task_factory(self, factory):
448
+ """Set a task factory that will be used by loop.create_task().
449
+
450
+ If factory is None the default task factory will be set.
451
+
452
+ If factory is a callable, it should have a signature matching
453
+ '(loop, coro)', where 'loop' will be a reference to the active
454
+ event loop, 'coro' will be a coroutine object. The callable
455
+ must return a Future.
456
+ """
457
+ if factory is not None and not callable(factory):
458
+ raise TypeError('task factory must be a callable or None')
459
+ self._task_factory = factory
460
+
461
+ def get_task_factory(self):
462
+ """Return a task factory, or None if the default one is in use."""
463
+ return self._task_factory
464
+
465
+ def _make_socket_transport(self, sock, protocol, waiter=None, *,
466
+ extra=None, server=None):
467
+ """Create socket transport."""
468
+ raise NotImplementedError
469
+
470
+ def _make_ssl_transport(
471
+ self, rawsock, protocol, sslcontext, waiter=None,
472
+ *, server_side=False, server_hostname=None,
473
+ extra=None, server=None,
474
+ ssl_handshake_timeout=None,
475
+ call_connection_made=True):
476
+ """Create SSL transport."""
477
+ raise NotImplementedError
478
+
479
+ def _make_datagram_transport(self, sock, protocol,
480
+ address=None, waiter=None, extra=None):
481
+ """Create datagram transport."""
482
+ raise NotImplementedError
483
+
484
+ def _make_read_pipe_transport(self, pipe, protocol, waiter=None,
485
+ extra=None):
486
+ """Create read pipe transport."""
487
+ raise NotImplementedError
488
+
489
+ def _make_write_pipe_transport(self, pipe, protocol, waiter=None,
490
+ extra=None):
491
+ """Create write pipe transport."""
492
+ raise NotImplementedError
493
+
494
+ async def _make_subprocess_transport(self, protocol, args, shell,
495
+ stdin, stdout, stderr, bufsize,
496
+ extra=None, **kwargs):
497
+ """Create subprocess transport."""
498
+ raise NotImplementedError
499
+
500
+ def _write_to_self(self):
501
+ """Write a byte to self-pipe, to wake up the event loop.
502
+
503
+ This may be called from a different thread.
504
+
505
+ The subclass is responsible for implementing the self-pipe.
506
+ """
507
+ raise NotImplementedError
508
+
509
+ def _process_events(self, event_list):
510
+ """Process selector events."""
511
+ raise NotImplementedError
512
+
513
+ def _check_closed(self):
514
+ if self._closed:
515
+ raise RuntimeError('Event loop is closed')
516
+
517
+ def _check_default_executor(self):
518
+ if self._executor_shutdown_called:
519
+ raise RuntimeError('Executor shutdown has been called')
520
+
521
+ def _asyncgen_finalizer_hook(self, agen):
522
+ self._asyncgens.discard(agen)
523
+ if not self.is_closed():
524
+ self.call_soon_threadsafe(self.create_task, agen.aclose())
525
+
526
+ def _asyncgen_firstiter_hook(self, agen):
527
+ if self._asyncgens_shutdown_called:
528
+ warnings.warn(
529
+ f"asynchronous generator {agen!r} was scheduled after "
530
+ f"loop.shutdown_asyncgens() call",
531
+ ResourceWarning, source=self)
532
+
533
+ self._asyncgens.add(agen)
534
+
535
+ async def shutdown_asyncgens(self):
536
+ """Shutdown all active asynchronous generators."""
537
+ self._asyncgens_shutdown_called = True
538
+
539
+ if not len(self._asyncgens):
540
+ # If Python version is <3.6 or we don't have any asynchronous
541
+ # generators alive.
542
+ return
543
+
544
+ closing_agens = list(self._asyncgens)
545
+ self._asyncgens.clear()
546
+
547
+ results = await tasks.gather(
548
+ *[ag.aclose() for ag in closing_agens],
549
+ return_exceptions=True)
550
+
551
+ for result, agen in zip(results, closing_agens):
552
+ if isinstance(result, Exception):
553
+ self.call_exception_handler({
554
+ 'message': f'an error occurred during closing of '
555
+ f'asynchronous generator {agen!r}',
556
+ 'exception': result,
557
+ 'asyncgen': agen
558
+ })
559
+
560
+ async def shutdown_default_executor(self):
561
+ """Schedule the shutdown of the default executor."""
562
+ self._executor_shutdown_called = True
563
+ if self._default_executor is None:
564
+ return
565
+ future = self.create_future()
566
+ thread = threading.Thread(target=self._do_shutdown, args=(future,))
567
+ thread.start()
568
+ try:
569
+ await future
570
+ finally:
571
+ thread.join()
572
+
573
+ def _do_shutdown(self, future):
574
+ try:
575
+ self._default_executor.shutdown(wait=True)
576
+ if not self.is_closed():
577
+ self.call_soon_threadsafe(future.set_result, None)
578
+ except Exception as ex:
579
+ if not self.is_closed():
580
+ self.call_soon_threadsafe(future.set_exception, ex)
581
+
582
+ def _check_running(self):
583
+ if self.is_running():
584
+ raise RuntimeError('This event loop is already running')
585
+ if events._get_running_loop() is not None:
586
+ raise RuntimeError(
587
+ 'Cannot run the event loop while another loop is running')
588
+
589
+ def run_forever(self):
590
+ """Run until stop() is called."""
591
+ self._check_closed()
592
+ self._check_running()
593
+ self._set_coroutine_origin_tracking(self._debug)
594
+
595
+ old_agen_hooks = sys.get_asyncgen_hooks()
596
+ try:
597
+ self._thread_id = threading.get_ident()
598
+ sys.set_asyncgen_hooks(firstiter=self._asyncgen_firstiter_hook,
599
+ finalizer=self._asyncgen_finalizer_hook)
600
+
601
+ events._set_running_loop(self)
602
+ while True:
603
+ self._run_once()
604
+ if self._stopping:
605
+ break
606
+ finally:
607
+ self._stopping = False
608
+ self._thread_id = None
609
+ events._set_running_loop(None)
610
+ self._set_coroutine_origin_tracking(False)
611
+ sys.set_asyncgen_hooks(*old_agen_hooks)
612
+
613
    def run_until_complete(self, future):
        """Run until the Future is done.

        If the argument is a coroutine, it is wrapped in a Task.

        WARNING: It would be disastrous to call run_until_complete()
        with the same coroutine twice -- it would wrap it in two
        different Tasks and that can't be good.

        Return the Future's result, or raise its exception.
        """
        self._check_closed()
        self._check_running()

        new_task = not futures.isfuture(future)
        future = tasks.ensure_future(future, loop=self)
        if new_task:
            # An exception is raised if the future didn't complete, so there
            # is no need to log the "destroy pending task" message
            future._log_destroy_pending = False

        # Stop the loop as soon as the future completes.
        future.add_done_callback(_run_until_complete_cb)
        try:
            self.run_forever()
        except:
            if new_task and future.done() and not future.cancelled():
                # The coroutine raised a BaseException. Consume the exception
                # to not log a warning, the caller doesn't have access to the
                # local task.
                future.exception()
            raise
        finally:
            future.remove_done_callback(_run_until_complete_cb)
        if not future.done():
            raise RuntimeError('Event loop stopped before Future completed.')

        return future.result()
650
+
651
+ def stop(self):
652
+ """Stop running the event loop.
653
+
654
+ Every callback already scheduled will still run. This simply informs
655
+ run_forever to stop looping after a complete iteration.
656
+ """
657
+ self._stopping = True
658
+
659
    def close(self):
        """Close the event loop.

        This clears the queues and shuts down the executor,
        but does not wait for the executor to finish.

        The event loop must not be running.
        """
        if self.is_running():
            raise RuntimeError("Cannot close a running event loop")
        if self._closed:
            return  # idempotent: closing twice is a no-op
        if self._debug:
            logger.debug("Close %r", self)
        self._closed = True
        self._ready.clear()
        self._scheduled.clear()
        # Reject any further run_in_executor() submissions.
        self._executor_shutdown_called = True
        executor = self._default_executor
        if executor is not None:
            self._default_executor = None
            # close() must not block waiting for worker threads.
            executor.shutdown(wait=False)
681
+
682
+ def is_closed(self):
683
+ """Returns True if the event loop was closed."""
684
+ return self._closed
685
+
686
+ def __del__(self, _warn=warnings.warn):
687
+ if not self.is_closed():
688
+ _warn(f"unclosed event loop {self!r}", ResourceWarning, source=self)
689
+ if not self.is_running():
690
+ self.close()
691
+
692
+ def is_running(self):
693
+ """Returns True if the event loop is running."""
694
+ return (self._thread_id is not None)
695
+
696
+ def time(self):
697
+ """Return the time according to the event loop's clock.
698
+
699
+ This is a float expressed in seconds since an epoch, but the
700
+ epoch, precision, accuracy and drift are unspecified and may
701
+ differ per event loop.
702
+ """
703
+ return time.monotonic()
704
+
705
    def call_later(self, delay, callback, *args, context=None):
        """Arrange for a callback to be called at a given time.

        Return a Handle: an opaque object with a cancel() method that
        can be used to cancel the call.

        The delay can be an int or float, expressed in seconds. It is
        always relative to the current time.

        Each callback will be called exactly once. If two callbacks
        are scheduled for exactly the same time, it is undefined which
        will be called first.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        timer = self.call_at(self.time() + delay, callback, *args,
                             context=context)
        if timer._source_traceback:
            # Drop this wrapper's frame so the handle's traceback points
            # at the user's call site rather than at call_later().
            del timer._source_traceback[-1]
        return timer
726
+
727
    def call_at(self, when, callback, *args, context=None):
        """Like call_later(), but uses an absolute time.

        Absolute time corresponds to the event loop's time() method.
        Returns a TimerHandle that can be used to cancel the call.
        """
        self._check_closed()
        if self._debug:
            self._check_thread()
            self._check_callback(callback, 'call_at')
        timer = events.TimerHandle(when, callback, args, self, context)
        if timer._source_traceback:
            # Hide this internal frame from the handle's creation traceback.
            del timer._source_traceback[-1]
        # Timers live in a min-heap ordered by deadline.
        heapq.heappush(self._scheduled, timer)
        timer._scheduled = True
        return timer
742
+
743
    def call_soon(self, callback, *args, context=None):
        """Arrange for a callback to be called as soon as possible.

        This operates as a FIFO queue: callbacks are called in the
        order in which they are registered. Each callback will be
        called exactly once.

        Any positional arguments after the callback will be passed to
        the callback when it is called.
        """
        self._check_closed()
        if self._debug:
            # Debug mode also verifies we are on the loop's own thread;
            # use call_soon_threadsafe() from other threads.
            self._check_thread()
            self._check_callback(callback, 'call_soon')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            # Hide this internal frame from the handle's traceback.
            del handle._source_traceback[-1]
        return handle
761
+
762
+ def _check_callback(self, callback, method):
763
+ if (coroutines.iscoroutine(callback) or
764
+ coroutines.iscoroutinefunction(callback)):
765
+ raise TypeError(
766
+ f"coroutines cannot be used with {method}()")
767
+ if not callable(callback):
768
+ raise TypeError(
769
+ f'a callable object was expected by {method}(), '
770
+ f'got {callback!r}')
771
+
772
    def _call_soon(self, callback, args, context):
        # Shared implementation for call_soon()/call_soon_threadsafe():
        # wrap the callback in a Handle and append it to the ready queue.
        handle = events.Handle(callback, args, self, context)
        if handle._source_traceback:
            # Hide this internal frame from the handle's traceback.
            del handle._source_traceback[-1]
        self._ready.append(handle)
        return handle
778
+
779
+ def _check_thread(self):
780
+ """Check that the current thread is the thread running the event loop.
781
+
782
+ Non-thread-safe methods of this class make this assumption and will
783
+ likely behave incorrectly when the assumption is violated.
784
+
785
+ Should only be called when (self._debug == True). The caller is
786
+ responsible for checking this condition for performance reasons.
787
+ """
788
+ if self._thread_id is None:
789
+ return
790
+ thread_id = threading.get_ident()
791
+ if thread_id != self._thread_id:
792
+ raise RuntimeError(
793
+ "Non-thread-safe operation invoked on an event loop other "
794
+ "than the current one")
795
+
796
    def call_soon_threadsafe(self, callback, *args, context=None):
        """Like call_soon(), but thread-safe."""
        self._check_closed()
        if self._debug:
            self._check_callback(callback, 'call_soon_threadsafe')
        handle = self._call_soon(callback, args, context)
        if handle._source_traceback:
            # Hide this internal frame from the handle's traceback.
            del handle._source_traceback[-1]
        # Wake the (possibly sleeping) loop via its self-pipe so the
        # callback runs promptly even when called from another thread.
        self._write_to_self()
        return handle
806
+
807
    def run_in_executor(self, executor, func, *args):
        """Run *func* in *executor* and return an asyncio Future for it.

        With executor=None a shared default ThreadPoolExecutor is
        created lazily on first use and reused afterwards.
        """
        self._check_closed()
        if self._debug:
            self._check_callback(func, 'run_in_executor')
        if executor is None:
            executor = self._default_executor
            # Only check when the default executor is being used
            self._check_default_executor()
            if executor is None:
                executor = concurrent.futures.ThreadPoolExecutor(
                    thread_name_prefix='asyncio'
                )
                self._default_executor = executor
        # Bridge the concurrent.futures.Future into an asyncio future.
        return futures.wrap_future(
            executor.submit(func, *args), loop=self)
822
+
823
+ def set_default_executor(self, executor):
824
+ if not isinstance(executor, concurrent.futures.ThreadPoolExecutor):
825
+ warnings.warn(
826
+ 'Using the default executor that is not an instance of '
827
+ 'ThreadPoolExecutor is deprecated and will be prohibited '
828
+ 'in Python 3.9',
829
+ DeprecationWarning, 2)
830
+ self._default_executor = executor
831
+
832
+ def _getaddrinfo_debug(self, host, port, family, type, proto, flags):
833
+ msg = [f"{host}:{port!r}"]
834
+ if family:
835
+ msg.append(f'family={family!r}')
836
+ if type:
837
+ msg.append(f'type={type!r}')
838
+ if proto:
839
+ msg.append(f'proto={proto!r}')
840
+ if flags:
841
+ msg.append(f'flags={flags!r}')
842
+ msg = ', '.join(msg)
843
+ logger.debug('Get address info %s', msg)
844
+
845
+ t0 = self.time()
846
+ addrinfo = socket.getaddrinfo(host, port, family, type, proto, flags)
847
+ dt = self.time() - t0
848
+
849
+ msg = f'Getting address info {msg} took {dt * 1e3:.3f}ms: {addrinfo!r}'
850
+ if dt >= self.slow_callback_duration:
851
+ logger.info(msg)
852
+ else:
853
+ logger.debug(msg)
854
+ return addrinfo
855
+
856
+ async def getaddrinfo(self, host, port, *,
857
+ family=0, type=0, proto=0, flags=0):
858
+ if self._debug:
859
+ getaddr_func = self._getaddrinfo_debug
860
+ else:
861
+ getaddr_func = socket.getaddrinfo
862
+
863
+ return await self.run_in_executor(
864
+ None, getaddr_func, host, port, family, type, proto, flags)
865
+
866
+ async def getnameinfo(self, sockaddr, flags=0):
867
+ return await self.run_in_executor(
868
+ None, socket.getnameinfo, sockaddr, flags)
869
+
870
    async def sock_sendfile(self, sock, file, offset=0, count=None,
                            *, fallback=True):
        """Send *file* over *sock*, preferring the native sendfile path.

        Returns the total number of bytes sent.  With fallback=True a
        plain read/sendall loop is used when the native path raises
        SendfileNotAvailableError.
        """
        if self._debug and sock.gettimeout() != 0:
            raise ValueError("the socket must be non-blocking")
        _check_ssl_socket(sock)
        self._check_sendfile_params(sock, file, offset, count)
        try:
            return await self._sock_sendfile_native(sock, file,
                                                    offset, count)
        except exceptions.SendfileNotAvailableError as exc:
            if not fallback:
                raise
        # Native path unavailable: emulate with file reads + sock_sendall.
        return await self._sock_sendfile_fallback(sock, file,
                                                  offset, count)
884
+
885
+ async def _sock_sendfile_native(self, sock, file, offset, count):
886
+ # NB: sendfile syscall is not supported for SSL sockets and
887
+ # non-mmap files even if sendfile is supported by OS
888
+ raise exceptions.SendfileNotAvailableError(
889
+ f"syscall sendfile is not available for socket {sock!r} "
890
+ f"and file {file!r} combination")
891
+
892
    async def _sock_sendfile_fallback(self, sock, file, offset, count):
        # Pure-Python emulation of sendfile: read chunks into a reusable
        # buffer in the executor and sock_sendall() each chunk.
        if offset:
            file.seek(offset)
        blocksize = (
            min(count, constants.SENDFILE_FALLBACK_READBUFFER_SIZE)
            if count else constants.SENDFILE_FALLBACK_READBUFFER_SIZE
        )
        buf = bytearray(blocksize)
        total_sent = 0
        try:
            while True:
                if count:
                    # Never send more than the caller asked for.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        break
                view = memoryview(buf)[:blocksize]
                # File reads may block; run them in the executor.
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    break  # EOF
                await self.sock_sendall(sock, view[:read])
                total_sent += read
            return total_sent
        finally:
            # Leave the file positioned after the last byte sent -- even
            # on error -- so the caller can resume via file.tell().
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
917
+
918
+ def _check_sendfile_params(self, sock, file, offset, count):
919
+ if 'b' not in getattr(file, 'mode', 'b'):
920
+ raise ValueError("file should be opened in binary mode")
921
+ if not sock.type == socket.SOCK_STREAM:
922
+ raise ValueError("only SOCK_STREAM type sockets are supported")
923
+ if count is not None:
924
+ if not isinstance(count, int):
925
+ raise TypeError(
926
+ "count must be a positive integer (got {!r})".format(count))
927
+ if count <= 0:
928
+ raise ValueError(
929
+ "count must be a positive integer (got {!r})".format(count))
930
+ if not isinstance(offset, int):
931
+ raise TypeError(
932
+ "offset must be a non-negative integer (got {!r})".format(
933
+ offset))
934
+ if offset < 0:
935
+ raise ValueError(
936
+ "offset must be a non-negative integer (got {!r})".format(
937
+ offset))
938
+
939
    async def _connect_sock(self, exceptions, addr_info, local_addr_infos=None):
        """Create, bind and connect one socket."""
        my_exceptions = []
        # Record this attempt's errors in the caller-shared list so
        # create_connection() can report them all if every attempt fails.
        exceptions.append(my_exceptions)
        family, type_, proto, _, address = addr_info
        sock = None
        try:
            sock = socket.socket(family=family, type=type_, proto=proto)
            sock.setblocking(False)
            if local_addr_infos is not None:
                for lfamily, _, _, _, laddr in local_addr_infos:
                    # skip local addresses of different family
                    if lfamily != family:
                        continue
                    try:
                        sock.bind(laddr)
                        break
                    except OSError as exc:
                        msg = (
                            f'error while attempting to bind on '
                            f'address {laddr!r}: '
                            f'{exc.strerror.lower()}'
                        )
                        exc = OSError(exc.errno, msg)
                        my_exceptions.append(exc)
                else:  # all bind attempts failed
                    if my_exceptions:
                        raise my_exceptions.pop()
                    else:
                        raise OSError(f"no matching local address with {family=} found")
            await self.sock_connect(sock, address)
            return sock
        except OSError as exc:
            my_exceptions.append(exc)
            if sock is not None:
                sock.close()
            raise
        except:
            # Non-OSError (e.g. CancelledError): close but don't record.
            if sock is not None:
                sock.close()
            raise
        finally:
            # Break reference cycles through the exception lists.
            exceptions = my_exceptions = None
982
+
983
    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0,
            proto=0, flags=0, sock=None,
            local_addr=None, server_hostname=None,
            ssl_handshake_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        """Connect to a TCP server.

        Create a streaming transport connection to a given internet host and
        port: socket family AF_INET or socket.AF_INET6 depending on host (or
        family if specified), socket type SOCK_STREAM. protocol_factory must be
        a callable returning a protocol instance.

        This method is a coroutine which will try to establish the connection
        in the background. When successful, the coroutine returns a
        (transport, protocol) pair.
        """
        if server_hostname is not None and not ssl:
            raise ValueError('server_hostname is only meaningful with ssl')

        if server_hostname is None and ssl:
            # Use host as default for server_hostname. It is an error
            # if host is empty or not set, e.g. when an
            # already-connected socket was passed or when only a port
            # is given. To avoid this error, you can pass
            # server_hostname='' -- this will bypass the hostname
            # check. (This also means that if host is a numeric
            # IP/IPv6 address, we will attempt to verify that exact
            # address; this will probably fail, but it is possible to
            # create a certificate for a specific IP address, so we
            # don't judge it here.)
            if not host:
                raise ValueError('You must set server_hostname '
                                 'when using ssl without a host')
            server_hostname = host

        if ssl_handshake_timeout is not None and not ssl:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')

        if sock is not None:
            _check_ssl_socket(sock)

        if happy_eyeballs_delay is not None and interleave is None:
            # If using happy eyeballs, default to interleave addresses by family
            interleave = 1

        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            infos = await self._ensure_resolved(
                (host, port), family=family,
                type=socket.SOCK_STREAM, proto=proto, flags=flags, loop=self)
            if not infos:
                raise OSError('getaddrinfo() returned empty list')

            if local_addr is not None:
                laddr_infos = await self._ensure_resolved(
                    local_addr, family=family,
                    type=socket.SOCK_STREAM, proto=proto,
                    flags=flags, loop=self)
                if not laddr_infos:
                    raise OSError('getaddrinfo() returned empty list')
            else:
                laddr_infos = None

            if interleave:
                infos = _interleave_addrinfos(infos, interleave)

            # Each _connect_sock() call appends its own error sub-list here.
            exceptions = []
            if happy_eyeballs_delay is None:
                # not using happy eyeballs
                for addrinfo in infos:
                    try:
                        sock = await self._connect_sock(
                            exceptions, addrinfo, laddr_infos)
                        break
                    except OSError:
                        continue
            else:  # using happy eyeballs
                sock, _, _ = await staggered.staggered_race(
                    (functools.partial(self._connect_sock,
                                       exceptions, addrinfo, laddr_infos)
                     for addrinfo in infos),
                    happy_eyeballs_delay, loop=self)

            if sock is None:
                # Every candidate address failed; summarize the errors.
                exceptions = [exc for sub in exceptions for exc in sub]
                try:
                    if len(exceptions) == 1:
                        raise exceptions[0]
                    else:
                        # If they all have the same str(), raise one.
                        model = str(exceptions[0])
                        if all(str(exc) == model for exc in exceptions):
                            raise exceptions[0]
                        # Raise a combined exception so the user can see all
                        # the various error messages.
                        raise OSError('Multiple exceptions: {}'.format(
                            ', '.join(str(exc) for exc in exceptions)))
                finally:
                    exceptions = None

        else:
            if sock is None:
                raise ValueError(
                    'host and port was not specified and no sock specified')
            if sock.type != socket.SOCK_STREAM:
                # We allow AF_INET, AF_INET6, AF_UNIX as long as they
                # are SOCK_STREAM.
                # We support passing AF_UNIX sockets even though we have
                # a dedicated API for that: create_unix_connection.
                # Disallowing AF_UNIX in this method, breaks backwards
                # compatibility.
                raise ValueError(
                    f'A Stream Socket was expected, got {sock!r}')

        transport, protocol = await self._create_connection_transport(
            sock, protocol_factory, ssl, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout)
        if self._debug:
            # Get the socket from the transport because SSL transport closes
            # the old socket and creates a new SSL socket
            sock = transport.get_extra_info('socket')
            logger.debug("%r connected to %s:%r: (%r, %r)",
                         sock, host, port, transport, protocol)
        return transport, protocol
1113
+
1114
    async def _create_connection_transport(
            self, sock, protocol_factory, ssl,
            server_hostname, server_side=False,
            ssl_handshake_timeout=None):
        # Wrap an already-connected socket in a transport (plain or SSL)
        # and wait until connection setup has completed.

        sock.setblocking(False)

        protocol = protocol_factory()
        waiter = self.create_future()
        if ssl:
            # ssl=True means "use a default context".
            sslcontext = None if isinstance(ssl, bool) else ssl
            transport = self._make_ssl_transport(
                sock, protocol, sslcontext, waiter,
                server_side=server_side, server_hostname=server_hostname,
                ssl_handshake_timeout=ssl_handshake_timeout)
        else:
            transport = self._make_socket_transport(sock, protocol, waiter)

        try:
            # Completed by the transport once the connection (and, for
            # SSL, the handshake) is established.
            await waiter
        except:
            transport.close()
            raise

        return transport, protocol
1139
+
1140
    async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send a file to transport.

        Return the total number of bytes which were sent.

        The method uses high-performance os.sendfile if available.

        file must be a regular file object opened in binary mode.

        offset tells from where to start reading the file. If specified,
        count is the total number of bytes to transmit as opposed to
        sending the file until EOF is reached. File position is updated on
        return or also in case of error in which case file.tell()
        can be used to figure out the number of bytes
        which were sent.

        fallback set to True makes asyncio to manually read and send
        the file when the platform does not support the sendfile syscall
        (e.g. Windows or SSL socket on Unix).

        Raise SendfileNotAvailableError if the system does not support
        sendfile syscall and fallback is False.
        """
        if transport.is_closing():
            raise RuntimeError("Transport is closing")
        # The transport advertises its sendfile capability via this flag.
        mode = getattr(transport, '_sendfile_compatible',
                       constants._SendfileMode.UNSUPPORTED)
        if mode is constants._SendfileMode.UNSUPPORTED:
            raise RuntimeError(
                f"sendfile is not supported for transport {transport!r}")
        if mode is constants._SendfileMode.TRY_NATIVE:
            try:
                return await self._sendfile_native(transport, file,
                                                   offset, count)
            except exceptions.SendfileNotAvailableError as exc:
                if not fallback:
                    raise

        # Reached for FALLBACK mode or after a failed native attempt.
        if not fallback:
            raise RuntimeError(
                f"fallback is disabled and native sendfile is not "
                f"supported for transport {transport!r}")

        return await self._sendfile_fallback(transport, file,
                                             offset, count)
1186
+
1187
+ async def _sendfile_native(self, transp, file, offset, count):
1188
+ raise exceptions.SendfileNotAvailableError(
1189
+ "sendfile syscall is not supported")
1190
+
1191
    async def _sendfile_fallback(self, transp, file, offset, count):
        # Pure-Python sendfile emulation over a transport: pause the
        # protocol, stream chunks through transp.write(), then restore.
        if offset:
            file.seek(offset)
        blocksize = min(count, 16384) if count else 16384
        buf = bytearray(blocksize)
        total_sent = 0
        proto = _SendfileFallbackProtocol(transp)
        try:
            while True:
                if count:
                    # Never send more than the caller asked for.
                    blocksize = min(count - total_sent, blocksize)
                    if blocksize <= 0:
                        return total_sent
                view = memoryview(buf)[:blocksize]
                # File reads may block; run them in the executor.
                read = await self.run_in_executor(None, file.readinto, view)
                if not read:
                    return total_sent  # EOF
                # Respect the transport's flow control before writing.
                await proto.drain()
                transp.write(view[:read])
                total_sent += read
        finally:
            # Reposition the file for resumption and hand the transport
            # back to the original protocol, even on error.
            if total_sent > 0 and hasattr(file, 'seek'):
                file.seek(offset + total_sent)
            await proto.restore()
1215
+
1216
    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        if ssl is None:
            raise RuntimeError('Python ssl module is not available')

        if not isinstance(sslcontext, ssl.SSLContext):
            raise TypeError(
                f'sslcontext is expected to be an instance of ssl.SSLContext, '
                f'got {sslcontext!r}')

        if not getattr(transport, '_start_tls_compatible', False):
            raise TypeError(
                f'transport {transport!r} is not supported by start_tls()')

        waiter = self.create_future()
        ssl_protocol = sslproto.SSLProtocol(
            self, protocol, sslcontext, waiter,
            server_side, server_hostname,
            ssl_handshake_timeout=ssl_handshake_timeout,
            call_connection_made=False)

        # Pause early so that "ssl_protocol.data_received()" doesn't
        # have a chance to get called before "ssl_protocol.connection_made()".
        transport.pause_reading()

        transport.set_protocol(ssl_protocol)
        conmade_cb = self.call_soon(ssl_protocol.connection_made, transport)
        resume_cb = self.call_soon(transport.resume_reading)

        try:
            # Waiter completes when the TLS handshake finishes.
            await waiter
        except BaseException:
            transport.close()
            conmade_cb.cancel()
            resume_cb.cancel()
            raise

        return ssl_protocol._app_transport
1261
+
1262
    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=_unset, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """Create datagram connection."""
        if sock is not None:
            # Pre-made socket: no other socket-configuring kwargs allowed.
            if sock.type != socket.SOCK_DGRAM:
                raise ValueError(
                    f'A UDP Socket was expected, got {sock!r}')
            if (local_addr or remote_addr or
                    family or proto or flags or
                    reuse_port or allow_broadcast):
                # show the problematic kwargs in exception msg
                opts = dict(local_addr=local_addr, remote_addr=remote_addr,
                            family=family, proto=proto, flags=flags,
                            reuse_address=reuse_address, reuse_port=reuse_port,
                            allow_broadcast=allow_broadcast)
                problems = ', '.join(f'{k}={v}' for k, v in opts.items() if v)
                raise ValueError(
                    f'socket modifier keyword arguments can not be used '
                    f'when sock is specified. ({problems})')
            sock.setblocking(False)
            r_addr = None
        else:
            if not (local_addr or remote_addr):
                if family == 0:
                    raise ValueError('unexpected address family')
                addr_pairs_info = (((family, proto), (None, None)),)
            elif hasattr(socket, 'AF_UNIX') and family == socket.AF_UNIX:
                for addr in (local_addr, remote_addr):
                    if addr is not None and not isinstance(addr, str):
                        raise TypeError('string is expected')

                # Remove a stale socket file unless it is abstract
                # (leading NUL) or unnamed.
                if local_addr and local_addr[0] not in (0, '\x00'):
                    try:
                        if stat.S_ISSOCK(os.stat(local_addr).st_mode):
                            os.remove(local_addr)
                    except FileNotFoundError:
                        pass
                    except OSError as err:
                        # Directory may have permissions only to create socket.
                        logger.error('Unable to check or remove stale UNIX '
                                     'socket %r: %r',
                                     local_addr, err)

                addr_pairs_info = (((family, proto),
                                    (local_addr, remote_addr)), )
            else:
                # join address by (family, protocol)
                addr_infos = {}  # Using order preserving dict
                for idx, addr in ((0, local_addr), (1, remote_addr)):
                    if addr is not None:
                        assert isinstance(addr, tuple) and len(addr) == 2, (
                            '2-tuple is expected')

                        infos = await self._ensure_resolved(
                            addr, family=family, type=socket.SOCK_DGRAM,
                            proto=proto, flags=flags, loop=self)
                        if not infos:
                            raise OSError('getaddrinfo() returned empty list')

                        for fam, _, pro, _, address in infos:
                            key = (fam, pro)
                            if key not in addr_infos:
                                addr_infos[key] = [None, None]
                            addr_infos[key][idx] = address

                # each addr has to have info for each (family, proto) pair
                addr_pairs_info = [
                    (key, addr_pair) for key, addr_pair in addr_infos.items()
                    if not ((local_addr and addr_pair[0] is None) or
                            (remote_addr and addr_pair[1] is None))]

                if not addr_pairs_info:
                    raise ValueError('can not get address information')

            exceptions = []

            # bpo-37228
            if reuse_address is not _unset:
                if reuse_address:
                    raise ValueError("Passing `reuse_address=True` is no "
                                     "longer supported, as the usage of "
                                     "SO_REUSEPORT in UDP poses a significant "
                                     "security concern.")
                else:
                    warnings.warn("The *reuse_address* parameter has been "
                                  "deprecated as of 3.5.10 and is scheduled "
                                  "for removal in 3.11.", DeprecationWarning,
                                  stacklevel=2)

            # Try each candidate (family, proto)/address pair until one
            # socket binds/connects successfully.
            for ((family, proto),
                 (local_address, remote_address)) in addr_pairs_info:
                sock = None
                r_addr = None
                try:
                    sock = socket.socket(
                        family=family, type=socket.SOCK_DGRAM, proto=proto)
                    if reuse_port:
                        _set_reuseport(sock)
                    if allow_broadcast:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
                    sock.setblocking(False)

                    if local_addr:
                        sock.bind(local_address)
                    if remote_addr:
                        if not allow_broadcast:
                            await self.sock_connect(sock, remote_address)
                        r_addr = remote_address
                except OSError as exc:
                    if sock is not None:
                        sock.close()
                    exceptions.append(exc)
                except:
                    if sock is not None:
                        sock.close()
                    raise
                else:
                    break
            else:
                raise exceptions[0]

        protocol = protocol_factory()
        waiter = self.create_future()
        transport = self._make_datagram_transport(
            sock, protocol, r_addr, waiter)
        if self._debug:
            if local_addr:
                logger.info("Datagram endpoint local_addr=%r remote_addr=%r "
                            "created: (%r, %r)",
                            local_addr, remote_addr, transport, protocol)
            else:
                logger.debug("Datagram endpoint remote_addr=%r created: "
                             "(%r, %r)",
                             remote_addr, transport, protocol)

        try:
            # Completed by the transport once it is ready.
            await waiter
        except:
            transport.close()
            raise

        return transport, protocol
1408
+
1409
+ async def _ensure_resolved(self, address, *,
1410
+ family=0, type=socket.SOCK_STREAM,
1411
+ proto=0, flags=0, loop):
1412
+ host, port = address[:2]
1413
+ info = _ipaddr_info(host, port, family, type, proto, *address[2:])
1414
+ if info is not None:
1415
+ # "host" is already a resolved IP.
1416
+ return [info]
1417
+ else:
1418
+ return await loop.getaddrinfo(host, port, family=family, type=type,
1419
+ proto=proto, flags=flags)
1420
+
1421
+ async def _create_server_getaddrinfo(self, host, port, family, flags):
1422
+ infos = await self._ensure_resolved((host, port), family=family,
1423
+ type=socket.SOCK_STREAM,
1424
+ flags=flags, loop=self)
1425
+ if not infos:
1426
+ raise OSError(f'getaddrinfo({host!r}) returned empty list')
1427
+ return infos
1428
+
1429
    async def create_server(
            self, protocol_factory, host=None, port=None,
            *,
            family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE,
            sock=None,
            backlog=100,
            ssl=None,
            reuse_address=None,
            reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """Create a TCP server.

        The host parameter can be a string, in that case the TCP server is
        bound to host and port.

        The host parameter can also be a sequence of strings and in that case
        the TCP server is bound to all hosts of the sequence. If a host
        appears multiple times (possibly indirectly e.g. when hostnames
        resolve to the same IP address), the server is only bound once to that
        host.

        Return a Server object which can be used to stop the service.

        This method is a coroutine.
        """
        if isinstance(ssl, bool):
            raise TypeError('ssl argument must be an SSLContext or None')

        if ssl_handshake_timeout is not None and ssl is None:
            raise ValueError(
                'ssl_handshake_timeout is only meaningful with ssl')

        if sock is not None:
            _check_ssl_socket(sock)

        if host is not None or port is not None:
            if sock is not None:
                raise ValueError(
                    'host/port and sock can not be specified at the same time')

            if reuse_address is None:
                reuse_address = os.name == 'posix' and sys.platform != 'cygwin'
            sockets = []
            if host == '':
                hosts = [None]
            elif (isinstance(host, str) or
                  not isinstance(host, collections.abc.Iterable)):
                hosts = [host]
            else:
                hosts = host

            # Resolve all hosts concurrently; dedupe identical results.
            fs = [self._create_server_getaddrinfo(host, port, family=family,
                                                  flags=flags)
                  for host in hosts]
            infos = await tasks.gather(*fs)
            infos = set(itertools.chain.from_iterable(infos))

            completed = False
            try:
                for res in infos:
                    af, socktype, proto, canonname, sa = res
                    try:
                        sock = socket.socket(af, socktype, proto)
                    except socket.error:
                        # Assume it's a bad family/type/protocol combination.
                        if self._debug:
                            logger.warning('create_server() failed to create '
                                           'socket.socket(%r, %r, %r)',
                                           af, socktype, proto, exc_info=True)
                        continue
                    sockets.append(sock)
                    if reuse_address:
                        sock.setsockopt(
                            socket.SOL_SOCKET, socket.SO_REUSEADDR, True)
                    if reuse_port:
                        _set_reuseport(sock)
                    # Disable IPv4/IPv6 dual stack support (enabled by
                    # default on Linux) which makes a single socket
                    # listen on both address families.
                    if (_HAS_IPv6 and
                            af == socket.AF_INET6 and
                            hasattr(socket, 'IPPROTO_IPV6')):
                        sock.setsockopt(socket.IPPROTO_IPV6,
                                        socket.IPV6_V6ONLY,
                                        True)
                    try:
                        sock.bind(sa)
                    except OSError as err:
                        raise OSError(err.errno, 'error while attempting '
                                      'to bind on address %r: %s'
                                      % (sa, err.strerror.lower())) from None
                completed = True
            finally:
                # Close everything opened so far if any bind failed.
                if not completed:
                    for sock in sockets:
                        sock.close()
        else:
            if sock is None:
                raise ValueError('Neither host/port nor sock were specified')
            if sock.type != socket.SOCK_STREAM:
                raise ValueError(f'A Stream Socket was expected, got {sock!r}')
            sockets = [sock]

        for sock in sockets:
            sock.setblocking(False)

        server = Server(self, sockets, protocol_factory,
                        ssl, backlog, ssl_handshake_timeout)
        if start_serving:
            server._start_serving()
            # Skip one loop iteration so that all 'loop.add_reader'
            # go through.
            await tasks.sleep(0)

        if self._debug:
            logger.info("%r is serving", server)
        return server
1548
+
1549
+ async def connect_accepted_socket(
1550
+ self, protocol_factory, sock,
1551
+ *, ssl=None,
1552
+ ssl_handshake_timeout=None):
1553
+ if sock.type != socket.SOCK_STREAM:
1554
+ raise ValueError(f'A Stream Socket was expected, got {sock!r}')
1555
+
1556
+ if ssl_handshake_timeout is not None and not ssl:
1557
+ raise ValueError(
1558
+ 'ssl_handshake_timeout is only meaningful with ssl')
1559
+
1560
+ if sock is not None:
1561
+ _check_ssl_socket(sock)
1562
+
1563
+ transport, protocol = await self._create_connection_transport(
1564
+ sock, protocol_factory, ssl, '', server_side=True,
1565
+ ssl_handshake_timeout=ssl_handshake_timeout)
1566
+ if self._debug:
1567
+ # Get the socket from the transport because SSL transport closes
1568
+ # the old socket and creates a new SSL socket
1569
+ sock = transport.get_extra_info('socket')
1570
+ logger.debug("%r handled: (%r, %r)", sock, transport, protocol)
1571
+ return transport, protocol
1572
+
1573
+ async def connect_read_pipe(self, protocol_factory, pipe):
1574
+ protocol = protocol_factory()
1575
+ waiter = self.create_future()
1576
+ transport = self._make_read_pipe_transport(pipe, protocol, waiter)
1577
+
1578
+ try:
1579
+ await waiter
1580
+ except:
1581
+ transport.close()
1582
+ raise
1583
+
1584
+ if self._debug:
1585
+ logger.debug('Read pipe %r connected: (%r, %r)',
1586
+ pipe.fileno(), transport, protocol)
1587
+ return transport, protocol
1588
+
1589
+ async def connect_write_pipe(self, protocol_factory, pipe):
1590
+ protocol = protocol_factory()
1591
+ waiter = self.create_future()
1592
+ transport = self._make_write_pipe_transport(pipe, protocol, waiter)
1593
+
1594
+ try:
1595
+ await waiter
1596
+ except:
1597
+ transport.close()
1598
+ raise
1599
+
1600
+ if self._debug:
1601
+ logger.debug('Write pipe %r connected: (%r, %r)',
1602
+ pipe.fileno(), transport, protocol)
1603
+ return transport, protocol
1604
+
1605
+ def _log_subprocess(self, msg, stdin, stdout, stderr):
1606
+ info = [msg]
1607
+ if stdin is not None:
1608
+ info.append(f'stdin={_format_pipe(stdin)}')
1609
+ if stdout is not None and stderr == subprocess.STDOUT:
1610
+ info.append(f'stdout=stderr={_format_pipe(stdout)}')
1611
+ else:
1612
+ if stdout is not None:
1613
+ info.append(f'stdout={_format_pipe(stdout)}')
1614
+ if stderr is not None:
1615
+ info.append(f'stderr={_format_pipe(stderr)}')
1616
+ logger.debug(' '.join(info))
1617
+
1618
+ async def subprocess_shell(self, protocol_factory, cmd, *,
1619
+ stdin=subprocess.PIPE,
1620
+ stdout=subprocess.PIPE,
1621
+ stderr=subprocess.PIPE,
1622
+ universal_newlines=False,
1623
+ shell=True, bufsize=0,
1624
+ encoding=None, errors=None, text=None,
1625
+ **kwargs):
1626
+ if not isinstance(cmd, (bytes, str)):
1627
+ raise ValueError("cmd must be a string")
1628
+ if universal_newlines:
1629
+ raise ValueError("universal_newlines must be False")
1630
+ if not shell:
1631
+ raise ValueError("shell must be True")
1632
+ if bufsize != 0:
1633
+ raise ValueError("bufsize must be 0")
1634
+ if text:
1635
+ raise ValueError("text must be False")
1636
+ if encoding is not None:
1637
+ raise ValueError("encoding must be None")
1638
+ if errors is not None:
1639
+ raise ValueError("errors must be None")
1640
+
1641
+ protocol = protocol_factory()
1642
+ debug_log = None
1643
+ if self._debug:
1644
+ # don't log parameters: they may contain sensitive information
1645
+ # (password) and may be too long
1646
+ debug_log = 'run shell command %r' % cmd
1647
+ self._log_subprocess(debug_log, stdin, stdout, stderr)
1648
+ transport = await self._make_subprocess_transport(
1649
+ protocol, cmd, True, stdin, stdout, stderr, bufsize, **kwargs)
1650
+ if self._debug and debug_log is not None:
1651
+ logger.info('%s: %r', debug_log, transport)
1652
+ return transport, protocol
1653
+
1654
+ async def subprocess_exec(self, protocol_factory, program, *args,
1655
+ stdin=subprocess.PIPE, stdout=subprocess.PIPE,
1656
+ stderr=subprocess.PIPE, universal_newlines=False,
1657
+ shell=False, bufsize=0,
1658
+ encoding=None, errors=None, text=None,
1659
+ **kwargs):
1660
+ if universal_newlines:
1661
+ raise ValueError("universal_newlines must be False")
1662
+ if shell:
1663
+ raise ValueError("shell must be False")
1664
+ if bufsize != 0:
1665
+ raise ValueError("bufsize must be 0")
1666
+ if text:
1667
+ raise ValueError("text must be False")
1668
+ if encoding is not None:
1669
+ raise ValueError("encoding must be None")
1670
+ if errors is not None:
1671
+ raise ValueError("errors must be None")
1672
+
1673
+ popen_args = (program,) + args
1674
+ protocol = protocol_factory()
1675
+ debug_log = None
1676
+ if self._debug:
1677
+ # don't log parameters: they may contain sensitive information
1678
+ # (password) and may be too long
1679
+ debug_log = f'execute program {program!r}'
1680
+ self._log_subprocess(debug_log, stdin, stdout, stderr)
1681
+ transport = await self._make_subprocess_transport(
1682
+ protocol, popen_args, False, stdin, stdout, stderr,
1683
+ bufsize, **kwargs)
1684
+ if self._debug and debug_log is not None:
1685
+ logger.info('%s: %r', debug_log, transport)
1686
+ return transport, protocol
1687
+
1688
+ def get_exception_handler(self):
1689
+ """Return an exception handler, or None if the default one is in use.
1690
+ """
1691
+ return self._exception_handler
1692
+
1693
+ def set_exception_handler(self, handler):
1694
+ """Set handler as the new event loop exception handler.
1695
+
1696
+ If handler is None, the default exception handler will
1697
+ be set.
1698
+
1699
+ If handler is a callable object, it should have a
1700
+ signature matching '(loop, context)', where 'loop'
1701
+ will be a reference to the active event loop, 'context'
1702
+ will be a dict object (see `call_exception_handler()`
1703
+ documentation for details about context).
1704
+ """
1705
+ if handler is not None and not callable(handler):
1706
+ raise TypeError(f'A callable object or None is expected, '
1707
+ f'got {handler!r}')
1708
+ self._exception_handler = handler
1709
+
1710
+ def default_exception_handler(self, context):
1711
+ """Default exception handler.
1712
+
1713
+ This is called when an exception occurs and no exception
1714
+ handler is set, and can be called by a custom exception
1715
+ handler that wants to defer to the default behavior.
1716
+
1717
+ This default handler logs the error message and other
1718
+ context-dependent information. In debug mode, a truncated
1719
+ stack trace is also appended showing where the given object
1720
+ (e.g. a handle or future or task) was created, if any.
1721
+
1722
+ The context parameter has the same meaning as in
1723
+ `call_exception_handler()`.
1724
+ """
1725
+ message = context.get('message')
1726
+ if not message:
1727
+ message = 'Unhandled exception in event loop'
1728
+
1729
+ exception = context.get('exception')
1730
+ if exception is not None:
1731
+ exc_info = (type(exception), exception, exception.__traceback__)
1732
+ else:
1733
+ exc_info = False
1734
+
1735
+ if ('source_traceback' not in context and
1736
+ self._current_handle is not None and
1737
+ self._current_handle._source_traceback):
1738
+ context['handle_traceback'] = \
1739
+ self._current_handle._source_traceback
1740
+
1741
+ log_lines = [message]
1742
+ for key in sorted(context):
1743
+ if key in {'message', 'exception'}:
1744
+ continue
1745
+ value = context[key]
1746
+ if key == 'source_traceback':
1747
+ tb = ''.join(traceback.format_list(value))
1748
+ value = 'Object created at (most recent call last):\n'
1749
+ value += tb.rstrip()
1750
+ elif key == 'handle_traceback':
1751
+ tb = ''.join(traceback.format_list(value))
1752
+ value = 'Handle created at (most recent call last):\n'
1753
+ value += tb.rstrip()
1754
+ else:
1755
+ value = repr(value)
1756
+ log_lines.append(f'{key}: {value}')
1757
+
1758
+ logger.error('\n'.join(log_lines), exc_info=exc_info)
1759
+
1760
+ def call_exception_handler(self, context):
1761
+ """Call the current event loop's exception handler.
1762
+
1763
+ The context argument is a dict containing the following keys:
1764
+
1765
+ - 'message': Error message;
1766
+ - 'exception' (optional): Exception object;
1767
+ - 'future' (optional): Future instance;
1768
+ - 'task' (optional): Task instance;
1769
+ - 'handle' (optional): Handle instance;
1770
+ - 'protocol' (optional): Protocol instance;
1771
+ - 'transport' (optional): Transport instance;
1772
+ - 'socket' (optional): Socket instance;
1773
+ - 'asyncgen' (optional): Asynchronous generator that caused
1774
+ the exception.
1775
+
1776
+ New keys maybe introduced in the future.
1777
+
1778
+ Note: do not overload this method in an event loop subclass.
1779
+ For custom exception handling, use the
1780
+ `set_exception_handler()` method.
1781
+ """
1782
+ if self._exception_handler is None:
1783
+ try:
1784
+ self.default_exception_handler(context)
1785
+ except (SystemExit, KeyboardInterrupt):
1786
+ raise
1787
+ except BaseException:
1788
+ # Second protection layer for unexpected errors
1789
+ # in the default implementation, as well as for subclassed
1790
+ # event loops with overloaded "default_exception_handler".
1791
+ logger.error('Exception in default exception handler',
1792
+ exc_info=True)
1793
+ else:
1794
+ try:
1795
+ self._exception_handler(self, context)
1796
+ except (SystemExit, KeyboardInterrupt):
1797
+ raise
1798
+ except BaseException as exc:
1799
+ # Exception in the user set custom exception handler.
1800
+ try:
1801
+ # Let's try default handler.
1802
+ self.default_exception_handler({
1803
+ 'message': 'Unhandled error in exception handler',
1804
+ 'exception': exc,
1805
+ 'context': context,
1806
+ })
1807
+ except (SystemExit, KeyboardInterrupt):
1808
+ raise
1809
+ except BaseException:
1810
+ # Guard 'default_exception_handler' in case it is
1811
+ # overloaded.
1812
+ logger.error('Exception in default exception handler '
1813
+ 'while handling an unexpected error '
1814
+ 'in custom exception handler',
1815
+ exc_info=True)
1816
+
1817
+ def _add_callback(self, handle):
1818
+ """Add a Handle to _ready."""
1819
+ if not handle._cancelled:
1820
+ self._ready.append(handle)
1821
+
1822
+ def _add_callback_signalsafe(self, handle):
1823
+ """Like _add_callback() but called from a signal handler."""
1824
+ self._add_callback(handle)
1825
+ self._write_to_self()
1826
+
1827
+ def _timer_handle_cancelled(self, handle):
1828
+ """Notification that a TimerHandle has been cancelled."""
1829
+ if handle._scheduled:
1830
+ self._timer_cancelled_count += 1
1831
+
1832
+ def _run_once(self):
1833
+ """Run one full iteration of the event loop.
1834
+
1835
+ This calls all currently ready callbacks, polls for I/O,
1836
+ schedules the resulting callbacks, and finally schedules
1837
+ 'call_later' callbacks.
1838
+ """
1839
+
1840
+ sched_count = len(self._scheduled)
1841
+ if (sched_count > _MIN_SCHEDULED_TIMER_HANDLES and
1842
+ self._timer_cancelled_count / sched_count >
1843
+ _MIN_CANCELLED_TIMER_HANDLES_FRACTION):
1844
+ # Remove delayed calls that were cancelled if their number
1845
+ # is too high
1846
+ new_scheduled = []
1847
+ for handle in self._scheduled:
1848
+ if handle._cancelled:
1849
+ handle._scheduled = False
1850
+ else:
1851
+ new_scheduled.append(handle)
1852
+
1853
+ heapq.heapify(new_scheduled)
1854
+ self._scheduled = new_scheduled
1855
+ self._timer_cancelled_count = 0
1856
+ else:
1857
+ # Remove delayed calls that were cancelled from head of queue.
1858
+ while self._scheduled and self._scheduled[0]._cancelled:
1859
+ self._timer_cancelled_count -= 1
1860
+ handle = heapq.heappop(self._scheduled)
1861
+ handle._scheduled = False
1862
+
1863
+ timeout = None
1864
+ if self._ready or self._stopping:
1865
+ timeout = 0
1866
+ elif self._scheduled:
1867
+ # Compute the desired timeout.
1868
+ when = self._scheduled[0]._when
1869
+ timeout = min(max(0, when - self.time()), MAXIMUM_SELECT_TIMEOUT)
1870
+
1871
+ event_list = self._selector.select(timeout)
1872
+ self._process_events(event_list)
1873
+ # Needed to break cycles when an exception occurs.
1874
+ event_list = None
1875
+
1876
+ # Handle 'later' callbacks that are ready.
1877
+ end_time = self.time() + self._clock_resolution
1878
+ while self._scheduled:
1879
+ handle = self._scheduled[0]
1880
+ if handle._when >= end_time:
1881
+ break
1882
+ handle = heapq.heappop(self._scheduled)
1883
+ handle._scheduled = False
1884
+ self._ready.append(handle)
1885
+
1886
+ # This is the only place where callbacks are actually *called*.
1887
+ # All other places just add them to ready.
1888
+ # Note: We run all currently scheduled callbacks, but not any
1889
+ # callbacks scheduled by callbacks run this time around --
1890
+ # they will be run the next time (after another I/O poll).
1891
+ # Use an idiom that is thread-safe without using locks.
1892
+ ntodo = len(self._ready)
1893
+ for i in range(ntodo):
1894
+ handle = self._ready.popleft()
1895
+ if handle._cancelled:
1896
+ continue
1897
+ if self._debug:
1898
+ try:
1899
+ self._current_handle = handle
1900
+ t0 = self.time()
1901
+ handle._run()
1902
+ dt = self.time() - t0
1903
+ if dt >= self.slow_callback_duration:
1904
+ logger.warning('Executing %s took %.3f seconds',
1905
+ _format_handle(handle), dt)
1906
+ finally:
1907
+ self._current_handle = None
1908
+ else:
1909
+ handle._run()
1910
+ handle = None # Needed to break cycles when an exception occurs.
1911
+
1912
+ def _set_coroutine_origin_tracking(self, enabled):
1913
+ if bool(enabled) == bool(self._coroutine_origin_tracking_enabled):
1914
+ return
1915
+
1916
+ if enabled:
1917
+ self._coroutine_origin_tracking_saved_depth = (
1918
+ sys.get_coroutine_origin_tracking_depth())
1919
+ sys.set_coroutine_origin_tracking_depth(
1920
+ constants.DEBUG_STACK_DEPTH)
1921
+ else:
1922
+ sys.set_coroutine_origin_tracking_depth(
1923
+ self._coroutine_origin_tracking_saved_depth)
1924
+
1925
+ self._coroutine_origin_tracking_enabled = enabled
1926
+
1927
+ def get_debug(self):
1928
+ return self._debug
1929
+
1930
+ def set_debug(self, enabled):
1931
+ self._debug = enabled
1932
+
1933
+ if self.is_running():
1934
+ self.call_soon_threadsafe(self._set_coroutine_origin_tracking, enabled)
python310/asyncio/base_futures.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__ = ()
2
+
3
+ import reprlib
4
+ from _thread import get_ident
5
+
6
+ from . import format_helpers
7
+
8
+ # States for Future.
9
+ _PENDING = 'PENDING'
10
+ _CANCELLED = 'CANCELLED'
11
+ _FINISHED = 'FINISHED'
12
+
13
+
14
+ def isfuture(obj):
15
+ """Check for a Future.
16
+
17
+ This returns True when obj is a Future instance or is advertising
18
+ itself as duck-type compatible by setting _asyncio_future_blocking.
19
+ See comment in Future for more details.
20
+ """
21
+ return (hasattr(obj.__class__, '_asyncio_future_blocking') and
22
+ obj._asyncio_future_blocking is not None)
23
+
24
+
25
+ def _format_callbacks(cb):
26
+ """helper function for Future.__repr__"""
27
+ size = len(cb)
28
+ if not size:
29
+ cb = ''
30
+
31
+ def format_cb(callback):
32
+ return format_helpers._format_callback_source(callback, ())
33
+
34
+ if size == 1:
35
+ cb = format_cb(cb[0][0])
36
+ elif size == 2:
37
+ cb = '{}, {}'.format(format_cb(cb[0][0]), format_cb(cb[1][0]))
38
+ elif size > 2:
39
+ cb = '{}, <{} more>, {}'.format(format_cb(cb[0][0]),
40
+ size - 2,
41
+ format_cb(cb[-1][0]))
42
+ return f'cb=[{cb}]'
43
+
44
+
45
+ # bpo-42183: _repr_running is needed for repr protection
46
+ # when a Future or Task result contains itself directly or indirectly.
47
+ # The logic is borrowed from @reprlib.recursive_repr decorator.
48
+ # Unfortunately, the direct decorator usage is impossible because of
49
+ # AttributeError: '_asyncio.Task' object has no attribute '__module__' error.
50
+ #
51
+ # After fixing this thing we can return to the decorator based approach.
52
+ _repr_running = set()
53
+
54
+
55
+ def _future_repr_info(future):
56
+ # (Future) -> str
57
+ """helper function for Future.__repr__"""
58
+ info = [future._state.lower()]
59
+ if future._state == _FINISHED:
60
+ if future._exception is not None:
61
+ info.append(f'exception={future._exception!r}')
62
+ else:
63
+ key = id(future), get_ident()
64
+ if key in _repr_running:
65
+ result = '...'
66
+ else:
67
+ _repr_running.add(key)
68
+ try:
69
+ # use reprlib to limit the length of the output, especially
70
+ # for very long strings
71
+ result = reprlib.repr(future._result)
72
+ finally:
73
+ _repr_running.discard(key)
74
+ info.append(f'result={result}')
75
+ if future._callbacks:
76
+ info.append(_format_callbacks(future._callbacks))
77
+ if future._source_traceback:
78
+ frame = future._source_traceback[-1]
79
+ info.append(f'created at {frame[0]}:{frame[1]}')
80
+ return info
python310/asyncio/base_subprocess.py ADDED
@@ -0,0 +1,285 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import collections
2
+ import subprocess
3
+ import warnings
4
+
5
+ from . import protocols
6
+ from . import transports
7
+ from .log import logger
8
+
9
+
10
+ class BaseSubprocessTransport(transports.SubprocessTransport):
11
+
12
+ def __init__(self, loop, protocol, args, shell,
13
+ stdin, stdout, stderr, bufsize,
14
+ waiter=None, extra=None, **kwargs):
15
+ super().__init__(extra)
16
+ self._closed = False
17
+ self._protocol = protocol
18
+ self._loop = loop
19
+ self._proc = None
20
+ self._pid = None
21
+ self._returncode = None
22
+ self._exit_waiters = []
23
+ self._pending_calls = collections.deque()
24
+ self._pipes = {}
25
+ self._finished = False
26
+
27
+ if stdin == subprocess.PIPE:
28
+ self._pipes[0] = None
29
+ if stdout == subprocess.PIPE:
30
+ self._pipes[1] = None
31
+ if stderr == subprocess.PIPE:
32
+ self._pipes[2] = None
33
+
34
+ # Create the child process: set the _proc attribute
35
+ try:
36
+ self._start(args=args, shell=shell, stdin=stdin, stdout=stdout,
37
+ stderr=stderr, bufsize=bufsize, **kwargs)
38
+ except:
39
+ self.close()
40
+ raise
41
+
42
+ self._pid = self._proc.pid
43
+ self._extra['subprocess'] = self._proc
44
+
45
+ if self._loop.get_debug():
46
+ if isinstance(args, (bytes, str)):
47
+ program = args
48
+ else:
49
+ program = args[0]
50
+ logger.debug('process %r created: pid %s',
51
+ program, self._pid)
52
+
53
+ self._loop.create_task(self._connect_pipes(waiter))
54
+
55
+ def __repr__(self):
56
+ info = [self.__class__.__name__]
57
+ if self._closed:
58
+ info.append('closed')
59
+ if self._pid is not None:
60
+ info.append(f'pid={self._pid}')
61
+ if self._returncode is not None:
62
+ info.append(f'returncode={self._returncode}')
63
+ elif self._pid is not None:
64
+ info.append('running')
65
+ else:
66
+ info.append('not started')
67
+
68
+ stdin = self._pipes.get(0)
69
+ if stdin is not None:
70
+ info.append(f'stdin={stdin.pipe}')
71
+
72
+ stdout = self._pipes.get(1)
73
+ stderr = self._pipes.get(2)
74
+ if stdout is not None and stderr is stdout:
75
+ info.append(f'stdout=stderr={stdout.pipe}')
76
+ else:
77
+ if stdout is not None:
78
+ info.append(f'stdout={stdout.pipe}')
79
+ if stderr is not None:
80
+ info.append(f'stderr={stderr.pipe}')
81
+
82
+ return '<{}>'.format(' '.join(info))
83
+
84
+ def _start(self, args, shell, stdin, stdout, stderr, bufsize, **kwargs):
85
+ raise NotImplementedError
86
+
87
+ def set_protocol(self, protocol):
88
+ self._protocol = protocol
89
+
90
+ def get_protocol(self):
91
+ return self._protocol
92
+
93
+ def is_closing(self):
94
+ return self._closed
95
+
96
+ def close(self):
97
+ if self._closed:
98
+ return
99
+ self._closed = True
100
+
101
+ for proto in self._pipes.values():
102
+ if proto is None:
103
+ continue
104
+ proto.pipe.close()
105
+
106
+ if (self._proc is not None and
107
+ # has the child process finished?
108
+ self._returncode is None and
109
+ # the child process has finished, but the
110
+ # transport hasn't been notified yet?
111
+ self._proc.poll() is None):
112
+
113
+ if self._loop.get_debug():
114
+ logger.warning('Close running child process: kill %r', self)
115
+
116
+ try:
117
+ self._proc.kill()
118
+ except ProcessLookupError:
119
+ pass
120
+
121
+ # Don't clear the _proc reference yet: _post_init() may still run
122
+
123
+ def __del__(self, _warn=warnings.warn):
124
+ if not self._closed:
125
+ _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
126
+ self.close()
127
+
128
+ def get_pid(self):
129
+ return self._pid
130
+
131
+ def get_returncode(self):
132
+ return self._returncode
133
+
134
+ def get_pipe_transport(self, fd):
135
+ if fd in self._pipes:
136
+ return self._pipes[fd].pipe
137
+ else:
138
+ return None
139
+
140
+ def _check_proc(self):
141
+ if self._proc is None:
142
+ raise ProcessLookupError()
143
+
144
+ def send_signal(self, signal):
145
+ self._check_proc()
146
+ self._proc.send_signal(signal)
147
+
148
+ def terminate(self):
149
+ self._check_proc()
150
+ self._proc.terminate()
151
+
152
+ def kill(self):
153
+ self._check_proc()
154
+ self._proc.kill()
155
+
156
+ async def _connect_pipes(self, waiter):
157
+ try:
158
+ proc = self._proc
159
+ loop = self._loop
160
+
161
+ if proc.stdin is not None:
162
+ _, pipe = await loop.connect_write_pipe(
163
+ lambda: WriteSubprocessPipeProto(self, 0),
164
+ proc.stdin)
165
+ self._pipes[0] = pipe
166
+
167
+ if proc.stdout is not None:
168
+ _, pipe = await loop.connect_read_pipe(
169
+ lambda: ReadSubprocessPipeProto(self, 1),
170
+ proc.stdout)
171
+ self._pipes[1] = pipe
172
+
173
+ if proc.stderr is not None:
174
+ _, pipe = await loop.connect_read_pipe(
175
+ lambda: ReadSubprocessPipeProto(self, 2),
176
+ proc.stderr)
177
+ self._pipes[2] = pipe
178
+
179
+ assert self._pending_calls is not None
180
+
181
+ loop.call_soon(self._protocol.connection_made, self)
182
+ for callback, data in self._pending_calls:
183
+ loop.call_soon(callback, *data)
184
+ self._pending_calls = None
185
+ except (SystemExit, KeyboardInterrupt):
186
+ raise
187
+ except BaseException as exc:
188
+ if waiter is not None and not waiter.cancelled():
189
+ waiter.set_exception(exc)
190
+ else:
191
+ if waiter is not None and not waiter.cancelled():
192
+ waiter.set_result(None)
193
+
194
+ def _call(self, cb, *data):
195
+ if self._pending_calls is not None:
196
+ self._pending_calls.append((cb, data))
197
+ else:
198
+ self._loop.call_soon(cb, *data)
199
+
200
+ def _pipe_connection_lost(self, fd, exc):
201
+ self._call(self._protocol.pipe_connection_lost, fd, exc)
202
+ self._try_finish()
203
+
204
+ def _pipe_data_received(self, fd, data):
205
+ self._call(self._protocol.pipe_data_received, fd, data)
206
+
207
+ def _process_exited(self, returncode):
208
+ assert returncode is not None, returncode
209
+ assert self._returncode is None, self._returncode
210
+ if self._loop.get_debug():
211
+ logger.info('%r exited with return code %r', self, returncode)
212
+ self._returncode = returncode
213
+ if self._proc.returncode is None:
214
+ # asyncio uses a child watcher: copy the status into the Popen
215
+ # object. On Python 3.6, it is required to avoid a ResourceWarning.
216
+ self._proc.returncode = returncode
217
+ self._call(self._protocol.process_exited)
218
+ self._try_finish()
219
+
220
+ # wake up futures waiting for wait()
221
+ for waiter in self._exit_waiters:
222
+ if not waiter.cancelled():
223
+ waiter.set_result(returncode)
224
+ self._exit_waiters = None
225
+
226
+ async def _wait(self):
227
+ """Wait until the process exit and return the process return code.
228
+
229
+ This method is a coroutine."""
230
+ if self._returncode is not None:
231
+ return self._returncode
232
+
233
+ waiter = self._loop.create_future()
234
+ self._exit_waiters.append(waiter)
235
+ return await waiter
236
+
237
+ def _try_finish(self):
238
+ assert not self._finished
239
+ if self._returncode is None:
240
+ return
241
+ if all(p is not None and p.disconnected
242
+ for p in self._pipes.values()):
243
+ self._finished = True
244
+ self._call(self._call_connection_lost, None)
245
+
246
+ def _call_connection_lost(self, exc):
247
+ try:
248
+ self._protocol.connection_lost(exc)
249
+ finally:
250
+ self._loop = None
251
+ self._proc = None
252
+ self._protocol = None
253
+
254
+
255
+ class WriteSubprocessPipeProto(protocols.BaseProtocol):
256
+
257
+ def __init__(self, proc, fd):
258
+ self.proc = proc
259
+ self.fd = fd
260
+ self.pipe = None
261
+ self.disconnected = False
262
+
263
+ def connection_made(self, transport):
264
+ self.pipe = transport
265
+
266
+ def __repr__(self):
267
+ return f'<{self.__class__.__name__} fd={self.fd} pipe={self.pipe!r}>'
268
+
269
+ def connection_lost(self, exc):
270
+ self.disconnected = True
271
+ self.proc._pipe_connection_lost(self.fd, exc)
272
+ self.proc = None
273
+
274
+ def pause_writing(self):
275
+ self.proc._protocol.pause_writing()
276
+
277
+ def resume_writing(self):
278
+ self.proc._protocol.resume_writing()
279
+
280
+
281
+ class ReadSubprocessPipeProto(WriteSubprocessPipeProto,
282
+ protocols.Protocol):
283
+
284
+ def data_received(self, data):
285
+ self.proc._pipe_data_received(self.fd, data)
python310/asyncio/base_tasks.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import linecache
2
+ import traceback
3
+
4
+ from . import base_futures
5
+ from . import coroutines
6
+
7
+
8
+ def _task_repr_info(task):
9
+ info = base_futures._future_repr_info(task)
10
+
11
+ if task._must_cancel:
12
+ # replace status
13
+ info[0] = 'cancelling'
14
+
15
+ info.insert(1, 'name=%r' % task.get_name())
16
+
17
+ coro = coroutines._format_coroutine(task._coro)
18
+ info.insert(2, f'coro=<{coro}>')
19
+
20
+ if task._fut_waiter is not None:
21
+ info.insert(3, f'wait_for={task._fut_waiter!r}')
22
+ return info
23
+
24
+
25
+ def _task_get_stack(task, limit):
26
+ frames = []
27
+ if hasattr(task._coro, 'cr_frame'):
28
+ # case 1: 'async def' coroutines
29
+ f = task._coro.cr_frame
30
+ elif hasattr(task._coro, 'gi_frame'):
31
+ # case 2: legacy coroutines
32
+ f = task._coro.gi_frame
33
+ elif hasattr(task._coro, 'ag_frame'):
34
+ # case 3: async generators
35
+ f = task._coro.ag_frame
36
+ else:
37
+ # case 4: unknown objects
38
+ f = None
39
+ if f is not None:
40
+ while f is not None:
41
+ if limit is not None:
42
+ if limit <= 0:
43
+ break
44
+ limit -= 1
45
+ frames.append(f)
46
+ f = f.f_back
47
+ frames.reverse()
48
+ elif task._exception is not None:
49
+ tb = task._exception.__traceback__
50
+ while tb is not None:
51
+ if limit is not None:
52
+ if limit <= 0:
53
+ break
54
+ limit -= 1
55
+ frames.append(tb.tb_frame)
56
+ tb = tb.tb_next
57
+ return frames
58
+
59
+
60
+ def _task_print_stack(task, limit, file):
61
+ extracted_list = []
62
+ checked = set()
63
+ for f in task.get_stack(limit=limit):
64
+ lineno = f.f_lineno
65
+ co = f.f_code
66
+ filename = co.co_filename
67
+ name = co.co_name
68
+ if filename not in checked:
69
+ checked.add(filename)
70
+ linecache.checkcache(filename)
71
+ line = linecache.getline(filename, lineno, f.f_globals)
72
+ extracted_list.append((filename, lineno, name, line))
73
+
74
+ exc = task._exception
75
+ if not extracted_list:
76
+ print(f'No stack for {task!r}', file=file)
77
+ elif exc is not None:
78
+ print(f'Traceback for {task!r} (most recent call last):', file=file)
79
+ else:
80
+ print(f'Stack for {task!r} (most recent call last):', file=file)
81
+
82
+ traceback.print_list(extracted_list, file=file)
83
+ if exc is not None:
84
+ for line in traceback.format_exception_only(exc.__class__, exc):
85
+ print(line, file=file, end='')
python310/asyncio/constants.py ADDED
@@ -0,0 +1,27 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import enum
2
+
3
+ # After the connection is lost, log warnings after this many write()s.
4
+ LOG_THRESHOLD_FOR_CONNLOST_WRITES = 5
5
+
6
+ # Seconds to wait before retrying accept().
7
+ ACCEPT_RETRY_DELAY = 1
8
+
9
+ # Number of stack entries to capture in debug mode.
10
+ # The larger the number, the slower the operation in debug mode
11
+ # (see extract_stack() in format_helpers.py).
12
+ DEBUG_STACK_DEPTH = 10
13
+
14
+ # Number of seconds to wait for SSL handshake to complete
15
+ # The default timeout matches that of Nginx.
16
+ SSL_HANDSHAKE_TIMEOUT = 60.0
17
+
18
+ # Used in sendfile fallback code. We use fallback for platforms
19
+ # that don't support sendfile, or for TLS connections.
20
+ SENDFILE_FALLBACK_READBUFFER_SIZE = 1024 * 256
21
+
22
+ # The enum should be here to break circular dependencies between
23
+ # base_events and sslproto
24
+ class _SendfileMode(enum.Enum):
25
+ UNSUPPORTED = enum.auto()
26
+ TRY_NATIVE = enum.auto()
27
+ FALLBACK = enum.auto()
python310/asyncio/coroutines.py ADDED
@@ -0,0 +1,269 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ __all__ = 'coroutine', 'iscoroutinefunction', 'iscoroutine'
2
+
3
+ import collections.abc
4
+ import functools
5
+ import inspect
6
+ import os
7
+ import sys
8
+ import traceback
9
+ import types
10
+ import warnings
11
+
12
+ from . import base_futures
13
+ from . import constants
14
+ from . import format_helpers
15
+ from .log import logger
16
+
17
+
18
+ def _is_debug_mode():
19
+ # If you set _DEBUG to true, @coroutine will wrap the resulting
20
+ # generator objects in a CoroWrapper instance (defined below). That
21
+ # instance will log a message when the generator is never iterated
22
+ # over, which may happen when you forget to use "await" or "yield from"
23
+ # with a coroutine call.
24
+ # Note that the value of the _DEBUG flag is taken
25
+ # when the decorator is used, so to be of any use it must be set
26
+ # before you define your coroutines. A downside of using this feature
27
+ # is that tracebacks show entries for the CoroWrapper.__next__ method
28
+ # when _DEBUG is true.
29
+ return sys.flags.dev_mode or (not sys.flags.ignore_environment and
30
+ bool(os.environ.get('PYTHONASYNCIODEBUG')))
31
+
32
+
33
+ _DEBUG = _is_debug_mode()
34
+
35
+
36
class CoroWrapper:
    """Wrapper for a coroutine/generator object, used only in _DEBUG mode.

    Delegates the generator/coroutine protocol to the wrapped object and
    records the creation call stack so that __del__ can report coroutines
    that were never awaited / yielded from.
    """

    def __init__(self, gen, func=None):
        # Accept either a plain generator or a native coroutine object.
        assert inspect.isgenerator(gen) or inspect.iscoroutine(gen), gen
        self.gen = gen
        self.func = func  # Used to unwrap @coroutine decorator
        # Creation stack, used by __repr__ and by __del__ diagnostics.
        self._source_traceback = format_helpers.extract_stack(sys._getframe(1))
        # __name__ / __qualname__ are copied so repr helpers can use them.
        self.__name__ = getattr(gen, '__name__', None)
        self.__qualname__ = getattr(gen, '__qualname__', None)

    def __repr__(self):
        coro_repr = _format_coroutine(self)
        if self._source_traceback:
            # Last recorded frame is the creation site.
            frame = self._source_traceback[-1]
            coro_repr += f', created at {frame[0]}:{frame[1]}'

        return f'<{self.__class__.__name__} {coro_repr}>'

    def __iter__(self):
        return self

    def __next__(self):
        # Drive the wrapped generator exactly like the iterator protocol.
        return self.gen.send(None)

    def send(self, value):
        return self.gen.send(value)

    def throw(self, type, value=None, traceback=None):
        return self.gen.throw(type, value, traceback)

    def close(self):
        return self.gen.close()

    @property
    def gi_frame(self):
        return self.gen.gi_frame

    @property
    def gi_running(self):
        return self.gen.gi_running

    @property
    def gi_code(self):
        return self.gen.gi_code

    def __await__(self):
        # The wrapper is itself awaitable; awaiting iterates self.
        return self

    @property
    def gi_yieldfrom(self):
        return self.gen.gi_yieldfrom

    def __del__(self):
        # Be careful accessing self.gen.frame -- self.gen might not exist.
        gen = getattr(self, 'gen', None)
        frame = getattr(gen, 'gi_frame', None)
        # f_lasti == -1 means the generator was never started, i.e. the
        # coroutine was created but never awaited / yielded from.
        if frame is not None and frame.f_lasti == -1:
            msg = f'{self!r} was never yielded from'
            tb = getattr(self, '_source_traceback', ())
            if tb:
                tb = ''.join(traceback.format_list(tb))
                msg += (f'\nCoroutine object created at '
                        f'(most recent call last, truncated to '
                        f'{constants.DEBUG_STACK_DEPTH} last lines):\n')
                msg += tb.rstrip()
            logger.error(msg)
103
+
104
+
105
def coroutine(func):
    """Decorator to mark coroutines.

    If the coroutine is not yielded from before it is destroyed,
    an error message is logged.
    """
    # Deprecated entry point; kept for backward compatibility only.
    warnings.warn('"@coroutine" decorator is deprecated since Python 3.8, use "async def" instead',
                  DeprecationWarning,
                  stacklevel=2)
    if inspect.iscoroutinefunction(func):
        # In Python 3.5 that's all we need to do for coroutines
        # defined with "async def".
        return func

    if inspect.isgeneratorfunction(func):
        coro = func
    else:
        # Plain function: wrap it in a generator that calls it and then
        # delegates to whatever future/generator/awaitable it returned.
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            if (base_futures.isfuture(res) or inspect.isgenerator(res) or
                    isinstance(res, CoroWrapper)):
                res = yield from res
            else:
                # If 'res' is an awaitable, run it.
                try:
                    await_meth = res.__await__
                except AttributeError:
                    pass
                else:
                    if isinstance(res, collections.abc.Awaitable):
                        res = yield from await_meth()
            return res

    coro = types.coroutine(coro)
    if not _DEBUG:
        wrapper = coro
    else:
        # Debug mode: wrap every produced coroutine in CoroWrapper so
        # "never yielded from" mistakes get logged (CoroWrapper.__del__).
        @functools.wraps(func)
        def wrapper(*args, **kwds):
            w = CoroWrapper(coro(*args, **kwds), func=func)
            if w._source_traceback:
                # Drop the frame for this wrapper itself.
                del w._source_traceback[-1]
            # Python < 3.5 does not implement __qualname__
            # on generator objects, so we set it manually.
            # We use getattr as some callables (such as
            # functools.partial may lack __qualname__).
            w.__name__ = getattr(func, '__name__', None)
            w.__qualname__ = getattr(func, '__qualname__', None)
            return w

    wrapper._is_coroutine = _is_coroutine  # For iscoroutinefunction().
    return wrapper
158
+
159
+
160
+ # A marker for iscoroutinefunction.
161
+ _is_coroutine = object()
162
+
163
+
164
+ def iscoroutinefunction(func):
165
+ """Return True if func is a decorated coroutine function."""
166
+ return (inspect.iscoroutinefunction(func) or
167
+ getattr(func, '_is_coroutine', None) is _is_coroutine)
168
+
169
+
170
# Prioritize native coroutine check to speed-up
# asyncio.iscoroutine.
_COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType,
                    collections.abc.Coroutine, CoroWrapper)
_iscoroutine_typecache = set()


def iscoroutine(obj):
    """Return True if obj is a coroutine object."""
    obj_type = type(obj)
    # Fast path: exact types already seen and confirmed positive.
    if obj_type in _iscoroutine_typecache:
        return True

    if not isinstance(obj, _COROUTINE_TYPES):
        return False

    # Just in case we don't want to cache more than 100
    # positive types.  That shouldn't ever happen, unless
    # someone stressing the system on purpose.
    if len(_iscoroutine_typecache) < 100:
        _iscoroutine_typecache.add(obj_type)
    return True
191
+
192
+
193
def _format_coroutine(coro):
    """Return a human-readable description of *coro* for use in repr()s.

    The result contains the coroutine's name, whether it is running or
    done, and the filename:lineno where it is defined or suspended.
    """
    assert iscoroutine(coro)

    is_corowrapper = isinstance(coro, CoroWrapper)

    def get_name(coro):
        # Coroutines compiled with Cython sometimes don't have
        # proper __qualname__ or __name__.  While that is a bug
        # in Cython, asyncio shouldn't crash with an AttributeError
        # in its __repr__ functions.
        if is_corowrapper:
            return format_helpers._format_callback(coro.func, (), {})

        if hasattr(coro, '__qualname__') and coro.__qualname__:
            coro_name = coro.__qualname__
        elif hasattr(coro, '__name__') and coro.__name__:
            coro_name = coro.__name__
        else:
            # Stop masking Cython bugs, expose them in a friendly way.
            coro_name = f'<{type(coro).__name__} without __name__>'
        return f'{coro_name}()'

    def is_running(coro):
        # Native coroutines expose cr_running, generators gi_running.
        try:
            return coro.cr_running
        except AttributeError:
            try:
                return coro.gi_running
            except AttributeError:
                return False

    coro_code = None
    if hasattr(coro, 'cr_code') and coro.cr_code:
        coro_code = coro.cr_code
    elif hasattr(coro, 'gi_code') and coro.gi_code:
        coro_code = coro.gi_code

    coro_name = get_name(coro)

    if not coro_code:
        # Built-in types might not have __qualname__ or __name__.
        if is_running(coro):
            return f'{coro_name} running'
        else:
            return coro_name

    coro_frame = None
    if hasattr(coro, 'gi_frame') and coro.gi_frame:
        coro_frame = coro.gi_frame
    elif hasattr(coro, 'cr_frame') and coro.cr_frame:
        coro_frame = coro.cr_frame

    # If Cython's coroutine has a fake code object without proper
    # co_filename -- expose that.
    filename = coro_code.co_filename or '<empty co_filename>'

    lineno = 0
    # BUG FIX: the location strings below used to print the literal
    # '(unknown)' and left the computed *filename* unused; they must
    # interpolate the actual filename.
    if (is_corowrapper and
            coro.func is not None and
            not inspect.isgeneratorfunction(coro.func)):
        source = format_helpers._get_function_source(coro.func)
        if source is not None:
            filename, lineno = source
        if coro_frame is None:
            coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'
        else:
            coro_repr = f'{coro_name} running, defined at {filename}:{lineno}'

    elif coro_frame is not None:
        lineno = coro_frame.f_lineno
        coro_repr = f'{coro_name} running at {filename}:{lineno}'

    else:
        lineno = coro_code.co_firstlineno
        coro_repr = f'{coro_name} done, defined at {filename}:{lineno}'

    return coro_repr
python310/asyncio/events.py ADDED
@@ -0,0 +1,819 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Event loop and event loop policy."""
2
+
3
+ __all__ = (
4
+ 'AbstractEventLoopPolicy',
5
+ 'AbstractEventLoop', 'AbstractServer',
6
+ 'Handle', 'TimerHandle',
7
+ 'get_event_loop_policy', 'set_event_loop_policy',
8
+ 'get_event_loop', 'set_event_loop', 'new_event_loop',
9
+ 'get_child_watcher', 'set_child_watcher',
10
+ '_set_running_loop', 'get_running_loop',
11
+ '_get_running_loop',
12
+ )
13
+
14
+ import contextvars
15
+ import os
16
+ import socket
17
+ import subprocess
18
+ import sys
19
+ import threading
20
+
21
+ from . import format_helpers
22
+
23
+
24
class Handle:
    """Object returned by callback registration methods."""

    __slots__ = ('_callback', '_args', '_cancelled', '_loop',
                 '_source_traceback', '_repr', '__weakref__',
                 '_context')

    def __init__(self, callback, args, loop, context=None):
        # Capture the current contextvars.Context unless the caller
        # supplied one; _run() executes the callback inside it.
        if context is None:
            context = contextvars.copy_context()
        self._context = context
        self._loop = loop
        self._callback = callback
        self._args = args
        self._cancelled = False
        self._repr = None
        if self._loop.get_debug():
            # In debug mode remember where the handle was created so
            # diagnostics can point at the registration site.
            self._source_traceback = format_helpers.extract_stack(
                sys._getframe(1))
        else:
            self._source_traceback = None

    def _repr_info(self):
        # Pieces joined by __repr__; subclasses extend this list.
        info = [self.__class__.__name__]
        if self._cancelled:
            info.append('cancelled')
        if self._callback is not None:
            info.append(format_helpers._format_callback_source(
                self._callback, self._args))
        if self._source_traceback:
            frame = self._source_traceback[-1]
            info.append(f'created at {frame[0]}:{frame[1]}')
        return info

    def __repr__(self):
        # Prefer the representation frozen by cancel() in debug mode.
        if self._repr is not None:
            return self._repr
        info = self._repr_info()
        return '<{}>'.format(' '.join(info))

    def cancel(self):
        """Prevent the callback from running; idempotent."""
        if not self._cancelled:
            self._cancelled = True
            if self._loop.get_debug():
                # Keep a representation in debug mode to keep callback and
                # parameters. For example, to log the warning
                # "Executing <Handle...> took 2.5 second"
                self._repr = repr(self)
            # Drop references so a cancelled handle does not keep the
            # callback and its arguments alive.
            self._callback = None
            self._args = None

    def cancelled(self):
        """Return True if cancel() was called."""
        return self._cancelled

    def _run(self):
        # Invoked by the event loop to run the callback in its context.
        try:
            self._context.run(self._callback, *self._args)
        except (SystemExit, KeyboardInterrupt):
            # Never swallow interpreter-exit signals.
            raise
        except BaseException as exc:
            # Report any other failure through the loop's exception handler.
            cb = format_helpers._format_callback_source(
                self._callback, self._args)
            msg = f'Exception in callback {cb}'
            context = {
                'message': msg,
                'exception': exc,
                'handle': self,
            }
            if self._source_traceback:
                context['source_traceback'] = self._source_traceback
            self._loop.call_exception_handler(context)
        self = None  # Needed to break cycles when an exception occurs.
96
+
97
+
98
class TimerHandle(Handle):
    """Object returned by timed callback registration methods."""

    __slots__ = ['_scheduled', '_when']

    def __init__(self, when, callback, args, loop, context=None):
        assert when is not None
        super().__init__(callback, args, loop, context)
        if self._source_traceback:
            # Drop this __init__ frame so the recorded traceback points
            # at the code that scheduled the timer.
            del self._source_traceback[-1]
        self._when = when
        self._scheduled = False  # managed by the owning event loop

    def _repr_info(self):
        info = super()._repr_info()
        # Insert the deadline right after 'cancelled' if it is shown.
        pos = 2 if self._cancelled else 1
        info.insert(pos, f'when={self._when}')
        return info

    def __hash__(self):
        # Equal handles always share _when, so hashing the deadline
        # alone is consistent with __eq__.
        return hash(self._when)

    # The rich comparisons order handles by deadline; non-TimerHandle
    # operands yield NotImplemented so Python can try the reflected op.

    def __lt__(self, other):
        if isinstance(other, TimerHandle):
            return self._when < other._when
        return NotImplemented

    def __le__(self, other):
        if isinstance(other, TimerHandle):
            return self._when < other._when or self.__eq__(other)
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, TimerHandle):
            return self._when > other._when
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, TimerHandle):
            return self._when > other._when or self.__eq__(other)
        return NotImplemented

    def __eq__(self, other):
        if isinstance(other, TimerHandle):
            return (self._when == other._when and
                    self._callback == other._callback and
                    self._args == other._args and
                    self._cancelled == other._cancelled)
        return NotImplemented

    def cancel(self):
        if not self._cancelled:
            # Notify the loop first so it can invalidate its entry.
            self._loop._timer_handle_cancelled(self)
        super().cancel()

    def when(self):
        """Return a scheduled callback time.

        The time is an absolute timestamp, using the same time
        reference as loop.time().
        """
        return self._when
160
+
161
+
162
class AbstractServer:
    """Abstract server returned by create_server().

    Every method is a stub raising NotImplementedError except the async
    context manager protocol, which closes the server on exit.
    """

    def close(self):
        """Stop serving.  This leaves existing connections open."""
        raise NotImplementedError

    def get_loop(self):
        """Get the event loop the Server object is attached to."""
        raise NotImplementedError

    def is_serving(self):
        """Return True if the server is accepting connections."""
        raise NotImplementedError

    async def start_serving(self):
        """Start accepting connections.

        This method is idempotent, so it can be called when
        the server is already being serving.
        """
        raise NotImplementedError

    async def serve_forever(self):
        """Start accepting connections until the coroutine is cancelled.

        The server is closed when the coroutine is cancelled.
        """
        raise NotImplementedError

    async def wait_closed(self):
        """Coroutine to wait until service is closed."""
        raise NotImplementedError

    async def __aenter__(self):
        # "async with server:" simply yields the server itself.
        return self

    async def __aexit__(self, *exc):
        # Close on context exit and wait until the close completes.
        self.close()
        await self.wait_closed()
202
+
203
+
204
class AbstractEventLoop:
    """Abstract event loop.

    Defines the complete event loop interface.  Every method is a stub
    raising NotImplementedError; concrete loops must override them.
    """

    # Running and stopping the event loop.

    def run_forever(self):
        """Run the event loop until stop() is called."""
        raise NotImplementedError

    def run_until_complete(self, future):
        """Run the event loop until a Future is done.

        Return the Future's result, or raise its exception.
        """
        raise NotImplementedError

    def stop(self):
        """Stop the event loop as soon as reasonable.

        Exactly how soon that is may depend on the implementation, but
        no more I/O callbacks should be scheduled.
        """
        raise NotImplementedError

    def is_running(self):
        """Return whether the event loop is currently running."""
        raise NotImplementedError

    def is_closed(self):
        """Returns True if the event loop was closed."""
        raise NotImplementedError

    def close(self):
        """Close the loop.

        The loop should not be running.

        This is idempotent and irreversible.

        No other methods should be called after this one.
        """
        raise NotImplementedError

    async def shutdown_asyncgens(self):
        """Shutdown all active asynchronous generators."""
        raise NotImplementedError

    async def shutdown_default_executor(self):
        """Schedule the shutdown of the default executor."""
        raise NotImplementedError

    # Methods scheduling callbacks.  All these return Handles.

    def _timer_handle_cancelled(self, handle):
        """Notification that a TimerHandle has been cancelled."""
        raise NotImplementedError

    def call_soon(self, callback, *args, context=None):
        """Arrange for *callback* to be called on the next loop iteration.

        BUG FIX: this stub previously delegated to call_later(0, ...).
        That is incorrect for an abstract base: call_soon must preserve
        FIFO ordering of callbacks, while call_later schedules through
        the timer machinery, and the delegation silently masked a
        missing override in subclasses.  Concrete loops must implement
        call_soon directly.
        """
        raise NotImplementedError

    def call_later(self, delay, callback, *args, context=None):
        """Arrange for *callback* to be called after *delay* seconds."""
        raise NotImplementedError

    def call_at(self, when, callback, *args, context=None):
        """Arrange for *callback* to be called at absolute time *when*."""
        raise NotImplementedError

    def time(self):
        """Return the loop's internal clock time."""
        raise NotImplementedError

    def create_future(self):
        """Create and return a Future attached to this loop."""
        raise NotImplementedError

    # Method scheduling a coroutine object: create a task.

    def create_task(self, coro, *, name=None):
        """Schedule *coro* for execution and return a Task."""
        raise NotImplementedError

    # Methods for interacting with threads.

    def call_soon_threadsafe(self, callback, *args, context=None):
        """Thread-safe variant of call_soon()."""
        raise NotImplementedError

    def run_in_executor(self, executor, func, *args):
        """Run *func* in *executor* (or the default one)."""
        raise NotImplementedError

    def set_default_executor(self, executor):
        """Set the default executor used by run_in_executor()."""
        raise NotImplementedError

    # Network I/O methods returning Futures.

    async def getaddrinfo(self, host, port, *,
                          family=0, type=0, proto=0, flags=0):
        """Asynchronous version of socket.getaddrinfo()."""
        raise NotImplementedError

    async def getnameinfo(self, sockaddr, flags=0):
        """Asynchronous version of socket.getnameinfo()."""
        raise NotImplementedError

    async def create_connection(
            self, protocol_factory, host=None, port=None,
            *, ssl=None, family=0, proto=0,
            flags=0, sock=None, local_addr=None,
            server_hostname=None,
            ssl_handshake_timeout=None,
            happy_eyeballs_delay=None, interleave=None):
        """Connect to host:port and return a (transport, protocol) pair."""
        raise NotImplementedError

    async def create_server(
            self, protocol_factory, host=None, port=None,
            *, family=socket.AF_UNSPEC,
            flags=socket.AI_PASSIVE, sock=None, backlog=100,
            ssl=None, reuse_address=None, reuse_port=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """A coroutine which creates a TCP server bound to host and port.

        The return value is a Server object which can be used to stop
        the service.

        If host is an empty string or None all interfaces are assumed
        and a list of multiple sockets will be returned (most likely
        one for IPv4 and another one for IPv6). The host parameter can also be
        a sequence (e.g. list) of hosts to bind to.

        family can be set to either AF_INET or AF_INET6 to force the
        socket to use IPv4 or IPv6. If not set it will be determined
        from host (defaults to AF_UNSPEC).

        flags is a bitmask for getaddrinfo().

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified will automatically be set to True on
        UNIX.

        reuse_port tells the kernel to allow this endpoint to be bound to
        the same port as other existing endpoints are bound to, so long as
        they all set this flag when being created. This option is not
        supported on Windows.

        ssl_handshake_timeout is the time in seconds that an SSL server
        will wait for completion of the SSL handshake before aborting the
        connection. Default is 60s.

        start_serving set to True (default) causes the created server
        to start accepting connections immediately. When set to False,
        the user should await Server.start_serving() or Server.serve_forever()
        to make the server to start accepting connections.
        """
        raise NotImplementedError

    async def sendfile(self, transport, file, offset=0, count=None,
                       *, fallback=True):
        """Send a file through a transport.

        Return an amount of sent bytes.
        """
        raise NotImplementedError

    async def start_tls(self, transport, protocol, sslcontext, *,
                        server_side=False,
                        server_hostname=None,
                        ssl_handshake_timeout=None):
        """Upgrade a transport to TLS.

        Return a new transport that *protocol* should start using
        immediately.
        """
        raise NotImplementedError

    async def create_unix_connection(
            self, protocol_factory, path=None, *,
            ssl=None, sock=None,
            server_hostname=None,
            ssl_handshake_timeout=None):
        """UNIX-domain-socket variant of create_connection()."""
        raise NotImplementedError

    async def create_unix_server(
            self, protocol_factory, path=None, *,
            sock=None, backlog=100, ssl=None,
            ssl_handshake_timeout=None,
            start_serving=True):
        """A coroutine which creates a UNIX Domain Socket server.

        The return value is a Server object, which can be used to stop
        the service.

        path is a str, representing a file system path to bind the
        server socket to.

        sock can optionally be specified in order to use a preexisting
        socket object.

        backlog is the maximum number of queued connections passed to
        listen() (defaults to 100).

        ssl can be set to an SSLContext to enable SSL over the
        accepted connections.

        ssl_handshake_timeout is the time in seconds that an SSL server
        will wait for the SSL handshake to complete (defaults to 60s).

        start_serving set to True (default) causes the created server
        to start accepting connections immediately. When set to False,
        the user should await Server.start_serving() or Server.serve_forever()
        to make the server to start accepting connections.
        """
        raise NotImplementedError

    async def connect_accepted_socket(
            self, protocol_factory, sock,
            *, ssl=None,
            ssl_handshake_timeout=None):
        """Handle an accepted connection.

        This is used by servers that accept connections outside of
        asyncio, but use asyncio to handle connections.

        This method is a coroutine.  When completed, the coroutine
        returns a (transport, protocol) pair.
        """
        raise NotImplementedError

    async def create_datagram_endpoint(self, protocol_factory,
                                       local_addr=None, remote_addr=None, *,
                                       family=0, proto=0, flags=0,
                                       reuse_address=None, reuse_port=None,
                                       allow_broadcast=None, sock=None):
        """A coroutine which creates a datagram endpoint.

        This method will try to establish the endpoint in the background.
        When successful, the coroutine returns a (transport, protocol) pair.

        protocol_factory must be a callable returning a protocol instance.

        socket family AF_INET, socket.AF_INET6 or socket.AF_UNIX depending on
        host (or family if specified), socket type SOCK_DGRAM.

        reuse_address tells the kernel to reuse a local socket in
        TIME_WAIT state, without waiting for its natural timeout to
        expire. If not specified it will automatically be set to True on
        UNIX.

        reuse_port tells the kernel to allow this endpoint to be bound to
        the same port as other existing endpoints are bound to, so long as
        they all set this flag when being created. This option is not
        supported on Windows and some UNIX's. If the
        :py:data:`~socket.SO_REUSEPORT` constant is not defined then this
        capability is unsupported.

        allow_broadcast tells the kernel to allow this endpoint to send
        messages to the broadcast address.

        sock can optionally be specified in order to use a preexisting
        socket object.
        """
        raise NotImplementedError

    # Pipes and subprocesses.

    async def connect_read_pipe(self, protocol_factory, pipe):
        """Register read pipe in event loop. Set the pipe to non-blocking mode.

        protocol_factory should instantiate object with Protocol interface.
        pipe is a file-like object.
        Return pair (transport, protocol), where transport supports the
        ReadTransport interface."""
        # The reason to accept file-like object instead of just file descriptor
        # is: we need to own pipe and close it at transport finishing
        # Can got complicated errors if pass f.fileno(),
        # close fd in pipe transport then close f and vice versa.
        raise NotImplementedError

    async def connect_write_pipe(self, protocol_factory, pipe):
        """Register write pipe in event loop.

        protocol_factory should instantiate object with BaseProtocol interface.
        Pipe is file-like object already switched to nonblocking.
        Return pair (transport, protocol), where transport support
        WriteTransport interface."""
        # The reason to accept file-like object instead of just file descriptor
        # is: we need to own pipe and close it at transport finishing
        # Can got complicated errors if pass f.fileno(),
        # close fd in pipe transport then close f and vice versa.
        raise NotImplementedError

    async def subprocess_shell(self, protocol_factory, cmd, *,
                               stdin=subprocess.PIPE,
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE,
                               **kwargs):
        """Run *cmd* through the shell; return (transport, protocol)."""
        raise NotImplementedError

    async def subprocess_exec(self, protocol_factory, *args,
                              stdin=subprocess.PIPE,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE,
                              **kwargs):
        """Execute the program *args* directly; return (transport, protocol)."""
        raise NotImplementedError

    # Ready-based callback registration methods.
    # The add_*() methods return None.
    # The remove_*() methods return True if something was removed,
    # False if there was nothing to delete.

    def add_reader(self, fd, callback, *args):
        """Call *callback* when *fd* is ready for reading."""
        raise NotImplementedError

    def remove_reader(self, fd):
        """Stop watching *fd* for readability."""
        raise NotImplementedError

    def add_writer(self, fd, callback, *args):
        """Call *callback* when *fd* is ready for writing."""
        raise NotImplementedError

    def remove_writer(self, fd):
        """Stop watching *fd* for writability."""
        raise NotImplementedError

    # Completion based I/O methods returning Futures.

    async def sock_recv(self, sock, nbytes):
        """Receive up to *nbytes* from *sock*."""
        raise NotImplementedError

    async def sock_recv_into(self, sock, buf):
        """Receive from *sock* into *buf*."""
        raise NotImplementedError

    async def sock_sendall(self, sock, data):
        """Send all of *data* to *sock*."""
        raise NotImplementedError

    async def sock_connect(self, sock, address):
        """Connect *sock* to *address*."""
        raise NotImplementedError

    async def sock_accept(self, sock):
        """Accept a connection on the listening socket *sock*."""
        raise NotImplementedError

    async def sock_sendfile(self, sock, file, offset=0, count=None,
                            *, fallback=None):
        """Send *file* over *sock*, using os.sendfile when possible."""
        raise NotImplementedError

    # Signal handling.

    def add_signal_handler(self, sig, callback, *args):
        """Add a handler for signal *sig*."""
        raise NotImplementedError

    def remove_signal_handler(self, sig):
        """Remove the handler for signal *sig*."""
        raise NotImplementedError

    # Task factory.

    def set_task_factory(self, factory):
        """Set the factory used by create_task()."""
        raise NotImplementedError

    def get_task_factory(self):
        """Return the current task factory."""
        raise NotImplementedError

    # Error handlers.

    def get_exception_handler(self):
        """Return the current exception handler."""
        raise NotImplementedError

    def set_exception_handler(self, handler):
        """Set *handler* as the loop's exception handler."""
        raise NotImplementedError

    def default_exception_handler(self, context):
        """Default handling for the error *context* dict."""
        raise NotImplementedError

    def call_exception_handler(self, context):
        """Dispatch *context* to the configured exception handler."""
        raise NotImplementedError

    # Debug flag management.

    def get_debug(self):
        """Return True if the loop is running in debug mode."""
        raise NotImplementedError

    def set_debug(self, enabled):
        """Enable or disable debug mode."""
        raise NotImplementedError
587
+
588
+
589
class AbstractEventLoopPolicy:
    """Abstract policy for accessing the event loop.

    All methods are stubs raising NotImplementedError; a concrete
    policy (e.g. BaseDefaultEventLoopPolicy below) overrides them.
    """

    def get_event_loop(self):
        """Get the event loop for the current context.

        Returns an event loop object implementing the BaseEventLoop interface,
        or raises an exception in case no event loop has been set for the
        current context and the current policy does not specify to create one.

        It should never return None."""
        raise NotImplementedError

    def set_event_loop(self, loop):
        """Set the event loop for the current context to loop."""
        raise NotImplementedError

    def new_event_loop(self):
        """Create and return a new event loop object according to this
        policy's rules. If there's need to set this loop as the event loop for
        the current context, set_event_loop must be called explicitly."""
        raise NotImplementedError

    # Child processes handling (Unix only).

    def get_child_watcher(self):
        """Get the watcher for child processes."""
        raise NotImplementedError

    def set_child_watcher(self, watcher):
        """Set the watcher for child processes."""
        raise NotImplementedError
621
+
622
+
623
class BaseDefaultEventLoopPolicy(AbstractEventLoopPolicy):
    """Default event loop policy: one event loop per thread.

    A loop is created automatically only for the main thread; every
    other thread must install one explicitly with set_event_loop().
    Other policies may use different rules (a single global loop, a
    loop per thread created on demand, or some other notion of
    context to which a loop is attached).
    """

    _loop_factory = None

    class _Local(threading.local):
        # Per-thread state: the installed loop, and whether
        # set_event_loop() has ever been called in this thread.
        _loop = None
        _set_called = False

    def __init__(self):
        self._local = self._Local()

    def get_event_loop(self):
        """Return this thread's event loop, creating one on demand.

        Auto-creation only happens in the main thread, and only when
        set_event_loop() has never been called; otherwise a missing
        loop raises RuntimeError.
        """
        local = self._local
        may_autocreate = (local._loop is None
                          and not local._set_called
                          and threading.current_thread()
                          is threading.main_thread())
        if may_autocreate:
            self.set_event_loop(self.new_event_loop())

        if local._loop is None:
            raise RuntimeError('There is no current event loop in thread %r.'
                               % threading.current_thread().name)

        return local._loop

    def set_event_loop(self, loop):
        """Install *loop* (or None) as this thread's event loop."""
        self._local._set_called = True
        assert loop is None or isinstance(loop, AbstractEventLoop)
        self._local._loop = loop

    def new_event_loop(self):
        """Build a fresh loop from _loop_factory.

        The caller must still pass it to set_event_loop() to make it
        the current loop.
        """
        return self._loop_factory()
674
+
675
+
676
# Event loop policy.  The policy itself is always global, even if the
# policy's rules say that there is an event loop per thread (or other
# notion of context).  The default policy is installed by the first
# call to get_event_loop_policy().
_event_loop_policy = None

# Lock for protecting the on-the-fly creation of the event loop policy.
_lock = threading.Lock()


# A TLS for the running event loop, used by _get_running_loop.
class _RunningLoop(threading.local):
    # (loop, pid) pair.  Storing the pid lets _get_running_loop() ignore a
    # loop that was inherited across fork() (it compares against os.getpid()).
    loop_pid = (None, None)


_running_loop = _RunningLoop()
692
+
693
+
694
def get_running_loop():
    """Return the running event loop.  Raise a RuntimeError if there is none.

    This function is thread-specific.
    """
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    current = _get_running_loop()
    if current is None:
        raise RuntimeError('no running event loop')
    return current
704
+
705
+
706
def _get_running_loop():
    """Return the running event loop or None.

    This is a low-level function intended to be used by event loops.
    This function is thread-specific.
    """
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    loop, pid = _running_loop.loop_pid
    # A loop recorded in a different process (i.e. before a fork) does not
    # count as running here; fall through and return None implicitly.
    if loop is not None and pid == os.getpid():
        return loop
716
+
717
+
718
def _set_running_loop(loop):
    """Set the running event loop.

    This is a low-level function intended to be used by event loops.
    This function is thread-specific.
    """
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    # Record the owning pid alongside the loop so that a forked child does
    # not inherit the parent's "running" loop (see _get_running_loop).
    _running_loop.loop_pid = (loop, os.getpid())
726
+
727
+
728
def _init_event_loop_policy():
    """Install the default event loop policy, exactly once, thread-safely."""
    global _event_loop_policy
    with _lock:
        # Re-check under the lock: another thread may have won the race
        # between the caller's unlocked check and acquiring _lock.
        if _event_loop_policy is None:  # pragma: no branch
            # NOTE(review): imported lazily — presumably to avoid an import
            # cycle with the platform-specific policy module; confirm.
            from . import DefaultEventLoopPolicy
            _event_loop_policy = DefaultEventLoopPolicy()
734
+
735
+
736
def get_event_loop_policy():
    """Get the current event loop policy."""
    # Double-checked lazy init: _init_event_loop_policy() takes _lock and
    # re-tests _event_loop_policy before creating the default policy.
    if _event_loop_policy is None:
        _init_event_loop_policy()
    return _event_loop_policy
741
+
742
+
743
def set_event_loop_policy(policy):
    """Set the current event loop policy.

    If policy is None, the default policy is restored."""
    global _event_loop_policy
    # NOTE(review): `assert` is skipped under `python -O`; kept to preserve
    # the original exception type for invalid arguments.
    assert policy is None or isinstance(policy, AbstractEventLoopPolicy)
    _event_loop_policy = policy
750
+
751
+
752
def get_event_loop():
    """Return an asyncio event loop.

    When called from a coroutine or a callback (e.g. scheduled with call_soon
    or similar API), this function will always return the running event loop.

    If there is no running event loop set, the function will return
    the result of `get_event_loop_policy().get_event_loop()` call.
    """
    # NOTE: this function is implemented in C (see _asynciomodule.c)
    # Delegates through the _py_ alias (bound below, before the C versions
    # are imported) so the pure-Python path stays reachable for tests.
    return _py__get_event_loop()
763
+
764
+
765
def _get_event_loop(stacklevel=3):
    # This internal method is going away in Python 3.12, left here only for
    # backwards compatibility with 3.10.0 - 3.10.8 and 3.11.0.
    # Similarly, this method's C equivalent in _asyncio is going away as well.
    # See GH-99949 for more details.
    # (The `stacklevel` parameter is retained for interface compatibility.)
    loop = _get_running_loop()
    if loop is not None:
        return loop
    return get_event_loop_policy().get_event_loop()
774
+
775
+
776
def set_event_loop(loop):
    """Equivalent to calling get_event_loop_policy().set_event_loop(loop)."""
    policy = get_event_loop_policy()
    policy.set_event_loop(loop)
779
+
780
+
781
def new_event_loop():
    """Equivalent to calling get_event_loop_policy().new_event_loop()."""
    policy = get_event_loop_policy()
    return policy.new_event_loop()
784
+
785
+
786
def get_child_watcher():
    """Equivalent to calling get_event_loop_policy().get_child_watcher()."""
    policy = get_event_loop_policy()
    return policy.get_child_watcher()
789
+
790
+
791
def set_child_watcher(watcher):
    """Equivalent to calling
    get_event_loop_policy().set_child_watcher(watcher)."""
    policy = get_event_loop_policy()
    return policy.set_child_watcher(watcher)
795
+
796
+
797
# Alias pure-Python implementations for testing purposes.
# These bindings MUST happen before the `from _asyncio import ...` below,
# which rebinds the public names to the C-accelerated versions.
_py__get_running_loop = _get_running_loop
_py__set_running_loop = _set_running_loop
_py_get_running_loop = get_running_loop
_py_get_event_loop = get_event_loop
_py__get_event_loop = _get_event_loop


try:
    # get_event_loop() is one of the most frequently called
    # functions in asyncio.  Pure Python implementation is
    # about 4 times slower than C-accelerated.
    from _asyncio import (_get_running_loop, _set_running_loop,
                          get_running_loop, get_event_loop, _get_event_loop)
except ImportError:
    # No C accelerator available; the pure-Python versions stay in place.
    pass
else:
    # Alias C implementations for testing purposes.
    _c__get_running_loop = _get_running_loop
    _c__set_running_loop = _set_running_loop
    _c_get_running_loop = get_running_loop
    _c_get_event_loop = get_event_loop
    _c__get_event_loop = _get_event_loop
python310/asyncio/exceptions.py ADDED
@@ -0,0 +1,58 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""asyncio exceptions."""


# Public API of this module; keep in sync with the classes defined below.
__all__ = ('CancelledError', 'InvalidStateError', 'TimeoutError',
           'IncompleteReadError', 'LimitOverrunError',
           'SendfileNotAvailableError')
7
+
8
+
9
class CancelledError(BaseException):
    """The Future or Task was cancelled.

    Derives from BaseException (not Exception) so that a blanket
    ``except Exception`` does not accidentally swallow cancellation.
    """
11
+
12
+
13
class TimeoutError(Exception):
    """The operation exceeded the given deadline."""
15
+
16
+
17
class InvalidStateError(Exception):
    """The operation is not allowed in this state."""
19
+
20
+
21
class SendfileNotAvailableError(RuntimeError):
    """Sendfile syscall is not available.

    Raised if OS does not support sendfile syscall for given socket or
    file type.
    """
27
+
28
+
29
class IncompleteReadError(EOFError):
    """
    Incomplete read error. Attributes:

    - partial: read bytes string before the end of stream was reached
    - expected: total number of expected bytes (or None if unknown)
    """
    def __init__(self, partial, expected):
        # Render the expected count for the message; None means the total
        # was unknown to the caller.
        if expected is None:
            shown = 'undefined'
        else:
            shown = repr(expected)
        super().__init__(f'{len(partial)} bytes read on a total of '
                         f'{shown} expected bytes')
        self.partial = partial
        self.expected = expected

    def __reduce__(self):
        # Custom pickling: reconstruct from the two stored attributes rather
        # than the formatted message.
        return type(self), (self.partial, self.expected)
45
+
46
+
47
class LimitOverrunError(Exception):
    """Reached the buffer limit while looking for a separator.

    Attributes:
    - consumed: total number of to be consumed bytes.
    """
    def __init__(self, message, consumed):
        super().__init__(message)
        self.consumed = consumed

    def __reduce__(self):
        # Custom pickling: rebuild from the original message and the
        # consumed-byte count.
        return type(self), (self.args[0], self.consumed)
python310/asyncio/format_helpers.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import functools
2
+ import inspect
3
+ import reprlib
4
+ import sys
5
+ import traceback
6
+
7
+ from . import constants
8
+
9
+
10
+ def _get_function_source(func):
11
+ func = inspect.unwrap(func)
12
+ if inspect.isfunction(func):
13
+ code = func.__code__
14
+ return (code.co_filename, code.co_firstlineno)
15
+ if isinstance(func, functools.partial):
16
+ return _get_function_source(func.func)
17
+ if isinstance(func, functools.partialmethod):
18
+ return _get_function_source(func.func)
19
+ return None
20
+
21
+
22
def _format_callback_source(func, args):
    """Format *func* with *args*, appending its source location if known."""
    text = _format_callback(func, args, None)
    location = _get_function_source(func)
    if location:
        text += f' at {location[0]}:{location[1]}'
    return text
28
+
29
+
30
+ def _format_args_and_kwargs(args, kwargs):
31
+ """Format function arguments and keyword arguments.
32
+
33
+ Special case for a single parameter: ('hello',) is formatted as ('hello').
34
+ """
35
+ # use reprlib to limit the length of the output
36
+ items = []
37
+ if args:
38
+ items.extend(reprlib.repr(arg) for arg in args)
39
+ if kwargs:
40
+ items.extend(f'{k}={reprlib.repr(v)}' for k, v in kwargs.items())
41
+ return '({})'.format(', '.join(items))
42
+
43
+
44
def _format_callback(func, args, kwargs, suffix=''):
    """Build a readable repr of *func* applied to *args*/*kwargs*.

    functools.partial objects are peeled recursively; each layer's call
    arguments accumulate into *suffix*.
    """
    if isinstance(func, functools.partial):
        suffix = _format_args_and_kwargs(args, kwargs) + suffix
        return _format_callback(func.func, func.args, func.keywords, suffix)

    # Prefer the qualified name, then the plain name, then repr().
    name = (getattr(func, '__qualname__', None) or
            getattr(func, '__name__', None))
    func_repr = name if name else repr(func)

    func_repr += _format_args_and_kwargs(args, kwargs)
    if suffix:
        func_repr += suffix
    return func_repr
60
+
61
+
62
def extract_stack(f=None, limit=None):
    """Replacement for traceback.extract_stack() that only does the
    necessary work for asyncio debug mode.
    """
    if f is None:
        # Start from our caller's frame, not this helper's.
        f = sys._getframe().f_back
    if limit is None:
        # Bound the cost: this can run for every coroutine and future
        # created while debug mode is enabled.
        limit = constants.DEBUG_STACK_DEPTH
    summary = traceback.StackSummary.extract(traceback.walk_stack(f),
                                             limit=limit,
                                             lookup_lines=False)
    # walk_stack yields innermost-first; present outermost-first instead.
    summary.reverse()
    return summary