koichi12 committed
Commit 2614c3c · verified · Parent: 09a76c8

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.

Files changed (50)
  1. .gitattributes +2 -0
  2. .venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/INSTALLER +1 -0
  3. .venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/LICENSE +185 -0
  4. .venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/METADATA +208 -0
  5. .venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/RECORD +48 -0
  6. .venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/REQUESTED +0 -0
  7. .venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/WHEEL +5 -0
  8. .venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/entry_points.txt +3 -0
  9. .venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/top_level.txt +1 -0
  10. .venv/lib/python3.11/site-packages/cffi-1.17.1.dist-info/INSTALLER +1 -0
  11. .venv/lib/python3.11/site-packages/cffi-1.17.1.dist-info/RECORD +48 -0
  12. .venv/lib/python3.11/site-packages/cffi-1.17.1.dist-info/entry_points.txt +2 -0
  13. .venv/lib/python3.11/site-packages/cffi-1.17.1.dist-info/top_level.txt +2 -0
  14. .venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust.abi3.so +3 -0
  15. .venv/lib/python3.11/site-packages/httpcore/__init__.py +140 -0
  16. .venv/lib/python3.11/site-packages/httpcore/__pycache__/__init__.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/httpcore/__pycache__/_api.cpython-311.pyc +0 -0
  18. .venv/lib/python3.11/site-packages/httpcore/__pycache__/_exceptions.cpython-311.pyc +0 -0
  19. .venv/lib/python3.11/site-packages/httpcore/__pycache__/_models.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/httpcore/__pycache__/_ssl.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/httpcore/__pycache__/_synchronization.cpython-311.pyc +0 -0
  22. .venv/lib/python3.11/site-packages/httpcore/__pycache__/_trace.cpython-311.pyc +0 -0
  23. .venv/lib/python3.11/site-packages/httpcore/__pycache__/_utils.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/httpcore/_api.py +94 -0
  25. .venv/lib/python3.11/site-packages/httpcore/_async/__init__.py +39 -0
  26. .venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/__init__.cpython-311.pyc +0 -0
  27. .venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/connection.cpython-311.pyc +0 -0
  28. .venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-311.pyc +0 -0
  29. .venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/http11.cpython-311.pyc +0 -0
  30. .venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/http2.cpython-311.pyc +0 -0
  31. .venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/interfaces.cpython-311.pyc +0 -0
  33. .venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-311.pyc +0 -0
  34. .venv/lib/python3.11/site-packages/httpcore/_async/connection.py +222 -0
  35. .venv/lib/python3.11/site-packages/httpcore/_async/connection_pool.py +420 -0
  36. .venv/lib/python3.11/site-packages/httpcore/_async/http11.py +379 -0
  37. .venv/lib/python3.11/site-packages/httpcore/_async/http2.py +583 -0
  38. .venv/lib/python3.11/site-packages/httpcore/_async/http_proxy.py +367 -0
  39. .venv/lib/python3.11/site-packages/httpcore/_async/interfaces.py +137 -0
  40. .venv/lib/python3.11/site-packages/httpcore/_async/socks_proxy.py +341 -0
  41. .venv/lib/python3.11/site-packages/httpcore/_backends/__pycache__/__init__.cpython-311.pyc +0 -0
  42. .venv/lib/python3.11/site-packages/httpcore/_backends/__pycache__/anyio.cpython-311.pyc +0 -0
  43. .venv/lib/python3.11/site-packages/httpcore/_backends/__pycache__/trio.cpython-311.pyc +0 -0
  44. .venv/lib/python3.11/site-packages/httpcore/_backends/auto.py +52 -0
  45. .venv/lib/python3.11/site-packages/httpcore/_backends/mock.py +143 -0
  46. .venv/lib/python3.11/site-packages/httpcore/_backends/sync.py +241 -0
  47. .venv/lib/python3.11/site-packages/httpcore/_backends/trio.py +159 -0
  48. .venv/lib/python3.11/site-packages/httpcore/_exceptions.py +81 -0
  49. .venv/lib/python3.11/site-packages/httpcore/_models.py +516 -0
  50. .venv/lib/python3.11/site-packages/httpcore/_sync/__init__.py +39 -0
.gitattributes CHANGED
@@ -331,3 +331,5 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia/cudnn/lib/
  .venv/lib/python3.11/site-packages/yarl/_quoting_c.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  .venv/lib/python3.11/site-packages/cryptography/x509/__pycache__/extensions.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
  .venv/lib/python3.11/site-packages/sentencepiece/_sentencepiece.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ .venv/lib/python3.11/site-packages/tiktoken/_tiktoken.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ .venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust.abi3.so filter=lfs diff=lfs merge=lfs -text
.venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
.venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/LICENSE ADDED
@@ -0,0 +1,185 @@
+ Copyright 2013 Google Inc. All Rights Reserved.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
.venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/METADATA ADDED
@@ -0,0 +1,208 @@
+ Metadata-Version: 2.1
+ Name: PyDrive2
+ Version: 1.21.3
+ Summary: Google Drive API made easy. Maintained fork of PyDrive.
+ Home-page: https://github.com/iterative/PyDrive2
+ Author: JunYoung Gwak
+ Author-email: jgwak@dreamylab.com
+ Maintainer: DVC team
+ Maintainer-email: support@dvc.org
+ License: Apache License 2.0
+ Project-URL: Documentation, https://docs.iterative.ai/PyDrive2
+ Project-URL: Changelog, https://github.com/iterative/PyDrive2/releases
+ Classifier: Development Status :: 4 - Beta
+ Classifier: Programming Language :: Python :: 3
+ Classifier: Programming Language :: Python :: 3.8
+ Classifier: Programming Language :: Python :: 3.9
+ Classifier: Programming Language :: Python :: 3.10
+ Classifier: Programming Language :: Python :: 3.11
+ Requires-Python: >=3.8
+ Description-Content-Type: text/x-rst
+ License-File: LICENSE
+ Requires-Dist: google-api-python-client >=1.12.5
+ Requires-Dist: oauth2client >=4.0.0
+ Requires-Dist: PyYAML >=3.0
+ Requires-Dist: cryptography <44
+ Requires-Dist: pyOpenSSL <=24.2.1,>=19.1.0
+ Provides-Extra: fsspec
+ Requires-Dist: fsspec >=2021.07.0 ; extra == 'fsspec'
+ Requires-Dist: tqdm >=4.0.0 ; extra == 'fsspec'
+ Requires-Dist: funcy >=1.14 ; extra == 'fsspec'
+ Requires-Dist: appdirs >=1.4.3 ; extra == 'fsspec'
+ Provides-Extra: tests
+ Requires-Dist: pytest >=4.6.0 ; extra == 'tests'
+ Requires-Dist: timeout-decorator ; extra == 'tests'
+ Requires-Dist: funcy >=1.14 ; extra == 'tests'
+ Requires-Dist: flake8 ; extra == 'tests'
+ Requires-Dist: flake8-docstrings ; extra == 'tests'
+ Requires-Dist: pytest-mock ; extra == 'tests'
+ Requires-Dist: pyinstaller ; extra == 'tests'
+ Requires-Dist: black ==24.10.0 ; extra == 'tests'
+ Requires-Dist: importlib-resources <6 ; (python_version < "3.10") and extra == 'tests'
+
+ |CI| |Conda| |PyPI|
+
+ .. |CI| image:: https://github.com/iterative/PyDrive2/workflows/Tests/badge.svg?branch=main
+    :target: https://github.com/iterative/PyDrive2/actions
+    :alt: GHA Tests
+
+ .. |Conda| image:: https://img.shields.io/conda/v/conda-forge/PyDrive2.svg?label=conda&logo=conda-forge
+    :target: https://anaconda.org/conda-forge/PyDrive2
+    :alt: Conda-forge
+
+ .. |PyPI| image:: https://img.shields.io/pypi/v/PyDrive2.svg?label=pip&logo=PyPI&logoColor=white
+    :target: https://pypi.org/project/PyDrive2
+    :alt: PyPI
+
+ PyDrive2
+ --------
+
+ *PyDrive2* is a wrapper library of
+ `google-api-python-client <https://github.com/google/google-api-python-client>`_
+ that simplifies many common Google Drive API V2 tasks. It is an actively
+ maintained fork of `https://pypi.python.org/pypi/PyDrive <https://pypi.python.org/pypi/PyDrive>`_.
+ By the authors and maintainers of the `Git for Data <https://dvc.org>`_ - DVC
+ project.
+
+ Project Info
+ ------------
+
+ - Package: `https://pypi.python.org/pypi/PyDrive2 <https://pypi.python.org/pypi/PyDrive2>`_
+ - Documentation: `https://docs.iterative.ai/PyDrive2 <https://docs.iterative.ai/PyDrive2>`_
+ - Source: `https://github.com/iterative/PyDrive2 <https://github.com/iterative/PyDrive2>`_
+ - Changelog: `https://github.com/iterative/PyDrive2/releases <https://github.com/iterative/PyDrive2/releases>`_
+ - `Running tests </pydrive2/test/README.rst>`_
+
+ Features of PyDrive2
+ --------------------
+
+ - Simplifies OAuth2.0 into just few lines with flexible settings.
+ - Wraps `Google Drive API V2 <https://developers.google.com/drive/v2/web/about-sdk>`_ into
+   classes of each resource to make your program more object-oriented.
+ - Helps common operations else than API calls, such as content fetching
+   and pagination control.
+ - Provides `fsspec`_ filesystem implementation.
+
+ How to install
+ --------------
+
+ You can install PyDrive2 with regular ``pip`` command.
+
+ ::
+
+     $ pip install PyDrive2
+
+ To install the current development version from GitHub, use:
+
+ ::
+
+     $ pip install git+https://github.com/iterative/PyDrive2.git#egg=PyDrive2
+
+ OAuth made easy
+ ---------------
+
+ Download *client\_secrets.json* from Google API Console and OAuth2.0 is
+ done in two lines. You can customize behavior of OAuth2 in one settings
+ file *settings.yaml*.
+
+ .. code:: python
+
+     from pydrive2.auth import GoogleAuth
+     from pydrive2.drive import GoogleDrive
+
+     gauth = GoogleAuth()
+     gauth.LocalWebserverAuth()
+
+     drive = GoogleDrive(gauth)
+
+ File management made easy
+ -------------------------
+
+ Upload/update the file with one method. PyDrive2 will do it in the most
+ efficient way.
+
+ .. code:: python
+
+     file1 = drive.CreateFile({'title': 'Hello.txt'})
+     file1.SetContentString('Hello')
+     file1.Upload()  # Files.insert()
+
+     file1['title'] = 'HelloWorld.txt'  # Change title of the file
+     file1.Upload()  # Files.patch()
+
+     content = file1.GetContentString()  # 'Hello'
+     file1.SetContentString(content + ' World!')  # 'Hello World!'
+     file1.Upload()  # Files.update()
+
+     file2 = drive.CreateFile()
+     file2.SetContentFile('hello.png')
+     file2.Upload()
+     print('Created file %s with mimeType %s' % (file2['title'],
+                                                 file2['mimeType']))
+     # Created file hello.png with mimeType image/png
+
+     file3 = drive.CreateFile({'id': file2['id']})
+     print('Downloading file %s from Google Drive' % file3['title'])  # 'hello.png'
+     file3.GetContentFile('world.png')  # Save Drive file as a local file
+
+     # or download Google Docs files in an export format provided.
+     # downloading a docs document as an html file:
+     docsfile.GetContentFile('test.html', mimetype='text/html')
+
+ File listing pagination made easy
+ ---------------------------------
+
+ *PyDrive2* handles file listing pagination for you.
+
+ .. code:: python
+
+     # Auto-iterate through all files that matches this query
+     file_list = drive.ListFile({'q': "'root' in parents"}).GetList()
+     for file1 in file_list:
+         print('title: {}, id: {}'.format(file1['title'], file1['id']))
+
+     # Paginate file lists by specifying number of max results
+     for file_list in drive.ListFile({'maxResults': 10}):
+         print('Received {} files from Files.list()'.format(len(file_list)))  # <= 10
+         for file1 in file_list:
+             print('title: {}, id: {}'.format(file1['title'], file1['id']))
+
+ Fsspec filesystem
+ -----------------
+
+ *PyDrive2* provides easy way to work with your files through `fsspec`_
+ compatible `GDriveFileSystem`_.
+
+ Install PyDrive2 with the required dependencies
+
+ ::
+
+     $ pip install PyDrive2[fsspec]
+
+ .. code:: python
+
+     from pydrive2.fs import GDriveFileSystem
+
+     # replace `root` with ID of a drive or directory and give service account access to it
+     fs = GDriveFileSystem("root", client_id=my_id, client_secret=my_secret)
+
+     for root, dnames, fnames in fs.walk("root"):
+         ...
+
+ .. _`GDriveFileSystem`: https://docs.iterative.ai/PyDrive2/fsspec/
+
+ Concurrent access made easy
+ ---------------------------
+
+ All API functions made to be thread-safe.
+
+ Contributors
+ ------------
+
+ Thanks to all our contributors!
+
+ .. image:: https://contrib.rocks/image?repo=iterative/PyDrive2
+    :target: https://github.com/iterative/PyDrive2/graphs/contributors
+
+ .. _`fsspec`: https://filesystem-spec.readthedocs.io/en/latest/
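The METADATA above states that "all API functions made to be thread-safe" but stops short of an example. A minimal sketch (not part of this commit) of what that claim permits — several worker threads sharing one `GoogleDrive` client — assuming OAuth is already configured via `client_secrets.json` as in the README:

```python
from concurrent.futures import ThreadPoolExecutor

from pydrive2.auth import GoogleAuth
from pydrive2.drive import GoogleDrive

gauth = GoogleAuth()
gauth.LocalWebserverAuth()  # assumes client_secrets.json is present
drive = GoogleDrive(gauth)

def upload(n: int) -> str:
    # Same CreateFile/SetContentString/Upload flow as the README above.
    f = drive.CreateFile({"title": f"note-{n}.txt"})
    f.SetContentString(f"payload {n}")
    f.Upload()  # Files.insert()
    return f["id"]

# Sharing one client across threads relies on the thread-safety claim above.
with ThreadPoolExecutor(max_workers=4) as pool:
    ids = list(pool.map(upload, range(8)))
```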
.venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/RECORD ADDED
@@ -0,0 +1,48 @@
+ PyDrive2-1.21.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ PyDrive2-1.21.3.dist-info/LICENSE,sha256=V3YxOBF9BqvkmtBCy49BTD8wbPdR_3GI8jmr2FayPkM,10180
+ PyDrive2-1.21.3.dist-info/METADATA,sha256=rKulNug5vCqJU3YHcrfhF6wffmC2UP1P-4KX_Y-0Wwk,6972
+ PyDrive2-1.21.3.dist-info/RECORD,,
+ PyDrive2-1.21.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ PyDrive2-1.21.3.dist-info/WHEEL,sha256=P9jw-gEje8ByB7_hXoICnHtVCrEwMQh-630tKvQWehc,91
+ PyDrive2-1.21.3.dist-info/entry_points.txt,sha256=Q9osoKLJaYRdOWnCsm33PtvGOb_-OtJ1zzTrzlfuvEw,118
+ PyDrive2-1.21.3.dist-info/top_level.txt,sha256=WGaHW8R_w8XVDvjOLEw83ef_4p_FRVzFH-yh_kVDa74,9
+ pydrive2/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydrive2/__pycache__/__init__.cpython-311.pyc,,
+ pydrive2/__pycache__/apiattr.cpython-311.pyc,,
+ pydrive2/__pycache__/auth.cpython-311.pyc,,
+ pydrive2/__pycache__/drive.cpython-311.pyc,,
+ pydrive2/__pycache__/files.cpython-311.pyc,,
+ pydrive2/__pycache__/settings.cpython-311.pyc,,
+ pydrive2/__pyinstaller/__init__.py,sha256=uLxcndXJQC67tSw5v8VDT9Qv67w0VbVXHYPwpOloZEc,142
+ pydrive2/__pyinstaller/__pycache__/__init__.cpython-311.pyc,,
+ pydrive2/__pyinstaller/__pycache__/hook-googleapiclient.cpython-311.pyc,,
+ pydrive2/__pyinstaller/__pycache__/test_hook-googleapiclient.cpython-311.pyc,,
+ pydrive2/__pyinstaller/hook-googleapiclient.py,sha256=Cd3l0xHBEeFaED2erR_ErjbhjWENfPuV10vAkGo0cEM,257
+ pydrive2/__pyinstaller/test_hook-googleapiclient.py,sha256=rmWrHsSTGx7ZkTfpkF_LdYluV5n6gDa5uhelFO8jOMY,1135
+ pydrive2/apiattr.py,sha256=x6ob2XVgI5x7wvdtJaVpN2OYvlaF2Gc3hpmrTCfZIO8,5535
+ pydrive2/auth.py,sha256=W_Ck3FwnYzqjfTwN5icrCAQaiwZw3Pw8Kd-jmu9I-6Q,27164
+ pydrive2/drive.py,sha256=cK_jCLyxblV4kocopEA6vMAgL_xSHIASrkH4LU6EnMU,1686
+ pydrive2/files.py,sha256=cg725DOo1URcg7sjv6aOjCWjAmqD9g53sPZhRykw75E,33444
+ pydrive2/fs/__init__.py,sha256=mY-RU-w59rXpdDgm6Stsn8BrJoKqUjfxb1dwFgmiNcw,78
+ pydrive2/fs/__pycache__/__init__.cpython-311.pyc,,
+ pydrive2/fs/__pycache__/spec.cpython-311.pyc,,
+ pydrive2/fs/__pycache__/utils.cpython-311.pyc,,
+ pydrive2/fs/spec.py,sha256=iHuJ0vlcxnRIRJ_mD1xiijO_a73OpqcO3Hnwzmxm3z8,22376
+ pydrive2/fs/utils.py,sha256=wVCLrSaKgIiTRFERhPokMxasBY9ehITRVFYhCm9r4XI,1591
+ pydrive2/settings.py,sha256=8bInU4n9Wr24Uj1O7-z8_kZ2WVe_mPpl0SFR6GbSe5Q,6298
+ pydrive2/test/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+ pydrive2/test/__pycache__/__init__.cpython-311.pyc,,
+ pydrive2/test/__pycache__/test_apiattr.cpython-311.pyc,,
+ pydrive2/test/__pycache__/test_drive.cpython-311.pyc,,
+ pydrive2/test/__pycache__/test_file.cpython-311.pyc,,
+ pydrive2/test/__pycache__/test_filelist.cpython-311.pyc,,
+ pydrive2/test/__pycache__/test_fs.cpython-311.pyc,,
+ pydrive2/test/__pycache__/test_oauth.cpython-311.pyc,,
+ pydrive2/test/__pycache__/test_util.cpython-311.pyc,,
+ pydrive2/test/test_apiattr.py,sha256=otV3-RzUrAnbFCyk4UPm5e4vRZw0TwNadNmVufFEioA,1136
+ pydrive2/test/test_drive.py,sha256=lFyGXH96VxBJVVXqBWQUzltvEAlQHqlrs47Mu0k-rmY,769
+ pydrive2/test/test_file.py,sha256=fbpwpBOqScf9hSirXKelpIDRhe0Sp44kCj46zq_sD1g,41645
+ pydrive2/test/test_filelist.py,sha256=E-J67RhS24Pyq1-ZYgoJr2jPZC3JcBqPF0Y_bsVHkZ4,3818
+ pydrive2/test/test_fs.py,sha256=CDu7BVr4PXNoM8vVejIBUohssAGnnPrfk3v0XYe3jAk,11458
+ pydrive2/test/test_oauth.py,sha256=S6xxvseyaZtmxtc294rAq1ATZM-uhpa5sgCdTdmHxzc,7278
+ pydrive2/test/test_util.py,sha256=xE2d-GC0iCbVdtMEdE1MyeZ0GUzhEBctADauRau8I4Q,2675
.venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/REQUESTED ADDED
File without changes
.venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/WHEEL ADDED
@@ -0,0 +1,5 @@
+ Wheel-Version: 1.0
+ Generator: setuptools (75.3.0)
+ Root-Is-Purelib: true
+ Tag: py3-none-any
+
.venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/entry_points.txt ADDED
@@ -0,0 +1,3 @@
+ [pyinstaller40]
+ hook-dirs = pydrive2.__pyinstaller:get_hook_dirs
+ tests = pydrive2.__pyinstaller:get_PyInstaller_tests
.venv/lib/python3.11/site-packages/PyDrive2-1.21.3.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
+ pydrive2
.venv/lib/python3.11/site-packages/cffi-1.17.1.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
+ pip
.venv/lib/python3.11/site-packages/cffi-1.17.1.dist-info/RECORD ADDED
@@ -0,0 +1,48 @@
+ _cffi_backend.cpython-311-x86_64-linux-gnu.so,sha256=K3Ig76G2fNGS7ef9yadiP-gNjpCHXd-J1ZNzvv6jfQs,1068624
+ cffi-1.17.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+ cffi-1.17.1.dist-info/LICENSE,sha256=BLgPWwd7vtaICM_rreteNSPyqMmpZJXFh72W3x6sKjM,1294
+ cffi-1.17.1.dist-info/METADATA,sha256=u6nuvP_qPJKu2zvIbi2zkGzVu7KjnnRIYUFyIrOY3j4,1531
+ cffi-1.17.1.dist-info/RECORD,,
+ cffi-1.17.1.dist-info/WHEEL,sha256=JyEZ6Cxo51rQOYRMkM7cW9w1CnY9FuHxSeIJCGoxjS4,151
+ cffi-1.17.1.dist-info/entry_points.txt,sha256=y6jTxnyeuLnL-XJcDv8uML3n6wyYiGRg8MTp_QGJ9Ho,75
+ cffi-1.17.1.dist-info/top_level.txt,sha256=rE7WR3rZfNKxWI9-jn6hsHCAl7MDkB-FmuQbxWjFehQ,19
+ cffi/__init__.py,sha256=H6t_ebva6EeHpUuItFLW1gbRp94eZRNJODLaWKdbx1I,513
+ cffi/__pycache__/__init__.cpython-311.pyc,,
+ cffi/__pycache__/_imp_emulation.cpython-311.pyc,,
+ cffi/__pycache__/_shimmed_dist_utils.cpython-311.pyc,,
+ cffi/__pycache__/api.cpython-311.pyc,,
+ cffi/__pycache__/backend_ctypes.cpython-311.pyc,,
+ cffi/__pycache__/cffi_opcode.cpython-311.pyc,,
+ cffi/__pycache__/commontypes.cpython-311.pyc,,
+ cffi/__pycache__/cparser.cpython-311.pyc,,
+ cffi/__pycache__/error.cpython-311.pyc,,
+ cffi/__pycache__/ffiplatform.cpython-311.pyc,,
+ cffi/__pycache__/lock.cpython-311.pyc,,
+ cffi/__pycache__/model.cpython-311.pyc,,
+ cffi/__pycache__/pkgconfig.cpython-311.pyc,,
+ cffi/__pycache__/recompiler.cpython-311.pyc,,
+ cffi/__pycache__/setuptools_ext.cpython-311.pyc,,
+ cffi/__pycache__/vengine_cpy.cpython-311.pyc,,
+ cffi/__pycache__/vengine_gen.cpython-311.pyc,,
+ cffi/__pycache__/verifier.cpython-311.pyc,,
+ cffi/_cffi_errors.h,sha256=zQXt7uR_m8gUW-fI2hJg0KoSkJFwXv8RGUkEDZ177dQ,3908
+ cffi/_cffi_include.h,sha256=Exhmgm9qzHWzWivjfTe0D7Xp4rPUkVxdNuwGhMTMzbw,15055
+ cffi/_embedding.h,sha256=EDKw5QrLvQoe3uosXB3H1xPVTYxsn33eV3A43zsA_Fw,18787
+ cffi/_imp_emulation.py,sha256=RxREG8zAbI2RPGBww90u_5fi8sWdahpdipOoPzkp7C0,2960
+ cffi/_shimmed_dist_utils.py,sha256=Bjj2wm8yZbvFvWEx5AEfmqaqZyZFhYfoyLLQHkXZuao,2230
+ cffi/api.py,sha256=alBv6hZQkjpmZplBphdaRn2lPO9-CORs_M7ixabvZWI,42169
+ cffi/backend_ctypes.py,sha256=h5ZIzLc6BFVXnGyc9xPqZWUS7qGy7yFSDqXe68Sa8z4,42454
+ cffi/cffi_opcode.py,sha256=JDV5l0R0_OadBX_uE7xPPTYtMdmpp8I9UYd6av7aiDU,5731
+ cffi/commontypes.py,sha256=7N6zPtCFlvxXMWhHV08psUjdYIK2XgsN3yo5dgua_v4,2805
+ cffi/cparser.py,sha256=0qI3mEzZSNVcCangoyXOoAcL-RhpQL08eG8798T024s,44789
+ cffi/error.py,sha256=v6xTiS4U0kvDcy4h_BDRo5v39ZQuj-IMRYLv5ETddZs,877
+ cffi/ffiplatform.py,sha256=avxFjdikYGJoEtmJO7ewVmwG_VEVl6EZ_WaNhZYCqv4,3584
+ cffi/lock.py,sha256=l9TTdwMIMpi6jDkJGnQgE9cvTIR7CAntIJr8EGHt3pY,747
+ cffi/model.py,sha256=W30UFQZE73jL5Mx5N81YT77us2W2iJjTm0XYfnwz1cg,21797
+ cffi/parse_c_type.h,sha256=OdwQfwM9ktq6vlCB43exFQmxDBtj2MBNdK8LYl15tjw,5976
+ cffi/pkgconfig.py,sha256=LP1w7vmWvmKwyqLaU1Z243FOWGNQMrgMUZrvgFuOlco,4374
+ cffi/recompiler.py,sha256=sim4Tm7lamt2Jn8uzKN0wMYp6ODByk3g7of47-h9LD4,65367
+ cffi/setuptools_ext.py,sha256=-ebj79lO2_AUH-kRcaja2pKY1Z_5tloGwsJgzK8P3Cc,8871
+ cffi/vengine_cpy.py,sha256=8UagT6ZEOZf6Dju7_CfNulue8CnsHLEzJYhnqUhoF04,43752
+ cffi/vengine_gen.py,sha256=DUlEIrDiVin1Pnhn1sfoamnS5NLqfJcOdhRoeSNeJRg,26939
+ cffi/verifier.py,sha256=oX8jpaohg2Qm3aHcznidAdvrVm5N4sQYG0a3Eo5mIl4,11182
.venv/lib/python3.11/site-packages/cffi-1.17.1.dist-info/entry_points.txt ADDED
@@ -0,0 +1,2 @@
+ [distutils.setup_keywords]
+ cffi_modules = cffi.setuptools_ext:cffi_modules
.venv/lib/python3.11/site-packages/cffi-1.17.1.dist-info/top_level.txt ADDED
@@ -0,0 +1,2 @@
+ _cffi_backend
+ cffi
.venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust.abi3.so ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42b8217456bac7ebc6e6516f1957effac96aa32d1403d6bc7879ada7d8731829
+ size 10862344
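The `_rust.abi3.so` entry above is stored as a Git LFS pointer: a tiny text stub recording the spec version, the SHA-256 of the real blob, and its size, while the ~10.8 MB binary lives in LFS storage. A hedged illustration (not part of the commit) of how that three-line format could be parsed:

```python
def parse_lfs_pointer(text: str) -> dict[str, str]:
    # Each pointer line is "<key> <value>"; split on the first space.
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

pointer = (
    "version https://git-lfs.github.com/spec/v1\n"
    "oid sha256:42b8217456bac7ebc6e6516f1957effac96aa32d1403d6bc7879ada7d8731829\n"
    "size 10862344\n"
)
info = parse_lfs_pointer(pointer)
assert info["size"] == "10862344"  # matches the pointer above
```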
.venv/lib/python3.11/site-packages/httpcore/__init__.py ADDED
@@ -0,0 +1,140 @@
+ from ._api import request, stream
+ from ._async import (
+     AsyncConnectionInterface,
+     AsyncConnectionPool,
+     AsyncHTTP2Connection,
+     AsyncHTTP11Connection,
+     AsyncHTTPConnection,
+     AsyncHTTPProxy,
+     AsyncSOCKSProxy,
+ )
+ from ._backends.base import (
+     SOCKET_OPTION,
+     AsyncNetworkBackend,
+     AsyncNetworkStream,
+     NetworkBackend,
+     NetworkStream,
+ )
+ from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream
+ from ._backends.sync import SyncBackend
+ from ._exceptions import (
+     ConnectError,
+     ConnectionNotAvailable,
+     ConnectTimeout,
+     LocalProtocolError,
+     NetworkError,
+     PoolTimeout,
+     ProtocolError,
+     ProxyError,
+     ReadError,
+     ReadTimeout,
+     RemoteProtocolError,
+     TimeoutException,
+     UnsupportedProtocol,
+     WriteError,
+     WriteTimeout,
+ )
+ from ._models import URL, Origin, Proxy, Request, Response
+ from ._ssl import default_ssl_context
+ from ._sync import (
+     ConnectionInterface,
+     ConnectionPool,
+     HTTP2Connection,
+     HTTP11Connection,
+     HTTPConnection,
+     HTTPProxy,
+     SOCKSProxy,
+ )
+
+ # The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed.
+ try:
+     from ._backends.anyio import AnyIOBackend
+ except ImportError:  # pragma: nocover
+
+     class AnyIOBackend:  # type: ignore
+         def __init__(self, *args, **kwargs):  # type: ignore
+             msg = (
+                 "Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed."
+             )
+             raise RuntimeError(msg)
+
+
+ # The 'httpcore.TrioBackend' class is conditional on 'trio' being installed.
+ try:
+     from ._backends.trio import TrioBackend
+ except ImportError:  # pragma: nocover
+
+     class TrioBackend:  # type: ignore
+         def __init__(self, *args, **kwargs):  # type: ignore
+             msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed."
+             raise RuntimeError(msg)
+
+
+ __all__ = [
+     # top-level requests
+     "request",
+     "stream",
+     # models
+     "Origin",
+     "URL",
+     "Request",
+     "Response",
+     "Proxy",
+     # async
+     "AsyncHTTPConnection",
+     "AsyncConnectionPool",
+     "AsyncHTTPProxy",
+     "AsyncHTTP11Connection",
+     "AsyncHTTP2Connection",
+     "AsyncConnectionInterface",
+     "AsyncSOCKSProxy",
+     # sync
+     "HTTPConnection",
+     "ConnectionPool",
+     "HTTPProxy",
+     "HTTP11Connection",
+     "HTTP2Connection",
+     "ConnectionInterface",
+     "SOCKSProxy",
+     # network backends, implementations
+     "SyncBackend",
+     "AnyIOBackend",
+     "TrioBackend",
+     # network backends, mock implementations
+     "AsyncMockBackend",
+     "AsyncMockStream",
+     "MockBackend",
+     "MockStream",
+     # network backends, interface
+     "AsyncNetworkStream",
+     "AsyncNetworkBackend",
+     "NetworkStream",
+     "NetworkBackend",
+     # util
+     "default_ssl_context",
+     "SOCKET_OPTION",
+     # exceptions
+     "ConnectionNotAvailable",
+     "ProxyError",
+     "ProtocolError",
+     "LocalProtocolError",
+     "RemoteProtocolError",
+     "UnsupportedProtocol",
+     "TimeoutException",
+     "PoolTimeout",
+     "ConnectTimeout",
+     "ReadTimeout",
+     "WriteTimeout",
+     "NetworkError",
+     "ConnectError",
+     "ReadError",
+     "WriteError",
+ ]
+
+ __version__ = "1.0.7"
+
+
+ __locals = locals()
+ for __name in __all__:
+     if not __name.startswith("__"):
+         setattr(__locals[__name], "__module__", "httpcore")  # noqa
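The closing loop above is worth a note: it rewrites `__module__` on every re-exported name so that objects defined in private submodules such as `_models` present themselves as part of the public `httpcore` package (cleaner reprs and docs). A short demonstration of the effect:

```python
import httpcore

# Response is defined in httpcore._models and ConnectionPool in httpcore._sync,
# but after the setattr() loop both report the public package:
assert httpcore.Response.__module__ == "httpcore"
assert httpcore.ConnectionPool.__module__ == "httpcore"
```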
.venv/lib/python3.11/site-packages/httpcore/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (3.91 kB)

.venv/lib/python3.11/site-packages/httpcore/__pycache__/_api.cpython-311.pyc ADDED
Binary file (4.08 kB)

.venv/lib/python3.11/site-packages/httpcore/__pycache__/_exceptions.cpython-311.pyc ADDED
Binary file (3.8 kB)

.venv/lib/python3.11/site-packages/httpcore/__pycache__/_models.cpython-311.pyc ADDED
Binary file (24.5 kB)

.venv/lib/python3.11/site-packages/httpcore/__pycache__/_ssl.cpython-311.pyc ADDED
Binary file (627 Bytes)

.venv/lib/python3.11/site-packages/httpcore/__pycache__/_synchronization.cpython-311.pyc ADDED
Binary file (15.8 kB)

.venv/lib/python3.11/site-packages/httpcore/__pycache__/_trace.cpython-311.pyc ADDED
Binary file (5.89 kB)

.venv/lib/python3.11/site-packages/httpcore/__pycache__/_utils.cpython-311.pyc ADDED
Binary file (1.41 kB)
.venv/lib/python3.11/site-packages/httpcore/_api.py ADDED
@@ -0,0 +1,94 @@
+ from __future__ import annotations
+
+ import contextlib
+ import typing
+
+ from ._models import URL, Extensions, HeaderTypes, Response
+ from ._sync.connection_pool import ConnectionPool
+
+
+ def request(
+     method: bytes | str,
+     url: URL | bytes | str,
+     *,
+     headers: HeaderTypes = None,
+     content: bytes | typing.Iterator[bytes] | None = None,
+     extensions: Extensions | None = None,
+ ) -> Response:
+     """
+     Sends an HTTP request, returning the response.
+
+     ```
+     response = httpcore.request("GET", "https://www.example.com/")
+     ```
+
+     Arguments:
+         method: The HTTP method for the request. Typically one of `"GET"`,
+             `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
+         url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
+             or as str/bytes.
+         headers: The HTTP request headers. Either as a dictionary of str/bytes,
+             or as a list of two-tuples of str/bytes.
+         content: The content of the request body. Either as bytes,
+             or as a bytes iterator.
+         extensions: A dictionary of optional extra information included on the request.
+             Possible keys include `"timeout"`.
+
+     Returns:
+         An instance of `httpcore.Response`.
+     """
+     with ConnectionPool() as pool:
+         return pool.request(
+             method=method,
+             url=url,
+             headers=headers,
+             content=content,
+             extensions=extensions,
+         )
+
+
+ @contextlib.contextmanager
+ def stream(
+     method: bytes | str,
+     url: URL | bytes | str,
+     *,
+     headers: HeaderTypes = None,
+     content: bytes | typing.Iterator[bytes] | None = None,
+     extensions: Extensions | None = None,
+ ) -> typing.Iterator[Response]:
+     """
+     Sends an HTTP request, returning the response within a content manager.
+
+     ```
+     with httpcore.stream("GET", "https://www.example.com/") as response:
+         ...
+     ```
+
+     When using the `stream()` function, the body of the response will not be
+     automatically read. If you want to access the response body you should
+     either use `content = response.read()`, or `for chunk in response.iter_content()`.
+
+     Arguments:
+         method: The HTTP method for the request. Typically one of `"GET"`,
+             `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
+         url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
+             or as str/bytes.
+         headers: The HTTP request headers. Either as a dictionary of str/bytes,
+             or as a list of two-tuples of str/bytes.
+         content: The content of the request body. Either as bytes,
+             or as a bytes iterator.
+         extensions: A dictionary of optional extra information included on the request.
+             Possible keys include `"timeout"`.
+
+     Returns:
+         An instance of `httpcore.Response`.
+     """
+     with ConnectionPool() as pool:
+         with pool.stream(
+             method=method,
+             url=url,
+             headers=headers,
+             content=content,
+             extensions=extensions,
+         ) as response:
+             yield response
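Taken together, the two docstrings above describe the whole convenience API of this module. A usage sketch (not part of the commit) exercising both entry points, with the `"timeout"` extension key the docstrings name; the sub-keys shown (`"connect"`, `"read"`) are assumptions based on httpcore's timeout conventions:

```python
import httpcore

# One-shot request: the response body is read automatically.
response = httpcore.request(
    "GET",
    "https://www.example.com/",
    extensions={"timeout": {"connect": 5.0, "read": 5.0}},  # assumed sub-keys
)
print(response.status, len(response.content))

# Streamed request: the body is NOT read automatically (per the docstring),
# so read it explicitly inside the context manager.
with httpcore.stream("GET", "https://www.example.com/") as response:
    body = response.read()
```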
.venv/lib/python3.11/site-packages/httpcore/_async/__init__.py ADDED
@@ -0,0 +1,39 @@
+ from .connection import AsyncHTTPConnection
+ from .connection_pool import AsyncConnectionPool
+ from .http11 import AsyncHTTP11Connection
+ from .http_proxy import AsyncHTTPProxy
+ from .interfaces import AsyncConnectionInterface
+
+ try:
+     from .http2 import AsyncHTTP2Connection
+ except ImportError:  # pragma: nocover
+
+     class AsyncHTTP2Connection:  # type: ignore
+         def __init__(self, *args, **kwargs) -> None:  # type: ignore
+             raise RuntimeError(
+                 "Attempted to use http2 support, but the `h2` package is not "
+                 "installed. Use 'pip install httpcore[http2]'."
+             )
+
+
+ try:
+     from .socks_proxy import AsyncSOCKSProxy
+ except ImportError:  # pragma: nocover
+
+     class AsyncSOCKSProxy:  # type: ignore
+         def __init__(self, *args, **kwargs) -> None:  # type: ignore
+             raise RuntimeError(
+                 "Attempted to use SOCKS support, but the `socksio` package is not "
+                 "installed. Use 'pip install httpcore[socks]'."
+             )
+
+
+ __all__ = [
+     "AsyncHTTPConnection",
+     "AsyncConnectionPool",
+     "AsyncHTTPProxy",
+     "AsyncHTTP11Connection",
+     "AsyncHTTP2Connection",
+     "AsyncConnectionInterface",
+     "AsyncSOCKSProxy",
+ ]
.venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (1.91 kB)

.venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/connection.cpython-311.pyc ADDED
Binary file (12.3 kB)

.venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/connection_pool.cpython-311.pyc ADDED
Binary file (21.6 kB)

.venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/http11.cpython-311.pyc ADDED
Binary file (21.5 kB)

.venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/http2.cpython-311.pyc ADDED
Binary file (32.6 kB)

.venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/http_proxy.cpython-311.pyc ADDED
Binary file (18.7 kB)

.venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/interfaces.cpython-311.pyc ADDED
Binary file (6.31 kB)

.venv/lib/python3.11/site-packages/httpcore/_async/__pycache__/socks_proxy.cpython-311.pyc ADDED
Binary file (17.3 kB)
.venv/lib/python3.11/site-packages/httpcore/_async/connection.py ADDED
@@ -0,0 +1,222 @@
+ from __future__ import annotations
+
+ import itertools
+ import logging
+ import ssl
+ import types
+ import typing
+
+ from .._backends.auto import AutoBackend
+ from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
+ from .._exceptions import ConnectError, ConnectTimeout
+ from .._models import Origin, Request, Response
+ from .._ssl import default_ssl_context
+ from .._synchronization import AsyncLock
+ from .._trace import Trace
+ from .http11 import AsyncHTTP11Connection
+ from .interfaces import AsyncConnectionInterface
+
+ RETRIES_BACKOFF_FACTOR = 0.5  # 0s, 0.5s, 1s, 2s, 4s, etc.
+
+
+ logger = logging.getLogger("httpcore.connection")
+
+
+ def exponential_backoff(factor: float) -> typing.Iterator[float]:
+     """
+     Generate a geometric sequence that has a ratio of 2 and starts with 0.
+
+     For example:
+     - `factor = 2`: `0, 2, 4, 8, 16, 32, 64, ...`
+     - `factor = 3`: `0, 3, 6, 12, 24, 48, 96, ...`
+     """
+     yield 0
+     for n in itertools.count():
+         yield factor * 2**n
+
+
+ class AsyncHTTPConnection(AsyncConnectionInterface):
+     def __init__(
+         self,
+         origin: Origin,
+         ssl_context: ssl.SSLContext | None = None,
+         keepalive_expiry: float | None = None,
+         http1: bool = True,
+         http2: bool = False,
+         retries: int = 0,
+         local_address: str | None = None,
+         uds: str | None = None,
+         network_backend: AsyncNetworkBackend | None = None,
+         socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
+     ) -> None:
+         self._origin = origin
+         self._ssl_context = ssl_context
+         self._keepalive_expiry = keepalive_expiry
+         self._http1 = http1
+         self._http2 = http2
+         self._retries = retries
+         self._local_address = local_address
+         self._uds = uds
+
+         self._network_backend: AsyncNetworkBackend = (
+             AutoBackend() if network_backend is None else network_backend
+         )
+         self._connection: AsyncConnectionInterface | None = None
+         self._connect_failed: bool = False
+         self._request_lock = AsyncLock()
+         self._socket_options = socket_options
+
+     async def handle_async_request(self, request: Request) -> Response:
+         if not self.can_handle_request(request.url.origin):
+             raise RuntimeError(
+                 f"Attempted to send request to {request.url.origin} on connection to {self._origin}"
+             )
+
+         try:
+             async with self._request_lock:
+                 if self._connection is None:
+                     stream = await self._connect(request)
+
+                     ssl_object = stream.get_extra_info("ssl_object")
+                     http2_negotiated = (
+                         ssl_object is not None
+                         and ssl_object.selected_alpn_protocol() == "h2"
+                     )
+                     if http2_negotiated or (self._http2 and not self._http1):
+                         from .http2 import AsyncHTTP2Connection
+
+                         self._connection = AsyncHTTP2Connection(
+                             origin=self._origin,
+                             stream=stream,
+                             keepalive_expiry=self._keepalive_expiry,
+                         )
+                     else:
+                         self._connection = AsyncHTTP11Connection(
+                             origin=self._origin,
+                             stream=stream,
+                             keepalive_expiry=self._keepalive_expiry,
+                         )
+         except BaseException as exc:
+             self._connect_failed = True
+             raise exc
+
+         return await self._connection.handle_async_request(request)
+
+     async def _connect(self, request: Request) -> AsyncNetworkStream:
+         timeouts = request.extensions.get("timeout", {})
+         sni_hostname = request.extensions.get("sni_hostname", None)
+         timeout = timeouts.get("connect", None)
+
+         retries_left = self._retries
+         delays = exponential_backoff(factor=RETRIES_BACKOFF_FACTOR)
+
+         while True:
+             try:
+                 if self._uds is None:
+                     kwargs = {
+                         "host": self._origin.host.decode("ascii"),
+                         "port": self._origin.port,
+                         "local_address": self._local_address,
+                         "timeout": timeout,
+                         "socket_options": self._socket_options,
+                     }
+                     async with Trace("connect_tcp", logger, request, kwargs) as trace:
+                         stream = await self._network_backend.connect_tcp(**kwargs)
+                         trace.return_value = stream
+                 else:
+                     kwargs = {
+                         "path": self._uds,
+                         "timeout": timeout,
+                         "socket_options": self._socket_options,
+                     }
+                     async with Trace(
+                         "connect_unix_socket", logger, request, kwargs
+                     ) as trace:
+                         stream = await self._network_backend.connect_unix_socket(
+                             **kwargs
+                         )
+                         trace.return_value = stream
+
+                 if self._origin.scheme in (b"https", b"wss"):
+                     ssl_context = (
+                         default_ssl_context()
+                         if self._ssl_context is None
+                         else self._ssl_context
+                     )
+                     alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
+                     ssl_context.set_alpn_protocols(alpn_protocols)
+
+                     kwargs = {
+                         "ssl_context": ssl_context,
+                         "server_hostname": sni_hostname
+                         or self._origin.host.decode("ascii"),
+                         "timeout": timeout,
+                     }
+                     async with Trace("start_tls", logger, request, kwargs) as trace:
+                         stream = await stream.start_tls(**kwargs)
+                         trace.return_value = stream
+                 return stream
+             except (ConnectError, ConnectTimeout):
+                 if retries_left <= 0:
+                     raise
+                 retries_left -= 1
+                 delay = next(delays)
+                 async with Trace("retry", logger, request, kwargs) as trace:
+                     await self._network_backend.sleep(delay)
+
+     def can_handle_request(self, origin: Origin) -> bool:
+         return origin == self._origin
+
+     async def aclose(self) -> None:
+         if self._connection is not None:
+             async with Trace("close", logger, None, {}):
+                 await self._connection.aclose()
+
+     def is_available(self) -> bool:
+         if self._connection is None:
+             # If HTTP/2 support is enabled, and the resulting connection could
+             # end up as HTTP/2 then we should indicate the connection as being
+             # available to service multiple requests.
+             return (
+                 self._http2
+                 and (self._origin.scheme == b"https" or not self._http1)
+                 and not self._connect_failed
+             )
+         return self._connection.is_available()
+
+     def has_expired(self) -> bool:
+         if self._connection is None:
+             return self._connect_failed
+         return self._connection.has_expired()
+
+     def is_idle(self) -> bool:
+         if self._connection is None:
+             return self._connect_failed
+         return self._connection.is_idle()
+
+     def is_closed(self) -> bool:
+         if self._connection is None:
+             return self._connect_failed
+         return self._connection.is_closed()
+
+     def info(self) -> str:
+         if self._connection is None:
+             return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"
+         return self._connection.info()
+
+     def __repr__(self) -> str:
+         return f"<{self.__class__.__name__} [{self.info()}]>"
+
+     # These context managers are not used in the standard flow, but are
+     # useful for testing or working with connection instances directly.
+
+     async def __aenter__(self) -> AsyncHTTPConnection:
+         return self
+
+     async def __aexit__(
+         self,
+         exc_type: type[BaseException] | None = None,
+         exc_value: BaseException | None = None,
+         traceback: types.TracebackType | None = None,
+     ) -> None:
+         await self.aclose()
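One detail of `_connect()` above deserves a worked example: with `RETRIES_BACKOFF_FACTOR = 0.5`, the `exponential_backoff()` generator makes the first retry immediate and then doubles the wait. A standalone copy of the generator (same body as above) showing the sequence it produces:

```python
import itertools

# Standalone copy of exponential_backoff() from the file above.
def exponential_backoff(factor: float):
    yield 0  # the first retry happens immediately
    for n in itertools.count():
        yield factor * 2**n  # then 0.5s, 1s, 2s, 4s, ... for factor=0.5

delays = exponential_backoff(0.5)
print([next(delays) for _ in range(5)])  # [0, 0.5, 1.0, 2.0, 4.0]
```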
.venv/lib/python3.11/site-packages/httpcore/_async/connection_pool.py ADDED
@@ -0,0 +1,420 @@
+ from __future__ import annotations
+
+ import ssl
+ import sys
+ import types
+ import typing
+
+ from .._backends.auto import AutoBackend
+ from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend
+ from .._exceptions import ConnectionNotAvailable, UnsupportedProtocol
+ from .._models import Origin, Proxy, Request, Response
+ from .._synchronization import AsyncEvent, AsyncShieldCancellation, AsyncThreadLock
+ from .connection import AsyncHTTPConnection
+ from .interfaces import AsyncConnectionInterface, AsyncRequestInterface
+
+
+ class AsyncPoolRequest:
+     def __init__(self, request: Request) -> None:
+         self.request = request
+         self.connection: AsyncConnectionInterface | None = None
+         self._connection_acquired = AsyncEvent()
+
+     def assign_to_connection(self, connection: AsyncConnectionInterface | None) -> None:
+         self.connection = connection
+         self._connection_acquired.set()
+
+     def clear_connection(self) -> None:
+         self.connection = None
+         self._connection_acquired = AsyncEvent()
+
+     async def wait_for_connection(
+         self, timeout: float | None = None
+     ) -> AsyncConnectionInterface:
+         if self.connection is None:
+             await self._connection_acquired.wait(timeout=timeout)
+         assert self.connection is not None
+         return self.connection
+
+     def is_queued(self) -> bool:
+         return self.connection is None
+
+
+ class AsyncConnectionPool(AsyncRequestInterface):
+     """
+     A connection pool for making HTTP requests.
+     """
+
+     def __init__(
+         self,
+         ssl_context: ssl.SSLContext | None = None,
+         proxy: Proxy | None = None,
+         max_connections: int | None = 10,
+         max_keepalive_connections: int | None = None,
+         keepalive_expiry: float | None = None,
+         http1: bool = True,
+         http2: bool = False,
+         retries: int = 0,
+         local_address: str | None = None,
+         uds: str | None = None,
+         network_backend: AsyncNetworkBackend | None = None,
+         socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
+     ) -> None:
+         """
+         A connection pool for making HTTP requests.
+
+         Parameters:
+             ssl_context: An SSL context to use for verifying connections.
+                 If not specified, the default `httpcore.default_ssl_context()`
+                 will be used.
+             max_connections: The maximum number of concurrent HTTP connections that
+                 the pool should allow. Any attempt to send a request on a pool that
+                 would exceed this amount will block until a connection is available.
+             max_keepalive_connections: The maximum number of idle HTTP connections
+                 that will be maintained in the pool.
+             keepalive_expiry: The duration in seconds that an idle HTTP connection
+                 may be maintained for before being expired from the pool.
+             http1: A boolean indicating if HTTP/1.1 requests should be supported
+                 by the connection pool. Defaults to True.
+             http2: A boolean indicating if HTTP/2 requests should be supported by
+                 the connection pool. Defaults to False.
+             retries: The maximum number of retries when trying to establish a
+                 connection.
+             local_address: Local address to connect from. Can also be used to connect
+                 using a particular address family. Using `local_address="0.0.0.0"`
+                 will connect using an `AF_INET` address (IPv4), while using
+                 `local_address="::"` will connect using an `AF_INET6` address (IPv6).
+             uds: Path to a Unix Domain Socket to use instead of TCP sockets.
+             network_backend: A backend instance to use for handling network I/O.
+             socket_options: Socket options that have to be included
+                 in the TCP socket when the connection was established.
+         """
+         self._ssl_context = ssl_context
+         self._proxy = proxy
+         self._max_connections = (
+             sys.maxsize if max_connections is None else max_connections
+         )
+         self._max_keepalive_connections = (
+             sys.maxsize
+             if max_keepalive_connections is None
+             else max_keepalive_connections
+         )
+         self._max_keepalive_connections = min(
+             self._max_connections, self._max_keepalive_connections
+         )
+
+         self._keepalive_expiry = keepalive_expiry
+         self._http1 = http1
+         self._http2 = http2
+         self._retries = retries
+         self._local_address = local_address
+         self._uds = uds
+
+         self._network_backend = (
+             AutoBackend() if network_backend is None else network_backend
+         )
+         self._socket_options = socket_options
+
+         # The mutable state on a connection pool is the queue of incoming requests,
+         # and the set of connections that are servicing those requests.
+         self._connections: list[AsyncConnectionInterface] = []
+         self._requests: list[AsyncPoolRequest] = []
+
+         # We only mutate the state of the connection pool within an 'optional_thread_lock'
+         # context. This holds a threading lock unless we're running in async mode,
+         # in which case it is a no-op.
+         self._optional_thread_lock = AsyncThreadLock()
+
+     def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
+         if self._proxy is not None:
+             if self._proxy.url.scheme in (b"socks5", b"socks5h"):
+                 from .socks_proxy import AsyncSocks5Connection
+
+                 return AsyncSocks5Connection(
+                     proxy_origin=self._proxy.url.origin,
+                     proxy_auth=self._proxy.auth,
+                     remote_origin=origin,
+                     ssl_context=self._ssl_context,
+                     keepalive_expiry=self._keepalive_expiry,
+                     http1=self._http1,
+                     http2=self._http2,
+                     network_backend=self._network_backend,
+                 )
+             elif origin.scheme == b"http":
+                 from .http_proxy import AsyncForwardHTTPConnection
+
+                 return AsyncForwardHTTPConnection(
+                     proxy_origin=self._proxy.url.origin,
+                     proxy_headers=self._proxy.headers,
+                     proxy_ssl_context=self._proxy.ssl_context,
+                     remote_origin=origin,
+                     keepalive_expiry=self._keepalive_expiry,
+                     network_backend=self._network_backend,
+                 )
+             from .http_proxy import AsyncTunnelHTTPConnection
+
+             return AsyncTunnelHTTPConnection(
+                 proxy_origin=self._proxy.url.origin,
+                 proxy_headers=self._proxy.headers,
+                 proxy_ssl_context=self._proxy.ssl_context,
+                 remote_origin=origin,
+                 ssl_context=self._ssl_context,
+                 keepalive_expiry=self._keepalive_expiry,
+                 http1=self._http1,
+                 http2=self._http2,
+                 network_backend=self._network_backend,
+             )
+
+         return AsyncHTTPConnection(
+             origin=origin,
+             ssl_context=self._ssl_context,
+             keepalive_expiry=self._keepalive_expiry,
+             http1=self._http1,
+             http2=self._http2,
+             retries=self._retries,
+             local_address=self._local_address,
+             uds=self._uds,
+             network_backend=self._network_backend,
+             socket_options=self._socket_options,
+         )
+
+     @property
+     def connections(self) -> list[AsyncConnectionInterface]:
+         """
+         Return a list of the connections currently in the pool.
+
+         For example:
+
+         ```python
+         >>> pool.connections
+         [
+         <AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, ACTIVE, Request Count: 6]>,
192
+ <AsyncHTTPConnection ['https://example.com:443', HTTP/1.1, IDLE, Request Count: 9]> ,
193
+ <AsyncHTTPConnection ['http://example.com:80', HTTP/1.1, IDLE, Request Count: 1]>,
194
+ ]
195
+ ```
196
+ """
197
+ return list(self._connections)
198
+
199
+ async def handle_async_request(self, request: Request) -> Response:
200
+ """
201
+ Send an HTTP request, and return an HTTP response.
202
+
203
+ This is the core implementation that is called into by `.request()` or `.stream()`.
204
+ """
205
+ scheme = request.url.scheme.decode()
206
+ if scheme == "":
207
+ raise UnsupportedProtocol(
208
+ "Request URL is missing an 'http://' or 'https://' protocol."
209
+ )
210
+ if scheme not in ("http", "https", "ws", "wss"):
211
+ raise UnsupportedProtocol(
212
+ f"Request URL has an unsupported protocol '{scheme}://'."
213
+ )
214
+
215
+ timeouts = request.extensions.get("timeout", {})
216
+ timeout = timeouts.get("pool", None)
217
+
218
+ with self._optional_thread_lock:
219
+ # Add the incoming request to our request queue.
220
+ pool_request = AsyncPoolRequest(request)
221
+ self._requests.append(pool_request)
222
+
223
+ try:
224
+ while True:
225
+ with self._optional_thread_lock:
226
+ # Assign incoming requests to available connections,
227
+ # closing or creating new connections as required.
228
+ closing = self._assign_requests_to_connections()
229
+ await self._close_connections(closing)
230
+
231
+ # Wait until this request has an assigned connection.
232
+ connection = await pool_request.wait_for_connection(timeout=timeout)
233
+
234
+ try:
235
+ # Send the request on the assigned connection.
236
+ response = await connection.handle_async_request(
237
+ pool_request.request
238
+ )
239
+ except ConnectionNotAvailable:
240
+ # In some cases a connection may initially be available to
241
+ # handle a request, but then become unavailable.
242
+ #
243
+ # In this case we clear the connection and try again.
244
+ pool_request.clear_connection()
245
+ else:
246
+ break # pragma: nocover
247
+
248
+ except BaseException as exc:
249
+ with self._optional_thread_lock:
250
+ # For any exception or cancellation we remove the request from
251
+ # the queue, and then re-assign requests to connections.
252
+ self._requests.remove(pool_request)
253
+ closing = self._assign_requests_to_connections()
254
+
255
+ await self._close_connections(closing)
256
+ raise exc from None
257
+
258
+ # Return the response. Note that in this case we still have to manage
259
+ # the point at which the response is closed.
260
+ assert isinstance(response.stream, typing.AsyncIterable)
261
+ return Response(
262
+ status=response.status,
263
+ headers=response.headers,
264
+ content=PoolByteStream(
265
+ stream=response.stream, pool_request=pool_request, pool=self
266
+ ),
267
+ extensions=response.extensions,
268
+ )
269
+
270
+ def _assign_requests_to_connections(self) -> list[AsyncConnectionInterface]:
271
+ """
272
+ Manage the state of the connection pool, assigning incoming
273
+ requests to connections as available.
274
+
275
+ Called whenever a new request is added or removed from the pool.
276
+
277
+ Any closing connections are returned, allowing the I/O for closing
278
+ those connections to be handled seperately.
279
+ """
280
+ closing_connections = []
281
+
282
+ # First we handle cleaning up any connections that are closed,
283
+ # have expired their keep-alive, or surplus idle connections.
284
+ for connection in list(self._connections):
285
+ if connection.is_closed():
286
+ # log: "removing closed connection"
287
+ self._connections.remove(connection)
288
+ elif connection.has_expired():
289
+ # log: "closing expired connection"
290
+ self._connections.remove(connection)
291
+ closing_connections.append(connection)
292
+ elif (
293
+ connection.is_idle()
294
+ and len([connection.is_idle() for connection in self._connections])
295
+ > self._max_keepalive_connections
296
+ ):
297
+ # log: "closing idle connection"
298
+ self._connections.remove(connection)
299
+ closing_connections.append(connection)
300
+
301
+ # Assign queued requests to connections.
302
+ queued_requests = [request for request in self._requests if request.is_queued()]
303
+ for pool_request in queued_requests:
304
+ origin = pool_request.request.url.origin
305
+ available_connections = [
306
+ connection
307
+ for connection in self._connections
308
+ if connection.can_handle_request(origin) and connection.is_available()
309
+ ]
310
+ idle_connections = [
311
+ connection for connection in self._connections if connection.is_idle()
312
+ ]
313
+
314
+ # There are three cases for how we may be able to handle the request:
315
+ #
316
+ # 1. There is an existing connection that can handle the request.
317
+ # 2. We can create a new connection to handle the request.
318
+ # 3. We can close an idle connection and then create a new connection
319
+ # to handle the request.
320
+ if available_connections:
321
+ # log: "reusing existing connection"
322
+ connection = available_connections[0]
323
+ pool_request.assign_to_connection(connection)
324
+ elif len(self._connections) < self._max_connections:
325
+ # log: "creating new connection"
326
+ connection = self.create_connection(origin)
327
+ self._connections.append(connection)
328
+ pool_request.assign_to_connection(connection)
329
+ elif idle_connections:
330
+ # log: "closing idle connection"
331
+ connection = idle_connections[0]
332
+ self._connections.remove(connection)
333
+ closing_connections.append(connection)
334
+ # log: "creating new connection"
335
+ connection = self.create_connection(origin)
336
+ self._connections.append(connection)
337
+ pool_request.assign_to_connection(connection)
338
+
339
+ return closing_connections
340
+
341
+ async def _close_connections(self, closing: list[AsyncConnectionInterface]) -> None:
342
+ # Close connections which have been removed from the pool.
343
+ with AsyncShieldCancellation():
344
+ for connection in closing:
345
+ await connection.aclose()
346
+
347
+ async def aclose(self) -> None:
348
+ # Explicitly close the connection pool.
349
+ # Clears all existing requests and connections.
350
+ with self._optional_thread_lock:
351
+ closing_connections = list(self._connections)
352
+ self._connections = []
353
+ await self._close_connections(closing_connections)
354
+
355
+ async def __aenter__(self) -> AsyncConnectionPool:
356
+ return self
357
+
358
+ async def __aexit__(
359
+ self,
360
+ exc_type: type[BaseException] | None = None,
361
+ exc_value: BaseException | None = None,
362
+ traceback: types.TracebackType | None = None,
363
+ ) -> None:
364
+ await self.aclose()
365
+
366
+ def __repr__(self) -> str:
367
+ class_name = self.__class__.__name__
368
+ with self._optional_thread_lock:
369
+ request_is_queued = [request.is_queued() for request in self._requests]
370
+ connection_is_idle = [
371
+ connection.is_idle() for connection in self._connections
372
+ ]
373
+
374
+ num_active_requests = request_is_queued.count(False)
375
+ num_queued_requests = request_is_queued.count(True)
376
+ num_active_connections = connection_is_idle.count(False)
377
+ num_idle_connections = connection_is_idle.count(True)
378
+
379
+ requests_info = (
380
+ f"Requests: {num_active_requests} active, {num_queued_requests} queued"
381
+ )
382
+ connection_info = (
383
+ f"Connections: {num_active_connections} active, {num_idle_connections} idle"
384
+ )
385
+
386
+ return f"<{class_name} [{requests_info} | {connection_info}]>"
387
+
388
+
389
+ class PoolByteStream:
390
+ def __init__(
391
+ self,
392
+ stream: typing.AsyncIterable[bytes],
393
+ pool_request: AsyncPoolRequest,
394
+ pool: AsyncConnectionPool,
395
+ ) -> None:
396
+ self._stream = stream
397
+ self._pool_request = pool_request
398
+ self._pool = pool
399
+ self._closed = False
400
+
401
+ async def __aiter__(self) -> typing.AsyncIterator[bytes]:
402
+ try:
403
+ async for part in self._stream:
404
+ yield part
405
+ except BaseException as exc:
406
+ await self.aclose()
407
+ raise exc from None
408
+
409
+ async def aclose(self) -> None:
410
+ if not self._closed:
411
+ self._closed = True
412
+ with AsyncShieldCancellation():
413
+ if hasattr(self._stream, "aclose"):
414
+ await self._stream.aclose()
415
+
416
+ with self._pool._optional_thread_lock:
417
+ self._pool._requests.remove(self._pool_request)
418
+ closing = self._pool._assign_requests_to_connections()
419
+
420
+ await self._pool._close_connections(closing)
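
Taken together, `AsyncPoolRequest`, `AsyncConnectionPool`, and `PoolByteStream` implement the request lifecycle described in the comments above. A minimal usage sketch, assuming an asyncio event loop; the target URL and the timeout value are placeholders, not part of this file:

```python
import asyncio

import httpcore


async def main() -> None:
    # The pool is an async context manager; on exit it calls `aclose()`,
    # closing any connections still held open.
    async with httpcore.AsyncConnectionPool(max_connections=10) as pool:
        # The "pool" timeout bounds how long `wait_for_connection()` may block.
        response = await pool.request(
            "GET",
            "https://www.example.com/",
            extensions={"timeout": {"pool": 5.0}},
        )
        print(response.status)
        print(pool.connections)  # the property documented above


asyncio.run(main())
```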
.venv/lib/python3.11/site-packages/httpcore/_async/http11.py ADDED
@@ -0,0 +1,379 @@
+ from __future__ import annotations
+
+ import enum
+ import logging
+ import ssl
+ import time
+ import types
+ import typing
+
+ import h11
+
+ from .._backends.base import AsyncNetworkStream
+ from .._exceptions import (
+     ConnectionNotAvailable,
+     LocalProtocolError,
+     RemoteProtocolError,
+     WriteError,
+     map_exceptions,
+ )
+ from .._models import Origin, Request, Response
+ from .._synchronization import AsyncLock, AsyncShieldCancellation
+ from .._trace import Trace
+ from .interfaces import AsyncConnectionInterface
+
+ logger = logging.getLogger("httpcore.http11")
+
+
+ # A subset of `h11.Event` types supported by `_send_event`
+ H11SendEvent = typing.Union[
+     h11.Request,
+     h11.Data,
+     h11.EndOfMessage,
+ ]
+
+
+ class HTTPConnectionState(enum.IntEnum):
+     NEW = 0
+     ACTIVE = 1
+     IDLE = 2
+     CLOSED = 3
+
+
+ class AsyncHTTP11Connection(AsyncConnectionInterface):
+     READ_NUM_BYTES = 64 * 1024
+     MAX_INCOMPLETE_EVENT_SIZE = 100 * 1024
+
+     def __init__(
+         self,
+         origin: Origin,
+         stream: AsyncNetworkStream,
+         keepalive_expiry: float | None = None,
+     ) -> None:
+         self._origin = origin
+         self._network_stream = stream
+         self._keepalive_expiry: float | None = keepalive_expiry
+         self._expire_at: float | None = None
+         self._state = HTTPConnectionState.NEW
+         self._state_lock = AsyncLock()
+         self._request_count = 0
+         self._h11_state = h11.Connection(
+             our_role=h11.CLIENT,
+             max_incomplete_event_size=self.MAX_INCOMPLETE_EVENT_SIZE,
+         )
+
+     async def handle_async_request(self, request: Request) -> Response:
+         if not self.can_handle_request(request.url.origin):
+             raise RuntimeError(
+                 f"Attempted to send request to {request.url.origin} on connection "
+                 f"to {self._origin}"
+             )
+
+         async with self._state_lock:
+             if self._state in (HTTPConnectionState.NEW, HTTPConnectionState.IDLE):
+                 self._request_count += 1
+                 self._state = HTTPConnectionState.ACTIVE
+                 self._expire_at = None
+             else:
+                 raise ConnectionNotAvailable()
+
+         try:
+             kwargs = {"request": request}
+             try:
+                 async with Trace(
+                     "send_request_headers", logger, request, kwargs
+                 ) as trace:
+                     await self._send_request_headers(**kwargs)
+                 async with Trace("send_request_body", logger, request, kwargs) as trace:
+                     await self._send_request_body(**kwargs)
+             except WriteError:
+                 # If we get a write error while we're writing the request,
+                 # then we suppress this error and move on to attempting to
+                 # read the response. Servers can sometimes close the request
+                 # pre-emptively and then respond with a well-formed HTTP
+                 # error response.
+                 pass
+
+             async with Trace(
+                 "receive_response_headers", logger, request, kwargs
+             ) as trace:
+                 (
+                     http_version,
+                     status,
+                     reason_phrase,
+                     headers,
+                     trailing_data,
+                 ) = await self._receive_response_headers(**kwargs)
+                 trace.return_value = (
+                     http_version,
+                     status,
+                     reason_phrase,
+                     headers,
+                 )
+
+             network_stream = self._network_stream
+
+             # CONNECT or Upgrade request
+             if (status == 101) or (
+                 (request.method == b"CONNECT") and (200 <= status < 300)
+             ):
+                 network_stream = AsyncHTTP11UpgradeStream(network_stream, trailing_data)
+
+             return Response(
+                 status=status,
+                 headers=headers,
+                 content=HTTP11ConnectionByteStream(self, request),
+                 extensions={
+                     "http_version": http_version,
+                     "reason_phrase": reason_phrase,
+                     "network_stream": network_stream,
+                 },
+             )
+         except BaseException as exc:
+             with AsyncShieldCancellation():
+                 async with Trace("response_closed", logger, request) as trace:
+                     await self._response_closed()
+             raise exc
+
+     # Sending the request...
+
+     async def _send_request_headers(self, request: Request) -> None:
+         timeouts = request.extensions.get("timeout", {})
+         timeout = timeouts.get("write", None)
+
+         with map_exceptions({h11.LocalProtocolError: LocalProtocolError}):
+             event = h11.Request(
+                 method=request.method,
+                 target=request.url.target,
+                 headers=request.headers,
+             )
+         await self._send_event(event, timeout=timeout)
+
+     async def _send_request_body(self, request: Request) -> None:
+         timeouts = request.extensions.get("timeout", {})
+         timeout = timeouts.get("write", None)
+
+         assert isinstance(request.stream, typing.AsyncIterable)
+         async for chunk in request.stream:
+             event = h11.Data(data=chunk)
+             await self._send_event(event, timeout=timeout)
+
+         await self._send_event(h11.EndOfMessage(), timeout=timeout)
+
+     async def _send_event(self, event: h11.Event, timeout: float | None = None) -> None:
+         bytes_to_send = self._h11_state.send(event)
+         if bytes_to_send is not None:
+             await self._network_stream.write(bytes_to_send, timeout=timeout)
+
+     # Receiving the response...
+
+     async def _receive_response_headers(
+         self, request: Request
+     ) -> tuple[bytes, int, bytes, list[tuple[bytes, bytes]], bytes]:
+         timeouts = request.extensions.get("timeout", {})
+         timeout = timeouts.get("read", None)
+
+         while True:
+             event = await self._receive_event(timeout=timeout)
+             if isinstance(event, h11.Response):
+                 break
+             if (
+                 isinstance(event, h11.InformationalResponse)
+                 and event.status_code == 101
+             ):
+                 break
+
+         http_version = b"HTTP/" + event.http_version
+
+         # h11 version 0.11+ supports a `raw_items` interface to get the
+         # raw header casing, rather than the enforced lowercase headers.
+         headers = event.headers.raw_items()
+
+         trailing_data, _ = self._h11_state.trailing_data
+
+         return http_version, event.status_code, event.reason, headers, trailing_data
+
+     async def _receive_response_body(
+         self, request: Request
+     ) -> typing.AsyncIterator[bytes]:
+         timeouts = request.extensions.get("timeout", {})
+         timeout = timeouts.get("read", None)
+
+         while True:
+             event = await self._receive_event(timeout=timeout)
+             if isinstance(event, h11.Data):
+                 yield bytes(event.data)
+             elif isinstance(event, (h11.EndOfMessage, h11.PAUSED)):
+                 break
+
+     async def _receive_event(
+         self, timeout: float | None = None
+     ) -> h11.Event | type[h11.PAUSED]:
+         while True:
+             with map_exceptions({h11.RemoteProtocolError: RemoteProtocolError}):
+                 event = self._h11_state.next_event()
+
+             if event is h11.NEED_DATA:
+                 data = await self._network_stream.read(
+                     self.READ_NUM_BYTES, timeout=timeout
+                 )
+
+                 # If we feed this case through h11 we'll raise an exception like:
+                 #
+                 #     httpcore.RemoteProtocolError: can't handle event type
+                 #     ConnectionClosed when role=SERVER and state=SEND_RESPONSE
+                 #
+                 # Which is accurate, but not very informative from an end-user
+                 # perspective. Instead we handle this case distinctly and treat
+                 # it as a ConnectError.
+                 if data == b"" and self._h11_state.their_state == h11.SEND_RESPONSE:
+                     msg = "Server disconnected without sending a response."
+                     raise RemoteProtocolError(msg)
+
+                 self._h11_state.receive_data(data)
+             else:
+                 # mypy fails to narrow the type in the if statement above
+                 return event  # type: ignore[return-value]
+
+     async def _response_closed(self) -> None:
+         async with self._state_lock:
+             if (
+                 self._h11_state.our_state is h11.DONE
+                 and self._h11_state.their_state is h11.DONE
+             ):
+                 self._state = HTTPConnectionState.IDLE
+                 self._h11_state.start_next_cycle()
+                 if self._keepalive_expiry is not None:
+                     now = time.monotonic()
+                     self._expire_at = now + self._keepalive_expiry
+             else:
+                 await self.aclose()
+
+     # Once the connection is no longer required...
+
+     async def aclose(self) -> None:
+         # Note that this method unilaterally closes the connection, and does
+         # not have any kind of locking in place around it.
+         self._state = HTTPConnectionState.CLOSED
+         await self._network_stream.aclose()
+
+     # The AsyncConnectionInterface methods provide information about the state of
+     # the connection, allowing for a connection pooling implementation to
+     # determine when to reuse and when to close the connection...
+
+     def can_handle_request(self, origin: Origin) -> bool:
+         return origin == self._origin
+
+     def is_available(self) -> bool:
+         # Note that HTTP/1.1 connections in the "NEW" state are not treated as
+         # being "available". The control flow which created the connection will
+         # be able to send an outgoing request, but the connection will not be
+         # acquired from the connection pool for any other request.
+         return self._state == HTTPConnectionState.IDLE
+
+     def has_expired(self) -> bool:
+         now = time.monotonic()
+         keepalive_expired = self._expire_at is not None and now > self._expire_at
+
+         # If the HTTP connection is idle but the socket is readable, then the
+         # only valid state is that the socket is about to return b"", indicating
+         # a server-initiated disconnect.
+         server_disconnected = (
+             self._state == HTTPConnectionState.IDLE
+             and self._network_stream.get_extra_info("is_readable")
+         )
+
+         return keepalive_expired or server_disconnected
+
+     def is_idle(self) -> bool:
+         return self._state == HTTPConnectionState.IDLE
+
+     def is_closed(self) -> bool:
+         return self._state == HTTPConnectionState.CLOSED
+
+     def info(self) -> str:
+         origin = str(self._origin)
+         return (
+             f"{origin!r}, HTTP/1.1, {self._state.name}, "
+             f"Request Count: {self._request_count}"
+         )
+
+     def __repr__(self) -> str:
+         class_name = self.__class__.__name__
+         origin = str(self._origin)
+         return (
+             f"<{class_name} [{origin!r}, {self._state.name}, "
+             f"Request Count: {self._request_count}]>"
+         )
+
+     # These context managers are not used in the standard flow, but are
+     # useful for testing or working with connection instances directly.
+
+     async def __aenter__(self) -> AsyncHTTP11Connection:
+         return self
+
+     async def __aexit__(
+         self,
+         exc_type: type[BaseException] | None = None,
+         exc_value: BaseException | None = None,
+         traceback: types.TracebackType | None = None,
+     ) -> None:
+         await self.aclose()
+
+
+ class HTTP11ConnectionByteStream:
+     def __init__(self, connection: AsyncHTTP11Connection, request: Request) -> None:
+         self._connection = connection
+         self._request = request
+         self._closed = False
+
+     async def __aiter__(self) -> typing.AsyncIterator[bytes]:
+         kwargs = {"request": self._request}
+         try:
+             async with Trace("receive_response_body", logger, self._request, kwargs):
+                 async for chunk in self._connection._receive_response_body(**kwargs):
+                     yield chunk
+         except BaseException as exc:
+             # If we get an exception while streaming the response,
+             # we want to close the response (and possibly the connection)
+             # before raising that exception.
+             with AsyncShieldCancellation():
+                 await self.aclose()
+             raise exc
+
+     async def aclose(self) -> None:
+         if not self._closed:
+             self._closed = True
+             async with Trace("response_closed", logger, self._request):
+                 await self._connection._response_closed()
+
+
+ class AsyncHTTP11UpgradeStream(AsyncNetworkStream):
+     def __init__(self, stream: AsyncNetworkStream, leading_data: bytes) -> None:
+         self._stream = stream
+         self._leading_data = leading_data
+
+     async def read(self, max_bytes: int, timeout: float | None = None) -> bytes:
+         if self._leading_data:
+             buffer = self._leading_data[:max_bytes]
+             self._leading_data = self._leading_data[max_bytes:]
+             return buffer
+         else:
+             return await self._stream.read(max_bytes, timeout)
+
+     async def write(self, buffer: bytes, timeout: float | None = None) -> None:
+         await self._stream.write(buffer, timeout)
+
+     async def aclose(self) -> None:
+         await self._stream.aclose()
+
+     async def start_tls(
+         self,
+         ssl_context: ssl.SSLContext,
+         server_hostname: str | None = None,
+         timeout: float | None = None,
+     ) -> AsyncNetworkStream:
+         return await self._stream.start_tls(ssl_context, server_hostname, timeout)
+
+     def get_extra_info(self, info: str) -> typing.Any:
+         return self._stream.get_extra_info(info)
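
The connection above is a thin async wrapper around the `h11` state machine. A minimal sketch of that underlying send/receive cycle, with hard-coded response bytes standing in for a real socket read:

```python
import h11

# Client-side state machine, as created in AsyncHTTP11Connection.__init__ above.
conn = h11.Connection(our_role=h11.CLIENT)

# _send_request_headers()/_send_request_body() reduce to h11 'send' calls
# that return the raw bytes to write to the network stream.
wire = conn.send(h11.Request(method="GET", target="/", headers=[("Host", "example.com")]))
wire += conn.send(h11.EndOfMessage())

# _receive_event() feeds bytes in and pulls events out; h11.NEED_DATA means
# "read more from the socket".
conn.receive_data(b"HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nok")
while True:
    event = conn.next_event()
    if event is h11.NEED_DATA or isinstance(event, h11.EndOfMessage):
        break
    print(type(event).__name__)  # Response, then Data
```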
.venv/lib/python3.11/site-packages/httpcore/_async/http2.py ADDED
@@ -0,0 +1,583 @@
+ from __future__ import annotations
+
+ import enum
+ import logging
+ import time
+ import types
+ import typing
+
+ import h2.config
+ import h2.connection
+ import h2.events
+ import h2.exceptions
+ import h2.settings
+
+ from .._backends.base import AsyncNetworkStream
+ from .._exceptions import (
+     ConnectionNotAvailable,
+     LocalProtocolError,
+     RemoteProtocolError,
+ )
+ from .._models import Origin, Request, Response
+ from .._synchronization import AsyncLock, AsyncSemaphore, AsyncShieldCancellation
+ from .._trace import Trace
+ from .interfaces import AsyncConnectionInterface
+
+ logger = logging.getLogger("httpcore.http2")
+
+
+ def has_body_headers(request: Request) -> bool:
+     return any(
+         k.lower() == b"content-length" or k.lower() == b"transfer-encoding"
+         for k, v in request.headers
+     )
+
+
+ class HTTPConnectionState(enum.IntEnum):
+     ACTIVE = 1
+     IDLE = 2
+     CLOSED = 3
+
+
+ class AsyncHTTP2Connection(AsyncConnectionInterface):
+     READ_NUM_BYTES = 64 * 1024
+     CONFIG = h2.config.H2Configuration(validate_inbound_headers=False)
+
+     def __init__(
+         self,
+         origin: Origin,
+         stream: AsyncNetworkStream,
+         keepalive_expiry: float | None = None,
+     ):
+         self._origin = origin
+         self._network_stream = stream
+         self._keepalive_expiry: float | None = keepalive_expiry
+         self._h2_state = h2.connection.H2Connection(config=self.CONFIG)
+         self._state = HTTPConnectionState.IDLE
+         self._expire_at: float | None = None
+         self._request_count = 0
+         self._init_lock = AsyncLock()
+         self._state_lock = AsyncLock()
+         self._read_lock = AsyncLock()
+         self._write_lock = AsyncLock()
+         self._sent_connection_init = False
+         self._used_all_stream_ids = False
+         self._connection_error = False
+
+         # Mapping from stream ID to response stream events.
+         self._events: dict[
+             int,
+             list[
+                 h2.events.ResponseReceived
+                 | h2.events.DataReceived
+                 | h2.events.StreamEnded
+                 | h2.events.StreamReset
+             ],
+         ] = {}
+
+         # Connection terminated events are stored as state since
+         # we need to handle them for all streams.
+         self._connection_terminated: h2.events.ConnectionTerminated | None = None
+
+         self._read_exception: Exception | None = None
+         self._write_exception: Exception | None = None
+
+     async def handle_async_request(self, request: Request) -> Response:
+         if not self.can_handle_request(request.url.origin):
+             # This cannot occur in normal operation, since the connection pool
+             # will only send requests on connections that handle them.
+             # It's in place simply for resilience as a guard against incorrect
+             # usage, for anyone working directly with httpcore connections.
+             raise RuntimeError(
+                 f"Attempted to send request to {request.url.origin} on connection "
+                 f"to {self._origin}"
+             )
+
+         async with self._state_lock:
+             if self._state in (HTTPConnectionState.ACTIVE, HTTPConnectionState.IDLE):
+                 self._request_count += 1
+                 self._expire_at = None
+                 self._state = HTTPConnectionState.ACTIVE
+             else:
+                 raise ConnectionNotAvailable()
+
+         async with self._init_lock:
+             if not self._sent_connection_init:
+                 try:
+                     kwargs = {"request": request}
+                     async with Trace("send_connection_init", logger, request, kwargs):
+                         await self._send_connection_init(**kwargs)
+                 except BaseException as exc:
+                     with AsyncShieldCancellation():
+                         await self.aclose()
+                     raise exc
+
+                 self._sent_connection_init = True
+
+                 # Initially start with just 1 until the remote server provides
+                 # its max_concurrent_streams value
+                 self._max_streams = 1
+
+                 local_settings_max_streams = (
+                     self._h2_state.local_settings.max_concurrent_streams
+                 )
+                 self._max_streams_semaphore = AsyncSemaphore(local_settings_max_streams)
+
+                 for _ in range(local_settings_max_streams - self._max_streams):
+                     await self._max_streams_semaphore.acquire()
+
+         await self._max_streams_semaphore.acquire()
+
+         try:
+             stream_id = self._h2_state.get_next_available_stream_id()
+             self._events[stream_id] = []
+         except h2.exceptions.NoAvailableStreamIDError:  # pragma: nocover
+             self._used_all_stream_ids = True
+             self._request_count -= 1
+             raise ConnectionNotAvailable()
+
+         try:
+             kwargs = {"request": request, "stream_id": stream_id}
+             async with Trace("send_request_headers", logger, request, kwargs):
+                 await self._send_request_headers(request=request, stream_id=stream_id)
+             async with Trace("send_request_body", logger, request, kwargs):
+                 await self._send_request_body(request=request, stream_id=stream_id)
+             async with Trace(
+                 "receive_response_headers", logger, request, kwargs
+             ) as trace:
+                 status, headers = await self._receive_response(
+                     request=request, stream_id=stream_id
+                 )
+                 trace.return_value = (status, headers)
+
+             return Response(
+                 status=status,
+                 headers=headers,
+                 content=HTTP2ConnectionByteStream(self, request, stream_id=stream_id),
+                 extensions={
+                     "http_version": b"HTTP/2",
+                     "network_stream": self._network_stream,
+                     "stream_id": stream_id,
+                 },
+             )
+         except BaseException as exc:  # noqa: PIE786
+             with AsyncShieldCancellation():
+                 kwargs = {"stream_id": stream_id}
+                 async with Trace("response_closed", logger, request, kwargs):
+                     await self._response_closed(stream_id=stream_id)
+
+             if isinstance(exc, h2.exceptions.ProtocolError):
+                 # One case where h2 can raise a protocol error is when a
+                 # closed frame has been seen by the state machine.
+                 #
+                 # This happens when one stream is reading, and encounters
+                 # a GOAWAY event. Other flows of control may then raise
+                 # a protocol error at any point they interact with the 'h2_state'.
+                 #
+                 # In this case we'll have stored the event, and should raise
+                 # it as a RemoteProtocolError.
+                 if self._connection_terminated:  # pragma: nocover
+                     raise RemoteProtocolError(self._connection_terminated)
+                 # If h2 raises a protocol error in some other state then we
+                 # must somehow have made a protocol violation.
+                 raise LocalProtocolError(exc)  # pragma: nocover
+
+             raise exc
+
+     async def _send_connection_init(self, request: Request) -> None:
+         """
+         The HTTP/2 connection requires some initial setup before we can start
+         using individual request/response streams on it.
+         """
+         # Need to set these manually here instead of manipulating via
+         # __setitem__() otherwise the H2Connection will emit SettingsUpdate
+         # frames in addition to sending the undesired defaults.
+         self._h2_state.local_settings = h2.settings.Settings(
+             client=True,
+             initial_values={
+                 # Disable PUSH_PROMISE frames from the server since we don't do anything
+                 # with them for now. Maybe when we support caching?
+                 h2.settings.SettingCodes.ENABLE_PUSH: 0,
+                 # These two are taken from h2 for safe defaults
+                 h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS: 100,
+                 h2.settings.SettingCodes.MAX_HEADER_LIST_SIZE: 65536,
+             },
+         )
+
+         # Some websites (*cough* Yahoo *cough*) balk at this setting being
+         # present in the initial handshake since it's not defined in the original
+         # RFC despite the RFC mandating ignoring settings you don't know about.
+         del self._h2_state.local_settings[
+             h2.settings.SettingCodes.ENABLE_CONNECT_PROTOCOL
+         ]
+
+         self._h2_state.initiate_connection()
+         self._h2_state.increment_flow_control_window(2**24)
+         await self._write_outgoing_data(request)
+
+     # Sending the request...
+
+     async def _send_request_headers(self, request: Request, stream_id: int) -> None:
+         """
+         Send the request headers to a given stream ID.
+         """
+         end_stream = not has_body_headers(request)
+
+         # In HTTP/2 the ':authority' pseudo-header is used instead of 'Host'.
+         # In order to gracefully handle HTTP/1.1 and HTTP/2 we always require
+         # HTTP/1.1 style headers, and map them appropriately if we end up on
+         # an HTTP/2 connection.
+         authority = [v for k, v in request.headers if k.lower() == b"host"][0]
+
+         headers = [
+             (b":method", request.method),
+             (b":authority", authority),
+             (b":scheme", request.url.scheme),
+             (b":path", request.url.target),
+         ] + [
+             (k.lower(), v)
+             for k, v in request.headers
+             if k.lower()
+             not in (
+                 b"host",
+                 b"transfer-encoding",
+             )
+         ]
+
+         self._h2_state.send_headers(stream_id, headers, end_stream=end_stream)
+         self._h2_state.increment_flow_control_window(2**24, stream_id=stream_id)
+         await self._write_outgoing_data(request)
+
+     async def _send_request_body(self, request: Request, stream_id: int) -> None:
+         """
+         Iterate over the request body sending it to a given stream ID.
+         """
+         if not has_body_headers(request):
+             return
+
+         assert isinstance(request.stream, typing.AsyncIterable)
+         async for data in request.stream:
+             await self._send_stream_data(request, stream_id, data)
+         await self._send_end_stream(request, stream_id)
+
+     async def _send_stream_data(
+         self, request: Request, stream_id: int, data: bytes
+     ) -> None:
+         """
+         Send a single chunk of data in one or more data frames.
+         """
+         while data:
+             max_flow = await self._wait_for_outgoing_flow(request, stream_id)
+             chunk_size = min(len(data), max_flow)
+             chunk, data = data[:chunk_size], data[chunk_size:]
+             self._h2_state.send_data(stream_id, chunk)
+             await self._write_outgoing_data(request)
+
+     async def _send_end_stream(self, request: Request, stream_id: int) -> None:
+         """
+         Send an empty data frame on a given stream ID with the END_STREAM flag set.
+         """
+         self._h2_state.end_stream(stream_id)
+         await self._write_outgoing_data(request)
+
+     # Receiving the response...
+
+     async def _receive_response(
+         self, request: Request, stream_id: int
+     ) -> tuple[int, list[tuple[bytes, bytes]]]:
+         """
+         Return the response status code and headers for a given stream ID.
+         """
+         while True:
+             event = await self._receive_stream_event(request, stream_id)
+             if isinstance(event, h2.events.ResponseReceived):
+                 break
+
+         status_code = 200
+         headers = []
+         for k, v in event.headers:
+             if k == b":status":
+                 status_code = int(v.decode("ascii", errors="ignore"))
+             elif not k.startswith(b":"):
+                 headers.append((k, v))
+
+         return (status_code, headers)
+
+     async def _receive_response_body(
+         self, request: Request, stream_id: int
+     ) -> typing.AsyncIterator[bytes]:
+         """
+         Iterator that returns the bytes of the response body for a given stream ID.
+         """
+         while True:
+             event = await self._receive_stream_event(request, stream_id)
+             if isinstance(event, h2.events.DataReceived):
+                 amount = event.flow_controlled_length
+                 self._h2_state.acknowledge_received_data(amount, stream_id)
+                 await self._write_outgoing_data(request)
+                 yield event.data
+             elif isinstance(event, h2.events.StreamEnded):
+                 break
+
+     async def _receive_stream_event(
+         self, request: Request, stream_id: int
+     ) -> h2.events.ResponseReceived | h2.events.DataReceived | h2.events.StreamEnded:
+         """
+         Return the next available event for a given stream ID.
+
+         Will read more data from the network if required.
+         """
+         while not self._events.get(stream_id):
+             await self._receive_events(request, stream_id)
+         event = self._events[stream_id].pop(0)
+         if isinstance(event, h2.events.StreamReset):
+             raise RemoteProtocolError(event)
+         return event
+
+     async def _receive_events(
+         self, request: Request, stream_id: int | None = None
+     ) -> None:
+         """
+         Read some data from the network until we see one or more events
+         for a given stream ID.
+         """
+         async with self._read_lock:
+             if self._connection_terminated is not None:
+                 last_stream_id = self._connection_terminated.last_stream_id
+                 if stream_id and last_stream_id and stream_id > last_stream_id:
+                     self._request_count -= 1
+                     raise ConnectionNotAvailable()
+                 raise RemoteProtocolError(self._connection_terminated)
+
+             # This conditional is a bit icky. We don't want to block reading if we've
+             # actually got an event to return for a given stream. We need to do that
+             # check *within* the atomic read lock. Though it also needs to be optional,
+             # because when we call it from `_wait_for_outgoing_flow` we *do* want to
+             # block until we have available flow control, even when we have events
+             # pending for the stream ID we're attempting to send on.
+             if stream_id is None or not self._events.get(stream_id):
+                 events = await self._read_incoming_data(request)
+                 for event in events:
+                     if isinstance(event, h2.events.RemoteSettingsChanged):
+                         async with Trace(
+                             "receive_remote_settings", logger, request
+                         ) as trace:
+                             await self._receive_remote_settings_change(event)
+                             trace.return_value = event
+
+                     elif isinstance(
+                         event,
+                         (
+                             h2.events.ResponseReceived,
+                             h2.events.DataReceived,
+                             h2.events.StreamEnded,
+                             h2.events.StreamReset,
+                         ),
+                     ):
+                         if event.stream_id in self._events:
+                             self._events[event.stream_id].append(event)
+
+                     elif isinstance(event, h2.events.ConnectionTerminated):
+                         self._connection_terminated = event
+
+         await self._write_outgoing_data(request)
+
+     async def _receive_remote_settings_change(self, event: h2.events.Event) -> None:
+         max_concurrent_streams = event.changed_settings.get(
+             h2.settings.SettingCodes.MAX_CONCURRENT_STREAMS
+         )
+         if max_concurrent_streams:
+             new_max_streams = min(
+                 max_concurrent_streams.new_value,
+                 self._h2_state.local_settings.max_concurrent_streams,
+             )
+             if new_max_streams and new_max_streams != self._max_streams:
+                 while new_max_streams > self._max_streams:
+                     await self._max_streams_semaphore.release()
+                     self._max_streams += 1
+                 while new_max_streams < self._max_streams:
+                     await self._max_streams_semaphore.acquire()
+                     self._max_streams -= 1
+
+     async def _response_closed(self, stream_id: int) -> None:
+         await self._max_streams_semaphore.release()
+         del self._events[stream_id]
+         async with self._state_lock:
+             if self._connection_terminated and not self._events:
+                 await self.aclose()
+
+             elif self._state == HTTPConnectionState.ACTIVE and not self._events:
+                 self._state = HTTPConnectionState.IDLE
+                 if self._keepalive_expiry is not None:
+                     now = time.monotonic()
+                     self._expire_at = now + self._keepalive_expiry
+                 if self._used_all_stream_ids:  # pragma: nocover
+                     await self.aclose()
+
+     async def aclose(self) -> None:
+         # Note that this method unilaterally closes the connection, and does
+         # not have any kind of locking in place around it.
+         self._h2_state.close_connection()
+         self._state = HTTPConnectionState.CLOSED
+         await self._network_stream.aclose()
+
+     # Wrappers around network read/write operations...
+
+     async def _read_incoming_data(self, request: Request) -> list[h2.events.Event]:
+         timeouts = request.extensions.get("timeout", {})
+         timeout = timeouts.get("read", None)
+
+         if self._read_exception is not None:
+             raise self._read_exception  # pragma: nocover
+
+         try:
+             data = await self._network_stream.read(self.READ_NUM_BYTES, timeout)
+             if data == b"":
+                 raise RemoteProtocolError("Server disconnected")
+         except Exception as exc:
+             # If we get a network error we should:
+             #
+             # 1. Save the exception and just raise it immediately on any future reads.
+             #    (For example, this means that a single read timeout or disconnect will
+             #    immediately close all pending streams. Without requiring multiple
+             #    sequential timeouts.)
+             # 2. Mark the connection as errored, so that we don't accept any other
+             #    incoming requests.
+             self._read_exception = exc
+             self._connection_error = True
+             raise exc
+
+         events: list[h2.events.Event] = self._h2_state.receive_data(data)
+
+         return events
+
+     async def _write_outgoing_data(self, request: Request) -> None:
+         timeouts = request.extensions.get("timeout", {})
+         timeout = timeouts.get("write", None)
+
+         async with self._write_lock:
+             data_to_send = self._h2_state.data_to_send()
+
+             if self._write_exception is not None:
+                 raise self._write_exception  # pragma: nocover
+
+             try:
+                 await self._network_stream.write(data_to_send, timeout)
+             except Exception as exc:  # pragma: nocover
+                 # If we get a network error we should:
+                 #
+                 # 1. Save the exception and just raise it immediately on any future write.
+                 #    (For example, this means that a single write timeout or disconnect will
+                 #    immediately close all pending streams. Without requiring multiple
+                 #    sequential timeouts.)
+                 # 2. Mark the connection as errored, so that we don't accept any other
+                 #    incoming requests.
+                 self._write_exception = exc
+                 self._connection_error = True
+                 raise exc
+
+     # Flow control...
+
+     async def _wait_for_outgoing_flow(self, request: Request, stream_id: int) -> int:
+         """
+         Returns the maximum allowable outgoing flow for a given stream.
+
+         If the allowable flow is zero, then waits on the network until
+         WindowUpdated frames have increased the flow rate.
+         https://tools.ietf.org/html/rfc7540#section-6.9
+         """
+         local_flow: int = self._h2_state.local_flow_control_window(stream_id)
+         max_frame_size: int = self._h2_state.max_outbound_frame_size
+         flow = min(local_flow, max_frame_size)
+         while flow == 0:
+             await self._receive_events(request)
+             local_flow = self._h2_state.local_flow_control_window(stream_id)
+             max_frame_size = self._h2_state.max_outbound_frame_size
+             flow = min(local_flow, max_frame_size)
+         return flow
+
+     # Interface for connection pooling...
+
+     def can_handle_request(self, origin: Origin) -> bool:
+         return origin == self._origin
+
+     def is_available(self) -> bool:
+         return (
+             self._state != HTTPConnectionState.CLOSED
+             and not self._connection_error
+             and not self._used_all_stream_ids
+             and not (
+                 self._h2_state.state_machine.state
+                 == h2.connection.ConnectionState.CLOSED
+             )
+         )
+
+     def has_expired(self) -> bool:
+         now = time.monotonic()
+         return self._expire_at is not None and now > self._expire_at
+
+     def is_idle(self) -> bool:
+         return self._state == HTTPConnectionState.IDLE
+
+     def is_closed(self) -> bool:
+         return self._state == HTTPConnectionState.CLOSED
+
+     def info(self) -> str:
+         origin = str(self._origin)
+         return (
+             f"{origin!r}, HTTP/2, {self._state.name}, "
+             f"Request Count: {self._request_count}"
+         )
+
+     def __repr__(self) -> str:
+         class_name = self.__class__.__name__
+         origin = str(self._origin)
+         return (
+             f"<{class_name} [{origin!r}, {self._state.name}, "
+             f"Request Count: {self._request_count}]>"
+         )
+
+     # These context managers are not used in the standard flow, but are
+     # useful for testing or working with connection instances directly.
+
+     async def __aenter__(self) -> AsyncHTTP2Connection:
+         return self
+
+     async def __aexit__(
+         self,
+         exc_type: type[BaseException] | None = None,
+         exc_value: BaseException | None = None,
+         traceback: types.TracebackType | None = None,
+     ) -> None:
+         await self.aclose()
+
+
+ class HTTP2ConnectionByteStream:
+     def __init__(
+         self, connection: AsyncHTTP2Connection, request: Request, stream_id: int
+     ) -> None:
+         self._connection = connection
+         self._request = request
+         self._stream_id = stream_id
+         self._closed = False
+
+     async def __aiter__(self) -> typing.AsyncIterator[bytes]:
+         kwargs = {"request": self._request, "stream_id": self._stream_id}
+         try:
+             async with Trace("receive_response_body", logger, self._request, kwargs):
+                 async for chunk in self._connection._receive_response_body(
+                     request=self._request, stream_id=self._stream_id
+                 ):
+                     yield chunk
+         except BaseException as exc:
+             # If we get an exception while streaming the response,
+             # we want to close the response (and possibly the connection)
+             # before raising that exception.
+             with AsyncShieldCancellation():
+                 await self.aclose()
+             raise exc
+
+     async def aclose(self) -> None:
+         if not self._closed:
+             self._closed = True
+             kwargs = {"stream_id": self._stream_id}
+             async with Trace("response_closed", logger, self._request, kwargs):
+                 await self._connection._response_closed(stream_id=self._stream_id)
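
`AsyncHTTP2Connection` similarly drives the `h2` state machine over a network stream. A minimal in-memory sketch of the calls it wraps; `data_to_send()` stands in for `_write_outgoing_data()`, and the header values are placeholders:

```python
import h2.config
import h2.connection

# Client-side state machine, configured as in AsyncHTTP2Connection.CONFIG above.
conn = h2.connection.H2Connection(
    config=h2.config.H2Configuration(client_side=True, validate_inbound_headers=False)
)
conn.initiate_connection()  # queues the HTTP/2 connection preface + SETTINGS frame

# _send_request_headers() maps HTTP/1.1-style headers onto pseudo-headers.
stream_id = conn.get_next_available_stream_id()
conn.send_headers(
    stream_id,
    [
        (":method", "GET"),
        (":authority", "example.com"),
        (":scheme", "https"),
        (":path", "/"),
    ],
    end_stream=True,  # no body, so END_STREAM goes on the HEADERS frame
)

print(len(conn.data_to_send()))  # raw bytes a real connection would write
```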
.venv/lib/python3.11/site-packages/httpcore/_async/http_proxy.py ADDED
@@ -0,0 +1,367 @@
+ from __future__ import annotations
+
+ import base64
+ import logging
+ import ssl
+ import typing
+
+ from .._backends.base import SOCKET_OPTION, AsyncNetworkBackend
+ from .._exceptions import ProxyError
+ from .._models import (
+     URL,
+     Origin,
+     Request,
+     Response,
+     enforce_bytes,
+     enforce_headers,
+     enforce_url,
+ )
+ from .._ssl import default_ssl_context
+ from .._synchronization import AsyncLock
+ from .._trace import Trace
+ from .connection import AsyncHTTPConnection
+ from .connection_pool import AsyncConnectionPool
+ from .http11 import AsyncHTTP11Connection
+ from .interfaces import AsyncConnectionInterface
+
+ ByteOrStr = typing.Union[bytes, str]
+ HeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]]
+ HeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr]
+
+
+ logger = logging.getLogger("httpcore.proxy")
+
+
+ def merge_headers(
+     default_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,
+     override_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,
+ ) -> list[tuple[bytes, bytes]]:
+     """
+     Append default_headers and override_headers, de-duplicating if a key exists
+     in both.
+     """
+     default_headers = [] if default_headers is None else list(default_headers)
+     override_headers = [] if override_headers is None else list(override_headers)
+     has_override = set(key.lower() for key, value in override_headers)
+     default_headers = [
+         (key, value)
+         for key, value in default_headers
+         if key.lower() not in has_override
+     ]
+     return default_headers + override_headers
+
+
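
A quick worked example of the precedence rule in `merge_headers`, using made-up header values:

```python
defaults = [(b"Accept", b"*/*"), (b"Proxy-Authorization", b"Basic abc123")]
overrides = [(b"accept", b"text/html")]

# The override wins on a case-insensitive key match; other defaults survive.
print(merge_headers(defaults, overrides))
# [(b'Proxy-Authorization', b'Basic abc123'), (b'accept', b'text/html')]
```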
+ class AsyncHTTPProxy(AsyncConnectionPool):  # pragma: nocover
+     """
+     A connection pool that sends requests via an HTTP proxy.
+     """
+
+     def __init__(
+         self,
+         proxy_url: URL | bytes | str,
+         proxy_auth: tuple[bytes | str, bytes | str] | None = None,
+         proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None,
+         ssl_context: ssl.SSLContext | None = None,
+         proxy_ssl_context: ssl.SSLContext | None = None,
+         max_connections: int | None = 10,
+         max_keepalive_connections: int | None = None,
+         keepalive_expiry: float | None = None,
+         http1: bool = True,
+         http2: bool = False,
+         retries: int = 0,
+         local_address: str | None = None,
+         uds: str | None = None,
+         network_backend: AsyncNetworkBackend | None = None,
+         socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
+     ) -> None:
+         """
+         A connection pool for making HTTP requests.
+
+         Parameters:
+             proxy_url: The URL to use when connecting to the proxy server.
+                 For example `"http://127.0.0.1:8080/"`.
+             proxy_auth: Any proxy authentication as a two-tuple of
+                 (username, password). May be either bytes or ascii-only str.
+             proxy_headers: Any HTTP headers to use for the proxy requests.
+                 For example `{"Proxy-Authorization": "Basic <username>:<password>"}`.
+             ssl_context: An SSL context to use for verifying connections.
+                 If not specified, the default `httpcore.default_ssl_context()`
+                 will be used.
+             proxy_ssl_context: The same as `ssl_context`, but for a proxy server rather than a remote origin.
+             max_connections: The maximum number of concurrent HTTP connections that
+                 the pool should allow. Any attempt to send a request on a pool that
+                 would exceed this amount will block until a connection is available.
+             max_keepalive_connections: The maximum number of idle HTTP connections
+                 that will be maintained in the pool.
+             keepalive_expiry: The duration in seconds that an idle HTTP connection
+                 may be maintained for before being expired from the pool.
+             http1: A boolean indicating if HTTP/1.1 requests should be supported
+                 by the connection pool. Defaults to True.
+             http2: A boolean indicating if HTTP/2 requests should be supported by
+                 the connection pool. Defaults to False.
+             retries: The maximum number of retries when trying to establish
+                 a connection.
+             local_address: Local address to connect from. Can also be used to
+                 connect using a particular address family. Using
+                 `local_address="0.0.0.0"` will connect using an `AF_INET` address
+                 (IPv4), while using `local_address="::"` will connect using an
+                 `AF_INET6` address (IPv6).
+             uds: Path to a Unix Domain Socket to use instead of TCP sockets.
+             network_backend: A backend instance to use for handling network I/O.
+         """
+         super().__init__(
+             ssl_context=ssl_context,
+             max_connections=max_connections,
+             max_keepalive_connections=max_keepalive_connections,
+             keepalive_expiry=keepalive_expiry,
+             http1=http1,
+             http2=http2,
+             network_backend=network_backend,
+             retries=retries,
+             local_address=local_address,
+             uds=uds,
+             socket_options=socket_options,
+         )
+
+         self._proxy_url = enforce_url(proxy_url, name="proxy_url")
+         if (
+             self._proxy_url.scheme == b"http" and proxy_ssl_context is not None
+         ):  # pragma: no cover
+             raise RuntimeError(
+                 "The `proxy_ssl_context` argument is not allowed for the http scheme"
+             )
+
+         self._ssl_context = ssl_context
+         self._proxy_ssl_context = proxy_ssl_context
+         self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
+         if proxy_auth is not None:
+             username = enforce_bytes(proxy_auth[0], name="proxy_auth")
+             password = enforce_bytes(proxy_auth[1], name="proxy_auth")
+             userpass = username + b":" + password
+             authorization = b"Basic " + base64.b64encode(userpass)
+             self._proxy_headers = [
+                 (b"Proxy-Authorization", authorization)
+             ] + self._proxy_headers
+
+     def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
+         if origin.scheme == b"http":
+             return AsyncForwardHTTPConnection(
+                 proxy_origin=self._proxy_url.origin,
+                 proxy_headers=self._proxy_headers,
+                 remote_origin=origin,
+                 keepalive_expiry=self._keepalive_expiry,
+                 network_backend=self._network_backend,
+                 proxy_ssl_context=self._proxy_ssl_context,
+             )
+         return AsyncTunnelHTTPConnection(
+             proxy_origin=self._proxy_url.origin,
+             proxy_headers=self._proxy_headers,
+             remote_origin=origin,
+             ssl_context=self._ssl_context,
+             proxy_ssl_context=self._proxy_ssl_context,
+             keepalive_expiry=self._keepalive_expiry,
+             http1=self._http1,
+             http2=self._http2,
+             network_backend=self._network_backend,
+         )
+
+
+ class AsyncForwardHTTPConnection(AsyncConnectionInterface):
170
+ def __init__(
171
+ self,
172
+ proxy_origin: Origin,
173
+ remote_origin: Origin,
174
+ proxy_headers: HeadersAsMapping | HeadersAsSequence | None = None,
175
+ keepalive_expiry: float | None = None,
176
+ network_backend: AsyncNetworkBackend | None = None,
177
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
178
+ proxy_ssl_context: ssl.SSLContext | None = None,
179
+ ) -> None:
180
+ self._connection = AsyncHTTPConnection(
181
+ origin=proxy_origin,
182
+ keepalive_expiry=keepalive_expiry,
183
+ network_backend=network_backend,
184
+ socket_options=socket_options,
185
+ ssl_context=proxy_ssl_context,
186
+ )
187
+ self._proxy_origin = proxy_origin
188
+ self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
189
+ self._remote_origin = remote_origin
190
+
191
+ async def handle_async_request(self, request: Request) -> Response:
192
+ headers = merge_headers(self._proxy_headers, request.headers)
193
+ url = URL(
194
+ scheme=self._proxy_origin.scheme,
195
+ host=self._proxy_origin.host,
196
+ port=self._proxy_origin.port,
197
+ target=bytes(request.url),
198
+ )
199
+ proxy_request = Request(
200
+ method=request.method,
201
+ url=url,
202
+ headers=headers,
203
+ content=request.stream,
204
+ extensions=request.extensions,
205
+ )
206
+ return await self._connection.handle_async_request(proxy_request)
207
+
208
+ def can_handle_request(self, origin: Origin) -> bool:
209
+ return origin == self._remote_origin
210
+
211
+ async def aclose(self) -> None:
212
+ await self._connection.aclose()
213
+
214
+ def info(self) -> str:
215
+ return self._connection.info()
216
+
217
+ def is_available(self) -> bool:
218
+ return self._connection.is_available()
219
+
220
+ def has_expired(self) -> bool:
221
+ return self._connection.has_expired()
222
+
223
+ def is_idle(self) -> bool:
224
+ return self._connection.is_idle()
225
+
226
+ def is_closed(self) -> bool:
227
+ return self._connection.is_closed()
228
+
229
+ def __repr__(self) -> str:
230
+ return f"<{self.__class__.__name__} [{self.info()}]>"
231
+
232
+
233
+ class AsyncTunnelHTTPConnection(AsyncConnectionInterface):
234
+ def __init__(
235
+ self,
236
+ proxy_origin: Origin,
237
+ remote_origin: Origin,
238
+ ssl_context: ssl.SSLContext | None = None,
239
+ proxy_ssl_context: ssl.SSLContext | None = None,
240
+ proxy_headers: typing.Sequence[tuple[bytes, bytes]] | None = None,
241
+ keepalive_expiry: float | None = None,
242
+ http1: bool = True,
243
+ http2: bool = False,
244
+ network_backend: AsyncNetworkBackend | None = None,
245
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
246
+ ) -> None:
247
+ self._connection: AsyncConnectionInterface = AsyncHTTPConnection(
248
+ origin=proxy_origin,
249
+ keepalive_expiry=keepalive_expiry,
250
+ network_backend=network_backend,
251
+ socket_options=socket_options,
252
+ ssl_context=proxy_ssl_context,
253
+ )
254
+ self._proxy_origin = proxy_origin
255
+ self._remote_origin = remote_origin
256
+ self._ssl_context = ssl_context
257
+ self._proxy_ssl_context = proxy_ssl_context
258
+ self._proxy_headers = enforce_headers(proxy_headers, name="proxy_headers")
259
+ self._keepalive_expiry = keepalive_expiry
260
+ self._http1 = http1
261
+ self._http2 = http2
262
+ self._connect_lock = AsyncLock()
263
+ self._connected = False
264
+
265
+ async def handle_async_request(self, request: Request) -> Response:
266
+ timeouts = request.extensions.get("timeout", {})
267
+ timeout = timeouts.get("connect", None)
268
+
269
+ async with self._connect_lock:
270
+ if not self._connected:
271
+ target = b"%b:%d" % (self._remote_origin.host, self._remote_origin.port)
272
+
273
+ connect_url = URL(
274
+ scheme=self._proxy_origin.scheme,
275
+ host=self._proxy_origin.host,
276
+ port=self._proxy_origin.port,
277
+ target=target,
278
+ )
279
+ connect_headers = merge_headers(
280
+ [(b"Host", target), (b"Accept", b"*/*")], self._proxy_headers
281
+ )
282
+ connect_request = Request(
283
+ method=b"CONNECT",
284
+ url=connect_url,
285
+ headers=connect_headers,
286
+ extensions=request.extensions,
287
+ )
288
+ connect_response = await self._connection.handle_async_request(
289
+ connect_request
290
+ )
291
+
292
+ if connect_response.status < 200 or connect_response.status > 299:
293
+ reason_bytes = connect_response.extensions.get("reason_phrase", b"")
294
+ reason_str = reason_bytes.decode("ascii", errors="ignore")
295
+ msg = "%d %s" % (connect_response.status, reason_str)
296
+ await self._connection.aclose()
297
+ raise ProxyError(msg)
298
+
299
+ stream = connect_response.extensions["network_stream"]
300
+
301
+ # Upgrade the stream to SSL
302
+ ssl_context = (
303
+ default_ssl_context()
304
+ if self._ssl_context is None
305
+ else self._ssl_context
306
+ )
307
+ alpn_protocols = ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
308
+ ssl_context.set_alpn_protocols(alpn_protocols)
309
+
310
+ kwargs = {
311
+ "ssl_context": ssl_context,
312
+ "server_hostname": self._remote_origin.host.decode("ascii"),
313
+ "timeout": timeout,
314
+ }
315
+ async with Trace("start_tls", logger, request, kwargs) as trace:
316
+ stream = await stream.start_tls(**kwargs)
317
+ trace.return_value = stream
318
+
319
+ # Determine if we should be using HTTP/1.1 or HTTP/2
320
+ ssl_object = stream.get_extra_info("ssl_object")
321
+ http2_negotiated = (
322
+ ssl_object is not None
323
+ and ssl_object.selected_alpn_protocol() == "h2"
324
+ )
325
+
326
+ # Create the HTTP/1.1 or HTTP/2 connection
327
+ if http2_negotiated or (self._http2 and not self._http1):
328
+ from .http2 import AsyncHTTP2Connection
329
+
330
+ self._connection = AsyncHTTP2Connection(
331
+ origin=self._remote_origin,
332
+ stream=stream,
333
+ keepalive_expiry=self._keepalive_expiry,
334
+ )
335
+ else:
336
+ self._connection = AsyncHTTP11Connection(
337
+ origin=self._remote_origin,
338
+ stream=stream,
339
+ keepalive_expiry=self._keepalive_expiry,
340
+ )
341
+
342
+ self._connected = True
343
+ return await self._connection.handle_async_request(request)
344
+
345
+ def can_handle_request(self, origin: Origin) -> bool:
346
+ return origin == self._remote_origin
347
+
348
+ async def aclose(self) -> None:
349
+ await self._connection.aclose()
350
+
351
+ def info(self) -> str:
352
+ return self._connection.info()
353
+
354
+ def is_available(self) -> bool:
355
+ return self._connection.is_available()
356
+
357
+ def has_expired(self) -> bool:
358
+ return self._connection.has_expired()
359
+
360
+ def is_idle(self) -> bool:
361
+ return self._connection.is_idle()
362
+
363
+ def is_closed(self) -> bool:
364
+ return self._connection.is_closed()
365
+
366
+ def __repr__(self) -> str:
367
+ return f"<{self.__class__.__name__} [{self.info()}]>"
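A minimal usage sketch of the proxy classes vendored above (not part of the file itself): plain `http://` origins are forwarded via `AsyncForwardHTTPConnection`, while other origins are tunnelled via `AsyncTunnelHTTPConnection` using a `CONNECT` request. The proxy endpoint and credentials below are placeholders.

```python
import asyncio

import httpcore


async def main() -> None:
    # "http://..." origins are forwarded; "https://..." origins are
    # tunnelled with a CONNECT request and then upgraded to TLS.
    async with httpcore.AsyncHTTPProxy(
        proxy_url="http://127.0.0.1:8080/",  # placeholder proxy endpoint
        proxy_auth=("user", "pass"),  # sent as `Proxy-Authorization: Basic ...`
    ) as proxy:
        response = await proxy.request("GET", "https://www.example.com/")
        print(response.status)


asyncio.run(main())
```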
.venv/lib/python3.11/site-packages/httpcore/_async/interfaces.py ADDED
@@ -0,0 +1,137 @@
1
+ from __future__ import annotations
2
+
3
+ import contextlib
4
+ import typing
5
+
6
+ from .._models import (
7
+ URL,
8
+ Extensions,
9
+ HeaderTypes,
10
+ Origin,
11
+ Request,
12
+ Response,
13
+ enforce_bytes,
14
+ enforce_headers,
15
+ enforce_url,
16
+ include_request_headers,
17
+ )
18
+
19
+
20
+ class AsyncRequestInterface:
21
+ async def request(
22
+ self,
23
+ method: bytes | str,
24
+ url: URL | bytes | str,
25
+ *,
26
+ headers: HeaderTypes = None,
27
+ content: bytes | typing.AsyncIterator[bytes] | None = None,
28
+ extensions: Extensions | None = None,
29
+ ) -> Response:
30
+ # Strict type checking on our parameters.
31
+ method = enforce_bytes(method, name="method")
32
+ url = enforce_url(url, name="url")
33
+ headers = enforce_headers(headers, name="headers")
34
+
35
+ # Include Host header, and optionally Content-Length or Transfer-Encoding.
36
+ headers = include_request_headers(headers, url=url, content=content)
37
+
38
+ request = Request(
39
+ method=method,
40
+ url=url,
41
+ headers=headers,
42
+ content=content,
43
+ extensions=extensions,
44
+ )
45
+ response = await self.handle_async_request(request)
46
+ try:
47
+ await response.aread()
48
+ finally:
49
+ await response.aclose()
50
+ return response
51
+
52
+ @contextlib.asynccontextmanager
53
+ async def stream(
54
+ self,
55
+ method: bytes | str,
56
+ url: URL | bytes | str,
57
+ *,
58
+ headers: HeaderTypes = None,
59
+ content: bytes | typing.AsyncIterator[bytes] | None = None,
60
+ extensions: Extensions | None = None,
61
+ ) -> typing.AsyncIterator[Response]:
62
+ # Strict type checking on our parameters.
63
+ method = enforce_bytes(method, name="method")
64
+ url = enforce_url(url, name="url")
65
+ headers = enforce_headers(headers, name="headers")
66
+
67
+ # Include Host header, and optionally Content-Length or Transfer-Encoding.
68
+ headers = include_request_headers(headers, url=url, content=content)
69
+
70
+ request = Request(
71
+ method=method,
72
+ url=url,
73
+ headers=headers,
74
+ content=content,
75
+ extensions=extensions,
76
+ )
77
+ response = await self.handle_async_request(request)
78
+ try:
79
+ yield response
80
+ finally:
81
+ await response.aclose()
82
+
83
+ async def handle_async_request(self, request: Request) -> Response:
84
+ raise NotImplementedError() # pragma: nocover
85
+
86
+
87
+ class AsyncConnectionInterface(AsyncRequestInterface):
88
+ async def aclose(self) -> None:
89
+ raise NotImplementedError() # pragma: nocover
90
+
91
+ def info(self) -> str:
92
+ raise NotImplementedError() # pragma: nocover
93
+
94
+ def can_handle_request(self, origin: Origin) -> bool:
95
+ raise NotImplementedError() # pragma: nocover
96
+
97
+ def is_available(self) -> bool:
98
+ """
99
+ Return `True` if the connection is currently able to accept an
100
+ outgoing request.
101
+
102
+ An HTTP/1.1 connection will only be available if it is currently idle.
103
+
104
+ An HTTP/2 connection will be available so long as the stream ID space is
105
+ not yet exhausted, and the connection is not in an error state.
106
+
107
+ While the connection is being established we may not yet know if it is going
108
+ to result in an HTTP/1.1 or HTTP/2 connection. The connection should be
109
+ treated as being available, but might ultimately raise `ConnectionNotAvailable`
+ if multiple requests are attempted over a connection
111
+ that ends up being established as HTTP/1.1.
112
+ """
113
+ raise NotImplementedError() # pragma: nocover
114
+
115
+ def has_expired(self) -> bool:
116
+ """
117
+ Return `True` if the connection is in a state where it should be closed.
118
+
119
+ This either means that the connection is idle and it has passed the
120
+ expiry time on its keep-alive, or that the server has sent an EOF.
121
+ """
122
+ raise NotImplementedError() # pragma: nocover
123
+
124
+ def is_idle(self) -> bool:
125
+ """
126
+ Return `True` if the connection is currently idle.
127
+ """
128
+ raise NotImplementedError() # pragma: nocover
129
+
130
+ def is_closed(self) -> bool:
131
+ """
132
+ Return `True` if the connection has been closed.
133
+
134
+ Used when a response is closed to determine if the connection may be
135
+ returned to the connection pool or not.
136
+ """
137
+ raise NotImplementedError() # pragma: nocover
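A short sketch of how `AsyncRequestInterface` is consumed in practice, using the `AsyncConnectionPool` implementation from this same package: `.request()` reads the full body before returning, while `.stream()` hands back the response before the body has been consumed. The target URL is a placeholder.

```python
import asyncio

import httpcore


async def main() -> None:
    async with httpcore.AsyncConnectionPool() as pool:
        # `.request()` calls `aread()` for you, so `.content` is ready.
        response = await pool.request("GET", "https://www.example.com/")
        print(response.status, len(response.content))

        # `.stream()` yields the response before the body is consumed,
        # and guarantees `aclose()` on exit.
        async with pool.stream("GET", "https://www.example.com/") as response:
            async for chunk in response.aiter_stream():
                print(len(chunk))


asyncio.run(main())
```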
.venv/lib/python3.11/site-packages/httpcore/_async/socks_proxy.py ADDED
@@ -0,0 +1,341 @@
1
+ from __future__ import annotations
2
+
3
+ import logging
4
+ import ssl
5
+
6
+ import socksio
7
+
8
+ from .._backends.auto import AutoBackend
9
+ from .._backends.base import AsyncNetworkBackend, AsyncNetworkStream
10
+ from .._exceptions import ConnectionNotAvailable, ProxyError
11
+ from .._models import URL, Origin, Request, Response, enforce_bytes, enforce_url
12
+ from .._ssl import default_ssl_context
13
+ from .._synchronization import AsyncLock
14
+ from .._trace import Trace
15
+ from .connection_pool import AsyncConnectionPool
16
+ from .http11 import AsyncHTTP11Connection
17
+ from .interfaces import AsyncConnectionInterface
18
+
19
+ logger = logging.getLogger("httpcore.socks")
20
+
21
+
22
+ AUTH_METHODS = {
23
+ b"\x00": "NO AUTHENTICATION REQUIRED",
24
+ b"\x01": "GSSAPI",
25
+ b"\x02": "USERNAME/PASSWORD",
26
+ b"\xff": "NO ACCEPTABLE METHODS",
27
+ }
28
+
29
+ REPLY_CODES = {
30
+ b"\x00": "Succeeded",
31
+ b"\x01": "General SOCKS server failure",
32
+ b"\x02": "Connection not allowed by ruleset",
33
+ b"\x03": "Network unreachable",
34
+ b"\x04": "Host unreachable",
35
+ b"\x05": "Connection refused",
36
+ b"\x06": "TTL expired",
37
+ b"\x07": "Command not supported",
38
+ b"\x08": "Address type not supported",
39
+ }
40
+
41
+
42
+ async def _init_socks5_connection(
43
+ stream: AsyncNetworkStream,
44
+ *,
45
+ host: bytes,
46
+ port: int,
47
+ auth: tuple[bytes, bytes] | None = None,
48
+ ) -> None:
49
+ conn = socksio.socks5.SOCKS5Connection()
50
+
51
+ # Auth method request
52
+ auth_method = (
53
+ socksio.socks5.SOCKS5AuthMethod.NO_AUTH_REQUIRED
54
+ if auth is None
55
+ else socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD
56
+ )
57
+ conn.send(socksio.socks5.SOCKS5AuthMethodsRequest([auth_method]))
58
+ outgoing_bytes = conn.data_to_send()
59
+ await stream.write(outgoing_bytes)
60
+
61
+ # Auth method response
62
+ incoming_bytes = await stream.read(max_bytes=4096)
63
+ response = conn.receive_data(incoming_bytes)
64
+ assert isinstance(response, socksio.socks5.SOCKS5AuthReply)
65
+ if response.method != auth_method:
66
+ requested = AUTH_METHODS.get(auth_method, "UNKNOWN")
67
+ responded = AUTH_METHODS.get(response.method, "UNKNOWN")
68
+ raise ProxyError(
69
+ f"Requested {requested} from proxy server, but got {responded}."
70
+ )
71
+
72
+ if response.method == socksio.socks5.SOCKS5AuthMethod.USERNAME_PASSWORD:
73
+ # Username/password request
74
+ assert auth is not None
75
+ username, password = auth
76
+ conn.send(socksio.socks5.SOCKS5UsernamePasswordRequest(username, password))
77
+ outgoing_bytes = conn.data_to_send()
78
+ await stream.write(outgoing_bytes)
79
+
80
+ # Username/password response
81
+ incoming_bytes = await stream.read(max_bytes=4096)
82
+ response = conn.receive_data(incoming_bytes)
83
+ assert isinstance(response, socksio.socks5.SOCKS5UsernamePasswordReply)
84
+ if not response.success:
85
+ raise ProxyError("Invalid username/password")
86
+
87
+ # Connect request
88
+ conn.send(
89
+ socksio.socks5.SOCKS5CommandRequest.from_address(
90
+ socksio.socks5.SOCKS5Command.CONNECT, (host, port)
91
+ )
92
+ )
93
+ outgoing_bytes = conn.data_to_send()
94
+ await stream.write(outgoing_bytes)
95
+
96
+ # Connect response
97
+ incoming_bytes = await stream.read(max_bytes=4096)
98
+ response = conn.receive_data(incoming_bytes)
99
+ assert isinstance(response, socksio.socks5.SOCKS5Reply)
100
+ if response.reply_code != socksio.socks5.SOCKS5ReplyCode.SUCCEEDED:
101
+ reply_code = REPLY_CODES.get(response.reply_code, "UNKNOWN")
102
+ raise ProxyError(f"Proxy Server could not connect: {reply_code}.")
103
+
104
+
105
+ class AsyncSOCKSProxy(AsyncConnectionPool): # pragma: nocover
106
+ """
107
+ A connection pool that sends requests via a SOCKS5 proxy.
108
+ """
109
+
110
+ def __init__(
111
+ self,
112
+ proxy_url: URL | bytes | str,
113
+ proxy_auth: tuple[bytes | str, bytes | str] | None = None,
114
+ ssl_context: ssl.SSLContext | None = None,
115
+ max_connections: int | None = 10,
116
+ max_keepalive_connections: int | None = None,
117
+ keepalive_expiry: float | None = None,
118
+ http1: bool = True,
119
+ http2: bool = False,
120
+ retries: int = 0,
121
+ network_backend: AsyncNetworkBackend | None = None,
122
+ ) -> None:
123
+ """
124
+ A connection pool for making HTTP requests.
125
+
126
+ Parameters:
127
+ proxy_url: The URL to use when connecting to the proxy server.
128
+ For example `"socks5://127.0.0.1:1080/"`.
129
+ ssl_context: An SSL context to use for verifying connections.
130
+ If not specified, the default `httpcore.default_ssl_context()`
131
+ will be used.
132
+ max_connections: The maximum number of concurrent HTTP connections that
133
+ the pool should allow. Any attempt to send a request on a pool that
134
+ would exceed this amount will block until a connection is available.
135
+ max_keepalive_connections: The maximum number of idle HTTP connections
136
+ that will be maintained in the pool.
137
+ keepalive_expiry: The duration in seconds that an idle HTTP connection
138
+ may be maintained for before being expired from the pool.
139
+ http1: A boolean indicating if HTTP/1.1 requests should be supported
140
+ by the connection pool. Defaults to True.
141
+ http2: A boolean indicating if HTTP/2 requests should be supported by
142
+ the connection pool. Defaults to False.
143
+ retries: The maximum number of retries when trying to establish
144
+ a connection.
145
+ network_backend: A backend instance to use for handling network I/O.
152
+ """
153
+ super().__init__(
154
+ ssl_context=ssl_context,
155
+ max_connections=max_connections,
156
+ max_keepalive_connections=max_keepalive_connections,
157
+ keepalive_expiry=keepalive_expiry,
158
+ http1=http1,
159
+ http2=http2,
160
+ network_backend=network_backend,
161
+ retries=retries,
162
+ )
163
+ self._ssl_context = ssl_context
164
+ self._proxy_url = enforce_url(proxy_url, name="proxy_url")
165
+ if proxy_auth is not None:
166
+ username, password = proxy_auth
167
+ username_bytes = enforce_bytes(username, name="proxy_auth")
168
+ password_bytes = enforce_bytes(password, name="proxy_auth")
169
+ self._proxy_auth: tuple[bytes, bytes] | None = (
170
+ username_bytes,
171
+ password_bytes,
172
+ )
173
+ else:
174
+ self._proxy_auth = None
175
+
176
+ def create_connection(self, origin: Origin) -> AsyncConnectionInterface:
177
+ return AsyncSocks5Connection(
178
+ proxy_origin=self._proxy_url.origin,
179
+ remote_origin=origin,
180
+ proxy_auth=self._proxy_auth,
181
+ ssl_context=self._ssl_context,
182
+ keepalive_expiry=self._keepalive_expiry,
183
+ http1=self._http1,
184
+ http2=self._http2,
185
+ network_backend=self._network_backend,
186
+ )
187
+
188
+
189
+ class AsyncSocks5Connection(AsyncConnectionInterface):
190
+ def __init__(
191
+ self,
192
+ proxy_origin: Origin,
193
+ remote_origin: Origin,
194
+ proxy_auth: tuple[bytes, bytes] | None = None,
195
+ ssl_context: ssl.SSLContext | None = None,
196
+ keepalive_expiry: float | None = None,
197
+ http1: bool = True,
198
+ http2: bool = False,
199
+ network_backend: AsyncNetworkBackend | None = None,
200
+ ) -> None:
201
+ self._proxy_origin = proxy_origin
202
+ self._remote_origin = remote_origin
203
+ self._proxy_auth = proxy_auth
204
+ self._ssl_context = ssl_context
205
+ self._keepalive_expiry = keepalive_expiry
206
+ self._http1 = http1
207
+ self._http2 = http2
208
+
209
+ self._network_backend: AsyncNetworkBackend = (
210
+ AutoBackend() if network_backend is None else network_backend
211
+ )
212
+ self._connect_lock = AsyncLock()
213
+ self._connection: AsyncConnectionInterface | None = None
214
+ self._connect_failed = False
215
+
216
+ async def handle_async_request(self, request: Request) -> Response:
217
+ timeouts = request.extensions.get("timeout", {})
218
+ sni_hostname = request.extensions.get("sni_hostname", None)
219
+ timeout = timeouts.get("connect", None)
220
+
221
+ async with self._connect_lock:
222
+ if self._connection is None:
223
+ try:
224
+ # Connect to the proxy
225
+ kwargs = {
226
+ "host": self._proxy_origin.host.decode("ascii"),
227
+ "port": self._proxy_origin.port,
228
+ "timeout": timeout,
229
+ }
230
+ async with Trace("connect_tcp", logger, request, kwargs) as trace:
231
+ stream = await self._network_backend.connect_tcp(**kwargs)
232
+ trace.return_value = stream
233
+
234
+ # Connect to the remote host using socks5
235
+ kwargs = {
236
+ "stream": stream,
237
+ "host": self._remote_origin.host.decode("ascii"),
238
+ "port": self._remote_origin.port,
239
+ "auth": self._proxy_auth,
240
+ }
241
+ async with Trace(
242
+ "setup_socks5_connection", logger, request, kwargs
243
+ ) as trace:
244
+ await _init_socks5_connection(**kwargs)
245
+ trace.return_value = stream
246
+
247
+ # Upgrade the stream to SSL
248
+ if self._remote_origin.scheme == b"https":
249
+ ssl_context = (
250
+ default_ssl_context()
251
+ if self._ssl_context is None
252
+ else self._ssl_context
253
+ )
254
+ alpn_protocols = (
255
+ ["http/1.1", "h2"] if self._http2 else ["http/1.1"]
256
+ )
257
+ ssl_context.set_alpn_protocols(alpn_protocols)
258
+
259
+ kwargs = {
260
+ "ssl_context": ssl_context,
261
+ "server_hostname": sni_hostname
262
+ or self._remote_origin.host.decode("ascii"),
263
+ "timeout": timeout,
264
+ }
265
+ async with Trace("start_tls", logger, request, kwargs) as trace:
266
+ stream = await stream.start_tls(**kwargs)
267
+ trace.return_value = stream
268
+
269
+ # Determine if we should be using HTTP/1.1 or HTTP/2
270
+ ssl_object = stream.get_extra_info("ssl_object")
271
+ http2_negotiated = (
272
+ ssl_object is not None
273
+ and ssl_object.selected_alpn_protocol() == "h2"
274
+ )
275
+
276
+ # Create the HTTP/1.1 or HTTP/2 connection
277
+ if http2_negotiated or (
278
+ self._http2 and not self._http1
279
+ ): # pragma: nocover
280
+ from .http2 import AsyncHTTP2Connection
281
+
282
+ self._connection = AsyncHTTP2Connection(
283
+ origin=self._remote_origin,
284
+ stream=stream,
285
+ keepalive_expiry=self._keepalive_expiry,
286
+ )
287
+ else:
288
+ self._connection = AsyncHTTP11Connection(
289
+ origin=self._remote_origin,
290
+ stream=stream,
291
+ keepalive_expiry=self._keepalive_expiry,
292
+ )
293
+ except Exception as exc:
294
+ self._connect_failed = True
295
+ raise exc
296
+ elif not self._connection.is_available(): # pragma: nocover
297
+ raise ConnectionNotAvailable()
298
+
299
+ return await self._connection.handle_async_request(request)
300
+
301
+ def can_handle_request(self, origin: Origin) -> bool:
302
+ return origin == self._remote_origin
303
+
304
+ async def aclose(self) -> None:
305
+ if self._connection is not None:
306
+ await self._connection.aclose()
307
+
308
+ def is_available(self) -> bool:
309
+ if self._connection is None: # pragma: nocover
310
+ # If HTTP/2 support is enabled, and the resulting connection could
311
+ # end up as HTTP/2 then we should indicate the connection as being
312
+ # available to service multiple requests.
313
+ return (
314
+ self._http2
315
+ and (self._remote_origin.scheme == b"https" or not self._http1)
316
+ and not self._connect_failed
317
+ )
318
+ return self._connection.is_available()
319
+
320
+ def has_expired(self) -> bool:
321
+ if self._connection is None: # pragma: nocover
322
+ return self._connect_failed
323
+ return self._connection.has_expired()
324
+
325
+ def is_idle(self) -> bool:
326
+ if self._connection is None: # pragma: nocover
327
+ return self._connect_failed
328
+ return self._connection.is_idle()
329
+
330
+ def is_closed(self) -> bool:
331
+ if self._connection is None: # pragma: nocover
332
+ return self._connect_failed
333
+ return self._connection.is_closed()
334
+
335
+ def info(self) -> str:
336
+ if self._connection is None: # pragma: nocover
337
+ return "CONNECTION FAILED" if self._connect_failed else "CONNECTING"
338
+ return self._connection.info()
339
+
340
+ def __repr__(self) -> str:
341
+ return f"<{self.__class__.__name__} [{self.info()}]>"
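A usage sketch for the SOCKS support above. Note the `socksio` import at the top of the file: this module requires the optional `httpcore[socks]` extra. The proxy endpoint below is a placeholder.

```python
import asyncio

import httpcore


async def main() -> None:
    # The SOCKS5 handshake in `_init_socks5_connection` runs first;
    # only then does HTTP (or TLS + HTTP) traffic flow over the stream.
    async with httpcore.AsyncSOCKSProxy(
        proxy_url="socks5://127.0.0.1:1080/",  # placeholder proxy endpoint
    ) as proxy:
        response = await proxy.request("GET", "https://www.example.com/")
        print(response.status)


asyncio.run(main())
```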
.venv/lib/python3.11/site-packages/httpcore/_backends/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (191 Bytes).
 
.venv/lib/python3.11/site-packages/httpcore/_backends/__pycache__/anyio.cpython-311.pyc ADDED
Binary file (9.56 kB).
 
.venv/lib/python3.11/site-packages/httpcore/_backends/__pycache__/trio.cpython-311.pyc ADDED
Binary file (10.1 kB).
 
.venv/lib/python3.11/site-packages/httpcore/_backends/auto.py ADDED
@@ -0,0 +1,52 @@
1
+ from __future__ import annotations
2
+
3
+ import typing
4
+
5
+ from .._synchronization import current_async_library
6
+ from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
7
+
8
+
9
+ class AutoBackend(AsyncNetworkBackend):
10
+ async def _init_backend(self) -> None:
11
+ if not hasattr(self, "_backend"):
12
+ backend = current_async_library()
13
+ if backend == "trio":
14
+ from .trio import TrioBackend
15
+
16
+ self._backend: AsyncNetworkBackend = TrioBackend()
17
+ else:
18
+ from .anyio import AnyIOBackend
19
+
20
+ self._backend = AnyIOBackend()
21
+
22
+ async def connect_tcp(
23
+ self,
24
+ host: str,
25
+ port: int,
26
+ timeout: float | None = None,
27
+ local_address: str | None = None,
28
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
29
+ ) -> AsyncNetworkStream:
30
+ await self._init_backend()
31
+ return await self._backend.connect_tcp(
32
+ host,
33
+ port,
34
+ timeout=timeout,
35
+ local_address=local_address,
36
+ socket_options=socket_options,
37
+ )
38
+
39
+ async def connect_unix_socket(
40
+ self,
41
+ path: str,
42
+ timeout: float | None = None,
43
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
44
+ ) -> AsyncNetworkStream: # pragma: nocover
45
+ await self._init_backend()
46
+ return await self._backend.connect_unix_socket(
47
+ path, timeout=timeout, socket_options=socket_options
48
+ )
49
+
50
+ async def sleep(self, seconds: float) -> None: # pragma: nocover
51
+ await self._init_backend()
52
+ return await self._backend.sleep(seconds)
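`AutoBackend` defers the trio-vs-anyio decision until the first I/O call, since the running async library can only be detected from inside a coroutine. A small sketch, importing from the vendored module path shown above (the hostname is a placeholder):

```python
import asyncio

from httpcore._backends.auto import AutoBackend


async def main() -> None:
    # Under asyncio this resolves to AnyIOBackend on first use;
    # under trio it would resolve to TrioBackend instead.
    backend = AutoBackend()
    stream = await backend.connect_tcp("www.example.com", 80, timeout=5.0)
    await stream.aclose()


asyncio.run(main())
```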
.venv/lib/python3.11/site-packages/httpcore/_backends/mock.py ADDED
@@ -0,0 +1,143 @@
1
+ from __future__ import annotations
2
+
3
+ import ssl
4
+ import typing
5
+
6
+ from .._exceptions import ReadError
7
+ from .base import (
8
+ SOCKET_OPTION,
9
+ AsyncNetworkBackend,
10
+ AsyncNetworkStream,
11
+ NetworkBackend,
12
+ NetworkStream,
13
+ )
14
+
15
+
16
+ class MockSSLObject:
17
+ def __init__(self, http2: bool):
18
+ self._http2 = http2
19
+
20
+ def selected_alpn_protocol(self) -> str:
21
+ return "h2" if self._http2 else "http/1.1"
22
+
23
+
24
+ class MockStream(NetworkStream):
25
+ def __init__(self, buffer: list[bytes], http2: bool = False) -> None:
26
+ self._buffer = buffer
27
+ self._http2 = http2
28
+ self._closed = False
29
+
30
+ def read(self, max_bytes: int, timeout: float | None = None) -> bytes:
31
+ if self._closed:
32
+ raise ReadError("Connection closed")
33
+ if not self._buffer:
34
+ return b""
35
+ return self._buffer.pop(0)
36
+
37
+ def write(self, buffer: bytes, timeout: float | None = None) -> None:
38
+ pass
39
+
40
+ def close(self) -> None:
41
+ self._closed = True
42
+
43
+ def start_tls(
44
+ self,
45
+ ssl_context: ssl.SSLContext,
46
+ server_hostname: str | None = None,
47
+ timeout: float | None = None,
48
+ ) -> NetworkStream:
49
+ return self
50
+
51
+ def get_extra_info(self, info: str) -> typing.Any:
52
+ return MockSSLObject(http2=self._http2) if info == "ssl_object" else None
53
+
54
+ def __repr__(self) -> str:
55
+ return "<httpcore.MockStream>"
56
+
57
+
58
+ class MockBackend(NetworkBackend):
59
+ def __init__(self, buffer: list[bytes], http2: bool = False) -> None:
60
+ self._buffer = buffer
61
+ self._http2 = http2
62
+
63
+ def connect_tcp(
64
+ self,
65
+ host: str,
66
+ port: int,
67
+ timeout: float | None = None,
68
+ local_address: str | None = None,
69
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
70
+ ) -> NetworkStream:
71
+ return MockStream(list(self._buffer), http2=self._http2)
72
+
73
+ def connect_unix_socket(
74
+ self,
75
+ path: str,
76
+ timeout: float | None = None,
77
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
78
+ ) -> NetworkStream:
79
+ return MockStream(list(self._buffer), http2=self._http2)
80
+
81
+ def sleep(self, seconds: float) -> None:
82
+ pass
83
+
84
+
85
+ class AsyncMockStream(AsyncNetworkStream):
86
+ def __init__(self, buffer: list[bytes], http2: bool = False) -> None:
87
+ self._buffer = buffer
88
+ self._http2 = http2
89
+ self._closed = False
90
+
91
+ async def read(self, max_bytes: int, timeout: float | None = None) -> bytes:
92
+ if self._closed:
93
+ raise ReadError("Connection closed")
94
+ if not self._buffer:
95
+ return b""
96
+ return self._buffer.pop(0)
97
+
98
+ async def write(self, buffer: bytes, timeout: float | None = None) -> None:
99
+ pass
100
+
101
+ async def aclose(self) -> None:
102
+ self._closed = True
103
+
104
+ async def start_tls(
105
+ self,
106
+ ssl_context: ssl.SSLContext,
107
+ server_hostname: str | None = None,
108
+ timeout: float | None = None,
109
+ ) -> AsyncNetworkStream:
110
+ return self
111
+
112
+ def get_extra_info(self, info: str) -> typing.Any:
113
+ return MockSSLObject(http2=self._http2) if info == "ssl_object" else None
114
+
115
+ def __repr__(self) -> str:
116
+ return "<httpcore.AsyncMockStream>"
117
+
118
+
119
+ class AsyncMockBackend(AsyncNetworkBackend):
120
+ def __init__(self, buffer: list[bytes], http2: bool = False) -> None:
121
+ self._buffer = buffer
122
+ self._http2 = http2
123
+
124
+ async def connect_tcp(
125
+ self,
126
+ host: str,
127
+ port: int,
128
+ timeout: float | None = None,
129
+ local_address: str | None = None,
130
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
131
+ ) -> AsyncNetworkStream:
132
+ return AsyncMockStream(list(self._buffer), http2=self._http2)
133
+
134
+ async def connect_unix_socket(
135
+ self,
136
+ path: str,
137
+ timeout: float | None = None,
138
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
139
+ ) -> AsyncNetworkStream:
140
+ return AsyncMockStream(list(self._buffer), http2=self._http2)
141
+
142
+ async def sleep(self, seconds: float) -> None:
143
+ pass
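The mock backends replay a canned buffer, where each item in `buffer` is returned by one `read()` call, which makes them handy for driving a pool without touching the network. A minimal sketch against the sync variants:

```python
import httpcore
from httpcore._backends.mock import MockBackend

# Each bytes item is handed back by a single `read()` call.
network_backend = MockBackend(
    buffer=[
        b"HTTP/1.1 200 OK\r\n",
        b"Content-Length: 13\r\n",
        b"\r\n",
        b"Hello, world!",
    ]
)

with httpcore.ConnectionPool(network_backend=network_backend) as pool:
    response = pool.request("GET", "http://www.example.com/")
    print(response.status, response.content)
```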
.venv/lib/python3.11/site-packages/httpcore/_backends/sync.py ADDED
@@ -0,0 +1,241 @@
1
+ from __future__ import annotations
2
+
3
+ import functools
4
+ import socket
5
+ import ssl
6
+ import sys
7
+ import typing
8
+
9
+ from .._exceptions import (
10
+ ConnectError,
11
+ ConnectTimeout,
12
+ ExceptionMapping,
13
+ ReadError,
14
+ ReadTimeout,
15
+ WriteError,
16
+ WriteTimeout,
17
+ map_exceptions,
18
+ )
19
+ from .._utils import is_socket_readable
20
+ from .base import SOCKET_OPTION, NetworkBackend, NetworkStream
21
+
22
+
23
+ class TLSinTLSStream(NetworkStream): # pragma: no cover
24
+ """
25
+ Because the standard `SSLContext.wrap_socket` method does
26
+ not work for `SSLSocket` objects, we need this class
27
+ to implement TLS stream using an underlying `SSLObject`
28
+ instance in order to support TLS on top of TLS.
29
+ """
30
+
31
+ # Defined in RFC 8449
32
+ TLS_RECORD_SIZE = 16384
33
+
34
+ def __init__(
35
+ self,
36
+ sock: socket.socket,
37
+ ssl_context: ssl.SSLContext,
38
+ server_hostname: str | None = None,
39
+ timeout: float | None = None,
40
+ ):
41
+ self._sock = sock
42
+ self._incoming = ssl.MemoryBIO()
43
+ self._outgoing = ssl.MemoryBIO()
44
+
45
+ self.ssl_obj = ssl_context.wrap_bio(
46
+ incoming=self._incoming,
47
+ outgoing=self._outgoing,
48
+ server_hostname=server_hostname,
49
+ )
50
+
51
+ self._sock.settimeout(timeout)
52
+ self._perform_io(self.ssl_obj.do_handshake)
53
+
54
+ def _perform_io(
55
+ self,
56
+ func: typing.Callable[..., typing.Any],
57
+ ) -> typing.Any:
58
+ ret = None
59
+
60
+ while True:
61
+ errno = None
62
+ try:
63
+ ret = func()
64
+ except (ssl.SSLWantReadError, ssl.SSLWantWriteError) as e:
65
+ errno = e.errno
66
+
67
+ self._sock.sendall(self._outgoing.read())
68
+
69
+ if errno == ssl.SSL_ERROR_WANT_READ:
70
+ buf = self._sock.recv(self.TLS_RECORD_SIZE)
71
+
72
+ if buf:
73
+ self._incoming.write(buf)
74
+ else:
75
+ self._incoming.write_eof()
76
+ if errno is None:
77
+ return ret
78
+
79
+ def read(self, max_bytes: int, timeout: float | None = None) -> bytes:
80
+ exc_map: ExceptionMapping = {socket.timeout: ReadTimeout, OSError: ReadError}
81
+ with map_exceptions(exc_map):
82
+ self._sock.settimeout(timeout)
83
+ return typing.cast(
84
+ bytes, self._perform_io(functools.partial(self.ssl_obj.read, max_bytes))
85
+ )
86
+
87
+ def write(self, buffer: bytes, timeout: float | None = None) -> None:
88
+ exc_map: ExceptionMapping = {socket.timeout: WriteTimeout, OSError: WriteError}
89
+ with map_exceptions(exc_map):
90
+ self._sock.settimeout(timeout)
91
+ while buffer:
92
+ nsent = self._perform_io(functools.partial(self.ssl_obj.write, buffer))
93
+ buffer = buffer[nsent:]
94
+
95
+ def close(self) -> None:
96
+ self._sock.close()
97
+
98
+ def start_tls(
99
+ self,
100
+ ssl_context: ssl.SSLContext,
101
+ server_hostname: str | None = None,
102
+ timeout: float | None = None,
103
+ ) -> NetworkStream:
104
+ raise NotImplementedError()
105
+
106
+ def get_extra_info(self, info: str) -> typing.Any:
107
+ if info == "ssl_object":
108
+ return self.ssl_obj
109
+ if info == "client_addr":
110
+ return self._sock.getsockname()
111
+ if info == "server_addr":
112
+ return self._sock.getpeername()
113
+ if info == "socket":
114
+ return self._sock
115
+ if info == "is_readable":
116
+ return is_socket_readable(self._sock)
117
+ return None
118
+
119
+
120
+ class SyncStream(NetworkStream):
121
+ def __init__(self, sock: socket.socket) -> None:
122
+ self._sock = sock
123
+
124
+ def read(self, max_bytes: int, timeout: float | None = None) -> bytes:
125
+ exc_map: ExceptionMapping = {socket.timeout: ReadTimeout, OSError: ReadError}
126
+ with map_exceptions(exc_map):
127
+ self._sock.settimeout(timeout)
128
+ return self._sock.recv(max_bytes)
129
+
130
+ def write(self, buffer: bytes, timeout: float | None = None) -> None:
131
+ if not buffer:
132
+ return
133
+
134
+ exc_map: ExceptionMapping = {socket.timeout: WriteTimeout, OSError: WriteError}
135
+ with map_exceptions(exc_map):
136
+ while buffer:
137
+ self._sock.settimeout(timeout)
138
+ n = self._sock.send(buffer)
139
+ buffer = buffer[n:]
140
+
141
+ def close(self) -> None:
142
+ self._sock.close()
143
+
144
+ def start_tls(
145
+ self,
146
+ ssl_context: ssl.SSLContext,
147
+ server_hostname: str | None = None,
148
+ timeout: float | None = None,
149
+ ) -> NetworkStream:
150
+ exc_map: ExceptionMapping = {
151
+ socket.timeout: ConnectTimeout,
152
+ OSError: ConnectError,
153
+ }
154
+ with map_exceptions(exc_map):
155
+ try:
156
+ if isinstance(self._sock, ssl.SSLSocket): # pragma: no cover
157
+ # If the underlying socket has already been upgraded
158
+ # to the TLS layer (i.e. is an instance of SSLSocket),
159
+ # we need some additional smarts to support TLS-in-TLS.
160
+ return TLSinTLSStream(
161
+ self._sock, ssl_context, server_hostname, timeout
162
+ )
163
+ else:
164
+ self._sock.settimeout(timeout)
165
+ sock = ssl_context.wrap_socket(
166
+ self._sock, server_hostname=server_hostname
167
+ )
168
+ except Exception as exc: # pragma: nocover
169
+ self.close()
170
+ raise exc
171
+ return SyncStream(sock)
172
+
173
+ def get_extra_info(self, info: str) -> typing.Any:
174
+ if info == "ssl_object" and isinstance(self._sock, ssl.SSLSocket):
175
+ return self._sock._sslobj # type: ignore
176
+ if info == "client_addr":
177
+ return self._sock.getsockname()
178
+ if info == "server_addr":
179
+ return self._sock.getpeername()
180
+ if info == "socket":
181
+ return self._sock
182
+ if info == "is_readable":
183
+ return is_socket_readable(self._sock)
184
+ return None
185
+
186
+
187
+ class SyncBackend(NetworkBackend):
188
+ def connect_tcp(
189
+ self,
190
+ host: str,
191
+ port: int,
192
+ timeout: float | None = None,
193
+ local_address: str | None = None,
194
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
195
+ ) -> NetworkStream:
196
+ # Note that we automatically include `TCP_NODELAY`
197
+ # in addition to any other custom socket options.
198
+ if socket_options is None:
199
+ socket_options = [] # pragma: no cover
200
+ address = (host, port)
201
+ source_address = None if local_address is None else (local_address, 0)
202
+ exc_map: ExceptionMapping = {
203
+ socket.timeout: ConnectTimeout,
204
+ OSError: ConnectError,
205
+ }
206
+
207
+ with map_exceptions(exc_map):
208
+ sock = socket.create_connection(
209
+ address,
210
+ timeout,
211
+ source_address=source_address,
212
+ )
213
+ for option in socket_options:
214
+ sock.setsockopt(*option) # pragma: no cover
215
+ sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
216
+ return SyncStream(sock)
217
+
218
+ def connect_unix_socket(
219
+ self,
220
+ path: str,
221
+ timeout: float | None = None,
222
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
223
+ ) -> NetworkStream: # pragma: nocover
224
+ if sys.platform == "win32":
225
+ raise RuntimeError(
226
+ "Attempted to connect to a UNIX socket on a Windows system."
227
+ )
228
+ if socket_options is None:
229
+ socket_options = []
230
+
231
+ exc_map: ExceptionMapping = {
232
+ socket.timeout: ConnectTimeout,
233
+ OSError: ConnectError,
234
+ }
235
+ with map_exceptions(exc_map):
236
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
237
+ for option in socket_options:
238
+ sock.setsockopt(*option)
239
+ sock.settimeout(timeout)
240
+ sock.connect(path)
241
+ return SyncStream(sock)
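A sketch of driving `SyncBackend` directly, including the plain-TLS path through `start_tls` (the TLS-in-TLS branch only triggers when the socket is already an `SSLSocket`). `default_ssl_context` comes from the package's `_ssl` module, as imported elsewhere in this diff; the hostname is a placeholder.

```python
from httpcore._backends.sync import SyncBackend
from httpcore._ssl import default_ssl_context

backend = SyncBackend()
stream = backend.connect_tcp("www.example.com", 443, timeout=5.0)
# Upgrades the plain socket; returns a new SyncStream wrapping an SSLSocket.
stream = stream.start_tls(
    default_ssl_context(), server_hostname="www.example.com", timeout=5.0
)
stream.write(
    b"GET / HTTP/1.1\r\nHost: www.example.com\r\nConnection: close\r\n\r\n",
    timeout=5.0,
)
print(stream.read(4096, timeout=5.0)[:64])
stream.close()
```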
.venv/lib/python3.11/site-packages/httpcore/_backends/trio.py ADDED
@@ -0,0 +1,159 @@
1
+ from __future__ import annotations
2
+
3
+ import ssl
4
+ import typing
5
+
6
+ import trio
7
+
8
+ from .._exceptions import (
9
+ ConnectError,
10
+ ConnectTimeout,
11
+ ExceptionMapping,
12
+ ReadError,
13
+ ReadTimeout,
14
+ WriteError,
15
+ WriteTimeout,
16
+ map_exceptions,
17
+ )
18
+ from .base import SOCKET_OPTION, AsyncNetworkBackend, AsyncNetworkStream
19
+
20
+
21
+ class TrioStream(AsyncNetworkStream):
22
+ def __init__(self, stream: trio.abc.Stream) -> None:
23
+ self._stream = stream
24
+
25
+ async def read(self, max_bytes: int, timeout: float | None = None) -> bytes:
26
+ timeout_or_inf = float("inf") if timeout is None else timeout
27
+ exc_map: ExceptionMapping = {
28
+ trio.TooSlowError: ReadTimeout,
29
+ trio.BrokenResourceError: ReadError,
30
+ trio.ClosedResourceError: ReadError,
31
+ }
32
+ with map_exceptions(exc_map):
33
+ with trio.fail_after(timeout_or_inf):
34
+ data: bytes = await self._stream.receive_some(max_bytes=max_bytes)
35
+ return data
36
+
37
+ async def write(self, buffer: bytes, timeout: float | None = None) -> None:
38
+ if not buffer:
39
+ return
40
+
41
+ timeout_or_inf = float("inf") if timeout is None else timeout
42
+ exc_map: ExceptionMapping = {
43
+ trio.TooSlowError: WriteTimeout,
44
+ trio.BrokenResourceError: WriteError,
45
+ trio.ClosedResourceError: WriteError,
46
+ }
47
+ with map_exceptions(exc_map):
48
+ with trio.fail_after(timeout_or_inf):
49
+ await self._stream.send_all(data=buffer)
50
+
51
+ async def aclose(self) -> None:
52
+ await self._stream.aclose()
53
+
54
+ async def start_tls(
55
+ self,
56
+ ssl_context: ssl.SSLContext,
57
+ server_hostname: str | None = None,
58
+ timeout: float | None = None,
59
+ ) -> AsyncNetworkStream:
60
+ timeout_or_inf = float("inf") if timeout is None else timeout
61
+ exc_map: ExceptionMapping = {
62
+ trio.TooSlowError: ConnectTimeout,
63
+ trio.BrokenResourceError: ConnectError,
64
+ }
65
+ ssl_stream = trio.SSLStream(
66
+ self._stream,
67
+ ssl_context=ssl_context,
68
+ server_hostname=server_hostname,
69
+ https_compatible=True,
70
+ server_side=False,
71
+ )
72
+ with map_exceptions(exc_map):
73
+ try:
74
+ with trio.fail_after(timeout_or_inf):
75
+ await ssl_stream.do_handshake()
76
+ except Exception as exc: # pragma: nocover
77
+ await self.aclose()
78
+ raise exc
79
+ return TrioStream(ssl_stream)
80
+
81
+ def get_extra_info(self, info: str) -> typing.Any:
82
+ if info == "ssl_object" and isinstance(self._stream, trio.SSLStream):
83
+ # Type checkers cannot see `_ssl_object` attribute because trio._ssl.SSLStream uses __getattr__/__setattr__.
84
+ # Tracked at https://github.com/python-trio/trio/issues/542
85
+ return self._stream._ssl_object # type: ignore[attr-defined]
86
+ if info == "client_addr":
87
+ return self._get_socket_stream().socket.getsockname()
88
+ if info == "server_addr":
89
+ return self._get_socket_stream().socket.getpeername()
90
+ if info == "socket":
91
+ stream = self._stream
92
+ while isinstance(stream, trio.SSLStream):
93
+ stream = stream.transport_stream
94
+ assert isinstance(stream, trio.SocketStream)
95
+ return stream.socket
96
+ if info == "is_readable":
97
+ socket = self.get_extra_info("socket")
98
+ return socket.is_readable()
99
+ return None
100
+
101
+ def _get_socket_stream(self) -> trio.SocketStream:
102
+ stream = self._stream
103
+ while isinstance(stream, trio.SSLStream):
104
+ stream = stream.transport_stream
105
+ assert isinstance(stream, trio.SocketStream)
106
+ return stream
107
+
108
+
109
+ class TrioBackend(AsyncNetworkBackend):
110
+ async def connect_tcp(
111
+ self,
112
+ host: str,
113
+ port: int,
114
+ timeout: float | None = None,
115
+ local_address: str | None = None,
116
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
117
+ ) -> AsyncNetworkStream:
118
+ # By default for TCP sockets, trio enables TCP_NODELAY.
119
+ # https://trio.readthedocs.io/en/stable/reference-io.html#trio.SocketStream
120
+ if socket_options is None:
121
+ socket_options = [] # pragma: no cover
122
+ timeout_or_inf = float("inf") if timeout is None else timeout
123
+ exc_map: ExceptionMapping = {
124
+ trio.TooSlowError: ConnectTimeout,
125
+ trio.BrokenResourceError: ConnectError,
126
+ OSError: ConnectError,
127
+ }
128
+ with map_exceptions(exc_map):
129
+ with trio.fail_after(timeout_or_inf):
130
+ stream: trio.abc.Stream = await trio.open_tcp_stream(
131
+ host=host, port=port, local_address=local_address
132
+ )
133
+ for option in socket_options:
134
+ stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover
135
+ return TrioStream(stream)
136
+
137
+ async def connect_unix_socket(
138
+ self,
139
+ path: str,
140
+ timeout: float | None = None,
141
+ socket_options: typing.Iterable[SOCKET_OPTION] | None = None,
142
+ ) -> AsyncNetworkStream: # pragma: nocover
143
+ if socket_options is None:
144
+ socket_options = []
145
+ timeout_or_inf = float("inf") if timeout is None else timeout
146
+ exc_map: ExceptionMapping = {
147
+ trio.TooSlowError: ConnectTimeout,
148
+ trio.BrokenResourceError: ConnectError,
149
+ OSError: ConnectError,
150
+ }
151
+ with map_exceptions(exc_map):
152
+ with trio.fail_after(timeout_or_inf):
153
+ stream: trio.abc.Stream = await trio.open_unix_socket(path)
154
+ for option in socket_options:
155
+ stream.setsockopt(*option) # type: ignore[attr-defined] # pragma: no cover
156
+ return TrioStream(stream)
157
+
158
+ async def sleep(self, seconds: float) -> None:
159
+ await trio.sleep(seconds) # pragma: nocover
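trio programs can pass `TrioBackend` explicitly rather than relying on `AutoBackend` detection; a brief sketch (the target URL is a placeholder):

```python
import trio

import httpcore
from httpcore._backends.trio import TrioBackend


async def main() -> None:
    async with httpcore.AsyncConnectionPool(network_backend=TrioBackend()) as pool:
        response = await pool.request("GET", "https://www.example.com/")
        print(response.status)


trio.run(main)
```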
.venv/lib/python3.11/site-packages/httpcore/_exceptions.py ADDED
@@ -0,0 +1,81 @@
1
+ import contextlib
2
+ import typing
3
+
4
+ ExceptionMapping = typing.Mapping[typing.Type[Exception], typing.Type[Exception]]
5
+
6
+
7
+ @contextlib.contextmanager
8
+ def map_exceptions(map: ExceptionMapping) -> typing.Iterator[None]:
9
+ try:
10
+ yield
11
+ except Exception as exc: # noqa: PIE786
12
+ for from_exc, to_exc in map.items():
13
+ if isinstance(exc, from_exc):
14
+ raise to_exc(exc) from exc
15
+ raise # pragma: nocover
16
+
17
+
18
+ class ConnectionNotAvailable(Exception):
19
+ pass
20
+
21
+
22
+ class ProxyError(Exception):
23
+ pass
24
+
25
+
26
+ class UnsupportedProtocol(Exception):
27
+ pass
28
+
29
+
30
+ class ProtocolError(Exception):
31
+ pass
32
+
33
+
34
+ class RemoteProtocolError(ProtocolError):
35
+ pass
36
+
37
+
38
+ class LocalProtocolError(ProtocolError):
39
+ pass
40
+
41
+
42
+ # Timeout errors
43
+
44
+
45
+ class TimeoutException(Exception):
46
+ pass
47
+
48
+
49
+ class PoolTimeout(TimeoutException):
50
+ pass
51
+
52
+
53
+ class ConnectTimeout(TimeoutException):
54
+ pass
55
+
56
+
57
+ class ReadTimeout(TimeoutException):
58
+ pass
59
+
60
+
61
+ class WriteTimeout(TimeoutException):
62
+ pass
63
+
64
+
65
+ # Network errors
66
+
67
+
68
+ class NetworkError(Exception):
69
+ pass
70
+
71
+
72
+ class ConnectError(NetworkError):
73
+ pass
74
+
75
+
76
+ class ReadError(NetworkError):
77
+ pass
78
+
79
+
80
+ class WriteError(NetworkError):
81
+ pass
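The `map_exceptions` context manager above is the package's mechanism for translating backend-specific errors into this exception hierarchy; the network backends in this diff all use it the same way. A small sketch (the hostname is a placeholder):

```python
import socket

from httpcore._exceptions import ConnectError, ConnectTimeout, map_exceptions

# Any socket.timeout raised inside the block is re-raised as ConnectTimeout,
# and any other OSError as ConnectError (chained via `raise ... from exc`).
# Order matters: socket.timeout subclasses OSError, so it is listed first.
exc_map = {socket.timeout: ConnectTimeout, OSError: ConnectError}
with map_exceptions(exc_map):
    sock = socket.create_connection(("www.example.com", 80), timeout=5.0)
    sock.close()
```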
.venv/lib/python3.11/site-packages/httpcore/_models.py ADDED
@@ -0,0 +1,516 @@
1
+ from __future__ import annotations
2
+
3
+ import base64
4
+ import ssl
5
+ import typing
6
+ import urllib.parse
7
+
8
+ # Functions for typechecking...
9
+
10
+
11
+ ByteOrStr = typing.Union[bytes, str]
12
+ HeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]]
13
+ HeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr]
14
+ HeaderTypes = typing.Union[HeadersAsSequence, HeadersAsMapping, None]
15
+
16
+ Extensions = typing.MutableMapping[str, typing.Any]
17
+
18
+
19
+ def enforce_bytes(value: bytes | str, *, name: str) -> bytes:
20
+ """
21
+ Any arguments that are ultimately represented as bytes can be specified
22
+ either as bytes or as strings.
23
+
24
+ However we enforce that any string arguments must only contain characters in
25
+ the plain ASCII range. chr(0)...chr(127). If you need to use characters
26
+ outside that range then be precise, and use a byte-wise argument.
27
+ """
28
+ if isinstance(value, str):
29
+ try:
30
+ return value.encode("ascii")
31
+ except UnicodeEncodeError:
32
+ raise TypeError(f"{name} strings may not include unicode characters.")
33
+ elif isinstance(value, bytes):
34
+ return value
35
+
36
+ seen_type = type(value).__name__
37
+ raise TypeError(f"{name} must be bytes or str, but got {seen_type}.")
38
+
39
+
40
+ def enforce_url(value: URL | bytes | str, *, name: str) -> URL:
41
+ """
42
+ Type check for URL parameters.
43
+ """
44
+ if isinstance(value, (bytes, str)):
45
+ return URL(value)
46
+ elif isinstance(value, URL):
47
+ return value
48
+
49
+ seen_type = type(value).__name__
50
+ raise TypeError(f"{name} must be a URL, bytes, or str, but got {seen_type}.")
51
+
52
+
53
+ def enforce_headers(
54
+ value: HeadersAsMapping | HeadersAsSequence | None = None, *, name: str
55
+ ) -> list[tuple[bytes, bytes]]:
56
+ """
57
+ Convienence function that ensure all items in request or response headers
58
+ are either bytes or strings in the plain ASCII range.
59
+ """
60
+ if value is None:
61
+ return []
62
+ elif isinstance(value, typing.Mapping):
63
+ return [
64
+ (
65
+ enforce_bytes(k, name="header name"),
66
+ enforce_bytes(v, name="header value"),
67
+ )
68
+ for k, v in value.items()
69
+ ]
70
+ elif isinstance(value, typing.Sequence):
71
+ return [
72
+ (
73
+ enforce_bytes(k, name="header name"),
74
+ enforce_bytes(v, name="header value"),
75
+ )
76
+ for k, v in value
77
+ ]
78
+
79
+ seen_type = type(value).__name__
80
+ raise TypeError(
81
+ f"{name} must be a mapping or sequence of two-tuples, but got {seen_type}."
82
+ )
83
+
84
+
85
+ def enforce_stream(
86
+ value: bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes] | None,
87
+ *,
88
+ name: str,
89
+ ) -> typing.Iterable[bytes] | typing.AsyncIterable[bytes]:
90
+ if value is None:
91
+ return ByteStream(b"")
92
+ elif isinstance(value, bytes):
93
+ return ByteStream(value)
94
+ return value
95
+
96
+
97
+ # * https://tools.ietf.org/html/rfc3986#section-3.2.3
98
+ # * https://url.spec.whatwg.org/#url-miscellaneous
99
+ # * https://url.spec.whatwg.org/#scheme-state
100
+ DEFAULT_PORTS = {
101
+ b"ftp": 21,
102
+ b"http": 80,
103
+ b"https": 443,
104
+ b"ws": 80,
105
+ b"wss": 443,
106
+ }
107
+
108
+
109
+ def include_request_headers(
110
+ headers: list[tuple[bytes, bytes]],
111
+ *,
112
+ url: "URL",
113
+ content: None | bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes],
114
+ ) -> list[tuple[bytes, bytes]]:
115
+ headers_set = set(k.lower() for k, v in headers)
116
+
117
+ if b"host" not in headers_set:
118
+ default_port = DEFAULT_PORTS.get(url.scheme)
119
+ if url.port is None or url.port == default_port:
120
+ header_value = url.host
121
+ else:
122
+ header_value = b"%b:%d" % (url.host, url.port)
123
+ headers = [(b"Host", header_value)] + headers
124
+
125
+ if (
126
+ content is not None
127
+ and b"content-length" not in headers_set
128
+ and b"transfer-encoding" not in headers_set
129
+ ):
130
+ if isinstance(content, bytes):
131
+ content_length = str(len(content)).encode("ascii")
132
+ headers += [(b"Content-Length", content_length)]
133
+ else:
134
+ headers += [(b"Transfer-Encoding", b"chunked")] # pragma: nocover
135
+
136
+ return headers
137
+
138
+
139
+ # Interfaces for byte streams...
140
+
141
+
142
+ class ByteStream:
143
+ """
144
+ A container for non-streaming content, and that supports both sync and async
145
+ stream iteration.
146
+ """
147
+
148
+ def __init__(self, content: bytes) -> None:
149
+ self._content = content
150
+
151
+ def __iter__(self) -> typing.Iterator[bytes]:
152
+ yield self._content
153
+
154
+ async def __aiter__(self) -> typing.AsyncIterator[bytes]:
155
+ yield self._content
156
+
157
+ def __repr__(self) -> str:
158
+ return f"<{self.__class__.__name__} [{len(self._content)} bytes]>"
159
+
160
+
161
+ class Origin:
162
+ def __init__(self, scheme: bytes, host: bytes, port: int) -> None:
163
+ self.scheme = scheme
164
+ self.host = host
165
+ self.port = port
166
+
167
+ def __eq__(self, other: typing.Any) -> bool:
168
+ return (
169
+ isinstance(other, Origin)
170
+ and self.scheme == other.scheme
171
+ and self.host == other.host
172
+ and self.port == other.port
173
+ )
174
+
175
+ def __str__(self) -> str:
176
+ scheme = self.scheme.decode("ascii")
177
+ host = self.host.decode("ascii")
178
+ port = str(self.port)
179
+ return f"{scheme}://{host}:{port}"
180
+
181
+
182
+ class URL:
183
+ """
184
+ Represents the URL against which an HTTP request may be made.
185
+
186
+ The URL may either be specified as a plain string, for convienence:
187
+
188
+ ```python
189
+ url = httpcore.URL("https://www.example.com/")
190
+ ```
191
+
192
+ Or be constructed with explicitily pre-parsed components:
193
+
194
+ ```python
195
+ url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/')
196
+ ```
197
+
198
+ Using this second more explicit style allows integrations that are using
199
+ `httpcore` to pass through URLs that have already been parsed in order to use
200
+ libraries such as `rfc-3986` rather than relying on the stdlib. It also ensures
201
+ that URL parsing is treated identically at both the networking level and at any
202
+ higher layers of abstraction.
203
+
204
+ The four components are important here, as they allow the URL to be precisely
205
+ specified in a pre-parsed format. They also allow certain types of request to
206
+ be created that could not otherwise be expressed.
207
+
208
+ For example, an HTTP request to `http://www.example.com/` forwarded via a proxy
209
+ at `http://localhost:8080`...
210
+
211
+ ```python
212
+ # Constructs an HTTP request with a complete URL as the target:
213
+ # GET https://www.example.com/ HTTP/1.1
214
+ url = httpcore.URL(
215
+ scheme=b'http',
216
+ host=b'localhost',
217
+ port=8080,
218
+ target=b'https://www.example.com/'
219
+ )
220
+ request = httpcore.Request(
221
+ method="GET",
222
+ url=url
223
+ )
224
+ ```
225
+
226
+ Another example is constructing an `OPTIONS *` request...
227
+
228
+ ```python
229
+ # Constructs an 'OPTIONS *' HTTP request:
230
+ # OPTIONS * HTTP/1.1
231
+ url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*')
232
+ request = httpcore.Request(method="OPTIONS", url=url)
233
+ ```
234
+
235
+ This kind of request is not possible to formulate with a URL string,
236
+ because the `/` delimiter is always used to demark the target from the
237
+ host/port portion of the URL.
238
+
239
+ For convenience, string-like arguments may be specified either as strings or
240
+ as bytes. However, once a request is being issue over-the-wire, the URL
241
+ components are always ultimately required to be a bytewise representation.
242
+
243
+ In order to avoid any ambiguity over character encodings, when strings are used
244
+ as arguments, they must be strictly limited to the ASCII range `chr(0)`-`chr(127)`.
245
+ If you require a bytewise representation that is outside this range you must
246
+ handle the character encoding directly, and pass a bytes instance.
247
+ """
248
+
249
+ def __init__(
250
+ self,
251
+ url: bytes | str = "",
252
+ *,
253
+ scheme: bytes | str = b"",
254
+ host: bytes | str = b"",
255
+ port: int | None = None,
256
+ target: bytes | str = b"",
257
+ ) -> None:
258
+ """
259
+ Parameters:
260
+ url: The complete URL as a string or bytes.
261
+ scheme: The URL scheme as a string or bytes.
262
+ Typically either `"http"` or `"https"`.
263
+ host: The URL host as a string or bytes. Such as `"www.example.com"`.
264
+ port: The port to connect to. Either an integer or `None`.
265
+ target: The target of the HTTP request. Such as `"/items?search=red"`.
266
+ """
267
+ if url:
268
+ parsed = urllib.parse.urlparse(enforce_bytes(url, name="url"))
269
+ self.scheme = parsed.scheme
270
+ self.host = parsed.hostname or b""
271
+ self.port = parsed.port
272
+ self.target = (parsed.path or b"/") + (
273
+ b"?" + parsed.query if parsed.query else b""
274
+ )
275
+ else:
276
+ self.scheme = enforce_bytes(scheme, name="scheme")
277
+ self.host = enforce_bytes(host, name="host")
278
+ self.port = port
279
+ self.target = enforce_bytes(target, name="target")
280
+
281
+ @property
282
+ def origin(self) -> Origin:
283
+ default_port = {
284
+ b"http": 80,
285
+ b"https": 443,
286
+ b"ws": 80,
287
+ b"wss": 443,
288
+ b"socks5": 1080,
289
+ b"socks5h": 1080,
290
+ }[self.scheme]
291
+ return Origin(
292
+ scheme=self.scheme, host=self.host, port=self.port or default_port
293
+ )
294
+
295
+ def __eq__(self, other: typing.Any) -> bool:
296
+ return (
297
+ isinstance(other, URL)
298
+ and other.scheme == self.scheme
299
+ and other.host == self.host
300
+ and other.port == self.port
301
+ and other.target == self.target
302
+ )
303
+
304
+ def __bytes__(self) -> bytes:
305
+ if self.port is None:
306
+ return b"%b://%b%b" % (self.scheme, self.host, self.target)
307
+ return b"%b://%b:%d%b" % (self.scheme, self.host, self.port, self.target)
+
+     def __repr__(self) -> str:
+         return (
+             f"{self.__class__.__name__}(scheme={self.scheme!r}, "
+             f"host={self.host!r}, port={self.port!r}, target={self.target!r})"
+         )
+
+
+ class Request:
+     """
+     An HTTP request.
+     """
+
+     def __init__(
+         self,
+         method: bytes | str,
+         url: URL | bytes | str,
+         *,
+         headers: HeaderTypes = None,
+         content: bytes
+         | typing.Iterable[bytes]
+         | typing.AsyncIterable[bytes]
+         | None = None,
+         extensions: Extensions | None = None,
+     ) -> None:
+         """
+         Parameters:
+             method: The HTTP request method, either as a string or bytes.
+                 For example: `GET`.
+             url: The request URL, either as a `URL` instance, or as a string or bytes.
+                 For example: `"https://www.example.com"`.
+             headers: The HTTP request headers.
+             content: The content of the request body.
+             extensions: A dictionary of optional extra information included on
+                 the request. Possible keys include `"timeout"` and `"trace"`.
+         """
+         self.method: bytes = enforce_bytes(method, name="method")
+         self.url: URL = enforce_url(url, name="url")
+         self.headers: list[tuple[bytes, bytes]] = enforce_headers(
+             headers, name="headers"
+         )
+         self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = (
+             enforce_stream(content, name="content")
+         )
+         self.extensions = {} if extensions is None else extensions
+
+         if "target" in self.extensions:
+             self.url = URL(
+                 scheme=self.url.scheme,
+                 host=self.url.host,
+                 port=self.url.port,
+                 target=self.extensions["target"],
+             )
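+             # Illustrative note: e.g. extensions={"target": b"*"} rewrites the
+             # URL so that a request with method "OPTIONS" is sent as
+             # 'OPTIONS * HTTP/1.1'.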
+
+     def __repr__(self) -> str:
+         return f"<{self.__class__.__name__} [{self.method!r}]>"
+
+
+ class Response:
+     """
+     An HTTP response.
+     """
+
+     def __init__(
+         self,
+         status: int,
+         *,
+         headers: HeaderTypes = None,
+         content: bytes
+         | typing.Iterable[bytes]
+         | typing.AsyncIterable[bytes]
+         | None = None,
+         extensions: Extensions | None = None,
+     ) -> None:
+         """
+         Parameters:
+             status: The HTTP status code of the response. For example `200`.
+             headers: The HTTP response headers.
+             content: The content of the response body.
+             extensions: A dictionary of optional extra information included on
+                 the response. Possible keys include `"http_version"`,
+                 `"reason_phrase"`, and `"network_stream"`.
+         """
+         self.status: int = status
+         self.headers: list[tuple[bytes, bytes]] = enforce_headers(
+             headers, name="headers"
+         )
+         self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = (
+             enforce_stream(content, name="content")
+         )
+         self.extensions = {} if extensions is None else extensions
+
+         self._stream_consumed = False
+
+     @property
+     def content(self) -> bytes:
+         if not hasattr(self, "_content"):
+             if isinstance(self.stream, typing.Iterable):
+                 raise RuntimeError(
+                     "Attempted to access 'response.content' on a streaming response. "
+                     "Call 'response.read()' first."
+                 )
+             else:
+                 raise RuntimeError(
+                     "Attempted to access 'response.content' on a streaming response. "
+                     "Call 'await response.aread()' first."
+                 )
+         return self._content
+
+     def __repr__(self) -> str:
+         return f"<{self.__class__.__name__} [{self.status}]>"
+
+     # Sync interface...
+
+     def read(self) -> bytes:
+         if not isinstance(self.stream, typing.Iterable):  # pragma: nocover
+             raise RuntimeError(
+                 "Attempted to read an asynchronous response using 'response.read()'. "
+                 "You should use 'await response.aread()' instead."
+             )
+         if not hasattr(self, "_content"):
+             self._content = b"".join([part for part in self.iter_stream()])
+         return self._content
+
+     def iter_stream(self) -> typing.Iterator[bytes]:
+         if not isinstance(self.stream, typing.Iterable):  # pragma: nocover
+             raise RuntimeError(
+                 "Attempted to stream an asynchronous response using 'for ... in "
+                 "response.iter_stream()'. "
+                 "You should use 'async for ... in response.aiter_stream()' instead."
+             )
+         if self._stream_consumed:
+             raise RuntimeError(
+                 "Attempted to call 'for ... in response.iter_stream()' more than once."
+             )
+         self._stream_consumed = True
+         for chunk in self.stream:
+             yield chunk
+
+     def close(self) -> None:
+         if not isinstance(self.stream, typing.Iterable):  # pragma: nocover
+             raise RuntimeError(
+                 "Attempted to close an asynchronous response using 'response.close()'. "
+                 "You should use 'await response.aclose()' instead."
+             )
+         if hasattr(self.stream, "close"):
+             self.stream.close()
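+
+     # Illustrative usage (a sketch): with the streaming API a response body
+     # must be drained before '.content' is available, e.g.
+     #     with httpcore.ConnectionPool() as pool:
+     #         with pool.stream("GET", "https://www.example.com/") as response:
+     #             body = response.read()  # or iterate response.iter_stream()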
+
+     # Async interface...
+
+     async def aread(self) -> bytes:
+         if not isinstance(self.stream, typing.AsyncIterable):  # pragma: nocover
+             raise RuntimeError(
+                 "Attempted to read a synchronous response using "
+                 "'await response.aread()'. "
+                 "You should use 'response.read()' instead."
+             )
+         if not hasattr(self, "_content"):
+             self._content = b"".join([part async for part in self.aiter_stream()])
+         return self._content
+
+     async def aiter_stream(self) -> typing.AsyncIterator[bytes]:
+         if not isinstance(self.stream, typing.AsyncIterable):  # pragma: nocover
+             raise RuntimeError(
+                 "Attempted to stream a synchronous response using 'async for ... in "
+                 "response.aiter_stream()'. "
+                 "You should use 'for ... in response.iter_stream()' instead."
+             )
+         if self._stream_consumed:
+             raise RuntimeError(
+                 "Attempted to call 'async for ... in response.aiter_stream()' "
+                 "more than once."
+             )
+         self._stream_consumed = True
+         async for chunk in self.stream:
+             yield chunk
+
+     async def aclose(self) -> None:
+         if not isinstance(self.stream, typing.AsyncIterable):  # pragma: nocover
+             raise RuntimeError(
+                 "Attempted to close a synchronous response using "
+                 "'await response.aclose()'. "
+                 "You should use 'response.close()' instead."
+             )
+         if hasattr(self.stream, "aclose"):
+             await self.stream.aclose()
+
+
+ class Proxy:
+     def __init__(
+         self,
+         url: URL | bytes | str,
+         auth: tuple[bytes | str, bytes | str] | None = None,
+         headers: HeadersAsMapping | HeadersAsSequence | None = None,
+         ssl_context: ssl.SSLContext | None = None,
+     ):
+         self.url = enforce_url(url, name="url")
+         self.headers = enforce_headers(headers, name="headers")
+         self.ssl_context = ssl_context
+
+         if auth is not None:
+             username = enforce_bytes(auth[0], name="auth")
+             password = enforce_bytes(auth[1], name="auth")
+             userpass = username + b":" + password
+             authorization = b"Basic " + base64.b64encode(userpass)
+             self.auth: tuple[bytes, bytes] | None = (username, password)
+             self.headers = [(b"Proxy-Authorization", authorization)] + self.headers
+         else:
+             self.auth = None
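+
+         # For example (illustrative): auth=("user", "pass") produces the header
+         #     Proxy-Authorization: Basic dXNlcjpwYXNz
+         # since base64.b64encode(b"user:pass") == b"dXNlcjpwYXNz".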
.venv/lib/python3.11/site-packages/httpcore/_sync/__init__.py ADDED
@@ -0,0 +1,39 @@
+ from .connection import HTTPConnection
+ from .connection_pool import ConnectionPool
+ from .http11 import HTTP11Connection
+ from .http_proxy import HTTPProxy
+ from .interfaces import ConnectionInterface
+
+ try:
+     from .http2 import HTTP2Connection
+ except ImportError:  # pragma: nocover
+
+     class HTTP2Connection:  # type: ignore
+         def __init__(self, *args, **kwargs) -> None:  # type: ignore
+             raise RuntimeError(
+                 "Attempted to use http2 support, but the `h2` package is not "
+                 "installed. Use 'pip install httpcore[http2]'."
+             )
+
+
+ try:
+     from .socks_proxy import SOCKSProxy
+ except ImportError:  # pragma: nocover
+
+     class SOCKSProxy:  # type: ignore
+         def __init__(self, *args, **kwargs) -> None:  # type: ignore
+             raise RuntimeError(
+                 "Attempted to use SOCKS support, but the `socksio` package is not "
+                 "installed. Use 'pip install httpcore[socks]'."
+             )
+
+
+ __all__ = [
+     "HTTPConnection",
+     "ConnectionPool",
+     "HTTPProxy",
+     "HTTP11Connection",
+     "HTTP2Connection",
+     "ConnectionInterface",
+     "SOCKSProxy",
+ ]
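+
+ # Illustrative note (a sketch, not part of the module): the try/except
+ # fallbacks above let 'from httpcore import HTTP2Connection' always succeed;
+ # without the optional 'h2' or 'socksio' extras installed, the failure is
+ # deferred until the class is instantiated, at which point the RuntimeError
+ # above explains which extra to install.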