Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes; see the raw diff for the full change set.
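Commits with this title are typically produced by the Hugging Face Hub's upload_large_folder helper. A minimal sketch of such an upload, assuming huggingface_hub >= 0.24; the repo id and the ignore_patterns argument are assumptions, the latter showing how the .venv tree below could have been kept out of the repository:

# Sketch of the "upload-large-folder tool" named in the commit title.
# Assumes huggingface_hub >= 0.24; the repo id is hypothetical.
from huggingface_hub import upload_large_folder

upload_large_folder(
    repo_id="user/tuning-competition-baseline",  # hypothetical repo id
    folder_path="tuning-competition-baseline",
    repo_type="model",
    # Assumed available: excluding the virtual environment would keep
    # vendored site-packages like the ones in this diff out of the repo.
    ignore_patterns=[".venv/**"],
)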
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.5.dist-info/RECORD +14 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/constants.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/environment.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/meta.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx-3.2.1.dist-info/LICENSE.txt +37 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx-3.2.1.dist-info/METADATA +135 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx-3.2.1.dist-info/WHEEL +5 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/INSTALLER +1 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/top_level.txt +1 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/INSTALLER +1 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cudnn_cu11-8.7.0.84.dist-info/INSTALLER +1 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cudnn_cu11-8.7.0.84.dist-info/RECORD +39 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cudnn_cu11-8.7.0.84.dist-info/WHEEL +5 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/AUTHORS.txt +799 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/INSTALLER +1 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/LICENSE.txt +20 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/METADATA +90 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/REQUESTED +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/__init__.py +13 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/__pycache__/__pip-runner__.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/main.py +12 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/__pycache__/check.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata.py +39 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata_editable.py +41 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_editable.py +46 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_legacy.py +102 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/self_outdated_check.py +244 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/py.typed +4 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/dispatcher.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/functionalization.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/native.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/python.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/ufunc.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/unboxing.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/autograd.py +853 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/cpp.py +467 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/dispatcher.py +118 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/lazy.py +464 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/meta.py +12 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/native.py +153 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/structured.py +157 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/translate.py +430 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/__init__.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/types.cpython-311.pyc +0 -0
tuning-competition-baseline/.venv/lib/python3.11/site-packages/MarkupSafe-2.1.5.dist-info/RECORD
ADDED
@@ -0,0 +1,14 @@
+MarkupSafe-2.1.5.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+MarkupSafe-2.1.5.dist-info/LICENSE.rst,sha256=SJqOEQhQntmKN7uYPhHg9-HTHwvY-Zp5yESOf_N9B-o,1475
+MarkupSafe-2.1.5.dist-info/METADATA,sha256=2dRDPam6OZLfpX0wg1JN5P3u9arqACxVSfdGmsJU7o8,3003
+MarkupSafe-2.1.5.dist-info/RECORD,,
+MarkupSafe-2.1.5.dist-info/WHEEL,sha256=AI1yqBLEPcVKWn5Ls2uPawjbqPXPFTYdQLSdN8WFCJw,152
+MarkupSafe-2.1.5.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+markupsafe/__init__.py,sha256=r7VOTjUq7EMQ4v3p4R1LoVOGJg6ysfYRncLr34laRBs,10958
+markupsafe/__pycache__/__init__.cpython-311.pyc,,
+markupsafe/__pycache__/_native.cpython-311.pyc,,
+markupsafe/_native.py,sha256=GR86Qvo_GcgKmKreA1WmYN9ud17OFwkww8E-fiW-57s,1713
+markupsafe/_speedups.c,sha256=X2XvQVtIdcK4Usz70BvkzoOfjTCmQlDkkjYSn-swE0g,7083
+markupsafe/_speedups.cpython-311-x86_64-linux-gnu.so,sha256=9PMBIm-zJzHm91NC-mblTC119_dIAldSQ4xFsE1_NPc,53656
+markupsafe/_speedups.pyi,sha256=vfMCsOgbAXRNLUXkyuyonG8uEWKYU4PDqNuMaDELAYw,229
+markupsafe/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
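Each row of a RECORD file like the one above is a CSV triple of path, hash, and size, where the hash field is the algorithm name plus "=" and an unpadded urlsafe-base64 digest (empty for the RECORD itself and for .pyc entries), per PEP 376/PEP 627. A minimal verification sketch; verify_record is a hypothetical helper, not part of this diff:

# Sketch: verify installed files against a dist-info RECORD.
import base64
import csv
import hashlib
from pathlib import Path

def verify_record(site_packages: str, record_path: str) -> None:
    root = Path(site_packages)
    with open(record_path, newline="") as f:
        for path, digest, _size in csv.reader(f):
            if not digest:  # RECORD itself and *.pyc entries carry no hash
                continue
            algo, _, expected = digest.partition("=")
            raw = hashlib.new(algo, (root / path).read_bytes()).digest()
            actual = base64.urlsafe_b64encode(raw).rstrip(b"=").decode()
            assert actual == expected, f"hash mismatch for {path}"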
tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/constants.cpython-311.pyc
ADDED
Binary file (1.58 kB)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/environment.cpython-311.pyc
ADDED
Binary file (80.6 kB)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/jinja2/__pycache__/meta.cpython-311.pyc
ADDED
Binary file (5.72 kB)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx-3.2.1.dist-info/LICENSE.txt
ADDED
@@ -0,0 +1,37 @@
+NetworkX is distributed with the 3-clause BSD license.
+
+::
+
+   Copyright (C) 2004-2023, NetworkX Developers
+   Aric Hagberg <hagberg@lanl.gov>
+   Dan Schult <dschult@colgate.edu>
+   Pieter Swart <swart@lanl.gov>
+   All rights reserved.
+
+   Redistribution and use in source and binary forms, with or without
+   modification, are permitted provided that the following conditions are
+   met:
+
+     * Redistributions of source code must retain the above copyright
+       notice, this list of conditions and the following disclaimer.
+
+     * Redistributions in binary form must reproduce the above
+       copyright notice, this list of conditions and the following
+       disclaimer in the documentation and/or other materials provided
+       with the distribution.
+
+     * Neither the name of the NetworkX Developers nor the names of its
+       contributors may be used to endorse or promote products derived
+       from this software without specific prior written permission.
+
+   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx-3.2.1.dist-info/METADATA
ADDED
@@ -0,0 +1,135 @@
+Metadata-Version: 2.1
+Name: networkx
+Version: 3.2.1
+Summary: Python package for creating and manipulating graphs and networks
+Author-email: Aric Hagberg <hagberg@lanl.gov>
+Maintainer-email: NetworkX Developers <networkx-discuss@googlegroups.com>
+Project-URL: Homepage, https://networkx.org/
+Project-URL: Bug Tracker, https://github.com/networkx/networkx/issues
+Project-URL: Documentation, https://networkx.org/documentation/stable/
+Project-URL: Source Code, https://github.com/networkx/networkx
+Keywords: Networks,Graph Theory,Mathematics,network,graph,discrete mathematics,math
+Platform: Linux
+Platform: Mac OSX
+Platform: Windows
+Platform: Unix
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Scientific/Engineering :: Bio-Informatics
+Classifier: Topic :: Scientific/Engineering :: Information Analysis
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Physics
+Requires-Python: >=3.9
+Description-Content-Type: text/x-rst
+License-File: LICENSE.txt
+Provides-Extra: default
+Requires-Dist: numpy >=1.22 ; extra == 'default'
+Requires-Dist: scipy !=1.11.0,!=1.11.1,>=1.9 ; extra == 'default'
+Requires-Dist: matplotlib >=3.5 ; extra == 'default'
+Requires-Dist: pandas >=1.4 ; extra == 'default'
+Provides-Extra: developer
+Requires-Dist: changelist ==0.4 ; extra == 'developer'
+Requires-Dist: pre-commit >=3.2 ; extra == 'developer'
+Requires-Dist: mypy >=1.1 ; extra == 'developer'
+Requires-Dist: rtoml ; extra == 'developer'
+Provides-Extra: doc
+Requires-Dist: sphinx >=7 ; extra == 'doc'
+Requires-Dist: pydata-sphinx-theme >=0.14 ; extra == 'doc'
+Requires-Dist: sphinx-gallery >=0.14 ; extra == 'doc'
+Requires-Dist: numpydoc >=1.6 ; extra == 'doc'
+Requires-Dist: pillow >=9.4 ; extra == 'doc'
+Requires-Dist: nb2plots >=0.7 ; extra == 'doc'
+Requires-Dist: texext >=0.6.7 ; extra == 'doc'
+Requires-Dist: nbconvert <7.9 ; extra == 'doc'
+Provides-Extra: extra
+Requires-Dist: lxml >=4.6 ; extra == 'extra'
+Requires-Dist: pygraphviz >=1.11 ; extra == 'extra'
+Requires-Dist: pydot >=1.4.2 ; extra == 'extra'
+Requires-Dist: sympy >=1.10 ; extra == 'extra'
+Provides-Extra: test
+Requires-Dist: pytest >=7.2 ; extra == 'test'
+Requires-Dist: pytest-cov >=4.0 ; extra == 'test'
+
+NetworkX
+========
+
+
+.. image:: https://github.com/networkx/networkx/workflows/test/badge.svg?branch=main
+   :target: https://github.com/networkx/networkx/actions?query=workflow%3A%22test%22
+
+.. image:: https://codecov.io/gh/networkx/networkx/branch/main/graph/badge.svg
+   :target: https://app.codecov.io/gh/networkx/networkx/branch/main
+
+.. image:: https://img.shields.io/github/labels/networkx/networkx/Good%20First%20Issue?color=green&label=Contribute%20&style=flat-square
+   :target: https://github.com/networkx/networkx/issues?q=is%3Aopen+is%3Aissue+label%3A%22Good+First+Issue%22
+
+
+NetworkX is a Python package for the creation, manipulation,
+and study of the structure, dynamics, and functions
+of complex networks.
+
+- **Website (including documentation):** https://networkx.org
+- **Mailing list:** https://groups.google.com/forum/#!forum/networkx-discuss
+- **Source:** https://github.com/networkx/networkx
+- **Bug reports:** https://github.com/networkx/networkx/issues
+- **Report a security vulnerability:** https://tidelift.com/security
+- **Tutorial:** https://networkx.org/documentation/latest/tutorial.html
+- **GitHub Discussions:** https://github.com/networkx/networkx/discussions
+
+Simple example
+--------------
+
+Find the shortest path between two nodes in an undirected graph:
+
+.. code:: pycon
+
+    >>> import networkx as nx
+    >>> G = nx.Graph()
+    >>> G.add_edge("A", "B", weight=4)
+    >>> G.add_edge("B", "D", weight=2)
+    >>> G.add_edge("A", "C", weight=3)
+    >>> G.add_edge("C", "D", weight=4)
+    >>> nx.shortest_path(G, "A", "D", weight="weight")
+    ['A', 'B', 'D']
+
+Install
+-------
+
+Install the latest version of NetworkX::
+
+    $ pip install networkx
+
+Install with all optional dependencies::
+
+    $ pip install networkx[all]
+
+For additional details, please see `INSTALL.rst`.
+
+Bugs
+----
+
+Please report any bugs that you find `here <https://github.com/networkx/networkx/issues>`_.
+Or, even better, fork the repository on `GitHub <https://github.com/networkx/networkx>`_
+and create a pull request (PR). We welcome all changes, big or small, and we
+will help you make the PR if you are new to `git` (just ask on the issue and/or
+see `CONTRIBUTING.rst`).
+
+License
+-------
+
+Released under the 3-Clause BSD license (see `LICENSE.txt`)::
+
+   Copyright (C) 2004-2023 NetworkX Developers
+   Aric Hagberg <hagberg@lanl.gov>
+   Dan Schult <dschult@colgate.edu>
+   Pieter Swart <swart@lanl.gov>
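The Requires-Dist lines above use PEP 508 syntax, where the part after the semicolon is an environment marker tying the dependency to an extra. A small sketch parsing one of them with the packaging library (assumed installed, not part of this diff):

# Sketch: parse a Requires-Dist line from the metadata above (PEP 508).
from packaging.requirements import Requirement

req = Requirement("numpy >=1.22 ; extra == 'default'")
print(req.name)       # numpy
print(req.specifier)  # >=1.22
print(req.marker)     # extra == "default"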
tuning-competition-baseline/.venv/lib/python3.11/site-packages/networkx-3.2.1.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.41.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
+pip
tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_nvrtc_cu11-11.8.89.dist-info/top_level.txt
ADDED
@@ -0,0 +1 @@
+nvidia
tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cuda_runtime_cu11-11.8.89.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
+pip
tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cudnn_cu11-8.7.0.84.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
+pip
tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cudnn_cu11-8.7.0.84.dist-info/RECORD
ADDED
@@ -0,0 +1,39 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cudnn/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cudnn/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cudnn/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cudnn/include/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cudnn/include/cudnn.h,sha256=nDGd5AONjdibAgMINJPMvCS1Tzvb3mki4IGWsrtR8p8,2968
+nvidia/cudnn/include/cudnn_adv_infer.h,sha256=CSGiXy0NKDNSl-nkG2h6B8ssepnljQ3PiXJaiwcZ4rU,29025
+nvidia/cudnn/include/cudnn_adv_infer_v8.h,sha256=CSGiXy0NKDNSl-nkG2h6B8ssepnljQ3PiXJaiwcZ4rU,29025
+nvidia/cudnn/include/cudnn_adv_train.h,sha256=I741xrSdMEyJET6CRergT_rtVD-CXCAqAqVnI3wn2DI,27700
+nvidia/cudnn/include/cudnn_adv_train_v8.h,sha256=I741xrSdMEyJET6CRergT_rtVD-CXCAqAqVnI3wn2DI,27700
+nvidia/cudnn/include/cudnn_backend.h,sha256=qNFagb4NdM8S2d1JZFITBVLL59qxiU0_GrnN7eAl5co,24727
+nvidia/cudnn/include/cudnn_backend_v8.h,sha256=qNFagb4NdM8S2d1JZFITBVLL59qxiU0_GrnN7eAl5co,24727
+nvidia/cudnn/include/cudnn_cnn_infer.h,sha256=44Le8oYL6MlNqYPHt-gG3OJtX0pQ39zxVmwQR-mMs_I,29083
+nvidia/cudnn/include/cudnn_cnn_infer_v8.h,sha256=44Le8oYL6MlNqYPHt-gG3OJtX0pQ39zxVmwQR-mMs_I,29083
+nvidia/cudnn/include/cudnn_cnn_train.h,sha256=fMg2JXxX5cC_AuOLwmJvj3dRfI5Gxb-3_G67f-IdepQ,10217
+nvidia/cudnn/include/cudnn_cnn_train_v8.h,sha256=fMg2JXxX5cC_AuOLwmJvj3dRfI5Gxb-3_G67f-IdepQ,10217
+nvidia/cudnn/include/cudnn_ops_infer.h,sha256=_eoAj4VayT1X2xjWRIKDfyfu2X9SYKo206W2KF-5-60,49631
+nvidia/cudnn/include/cudnn_ops_infer_v8.h,sha256=_eoAj4VayT1X2xjWRIKDfyfu2X9SYKo206W2KF-5-60,49631
+nvidia/cudnn/include/cudnn_ops_train.h,sha256=Gxdqcy5CRBIjD0MRaBlNv8LGYb3lbi06ECLzPVTGwmE,25733
+nvidia/cudnn/include/cudnn_ops_train_v8.h,sha256=Gxdqcy5CRBIjD0MRaBlNv8LGYb3lbi06ECLzPVTGwmE,25733
+nvidia/cudnn/include/cudnn_v8.h,sha256=nDGd5AONjdibAgMINJPMvCS1Tzvb3mki4IGWsrtR8p8,2968
+nvidia/cudnn/include/cudnn_version.h,sha256=GDrhJZBHJ9V9raYCLgK20IwPsRbFfWar-e_aUGDz4cQ,3113
+nvidia/cudnn/include/cudnn_version_v8.h,sha256=GDrhJZBHJ9V9raYCLgK20IwPsRbFfWar-e_aUGDz4cQ,3113
+nvidia/cudnn/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/cudnn/lib/__pycache__/__init__.cpython-311.pyc,,
+nvidia/cudnn/lib/libcudnn.so.8,sha256=Jqcoi3MV1lisqxBz8CxPGM0dJ-6t3hApWPAxfa1mVuA,150200
+nvidia/cudnn/lib/libcudnn_adv_infer.so.8,sha256=m4YscW0p6QtRn9nmz1BmLS5DxujMT-051PHQ9g1ZzGI,130420936
+nvidia/cudnn/lib/libcudnn_adv_train.so.8,sha256=LouBB7ZNSranPOGE0wG3HhTCsbtGnvVzShuqOhAIR9Q,121126456
+nvidia/cudnn/lib/libcudnn_cnn_infer.so.8,sha256=q_6u8ekYhJBJCD9iV7zp6rCiqJ9ty7IvRPwVBcBAmnw,662671696
+nvidia/cudnn/lib/libcudnn_cnn_train.so.8,sha256=gBiVQZ6d4k3klgmf__Pu8UPoLEh83mjE6Rl-lvvqEds,102270568
+nvidia/cudnn/lib/libcudnn_ops_infer.so.8,sha256=bmmzZRK2mZAF3kV3YzadRHQ6gPnrFBbgnE5gKeSrOA0,97560024
+nvidia/cudnn/lib/libcudnn_ops_train.so.8,sha256=VfW-n_Ut7y_5uhQ4uF-37Pv6N9CgmJt0Z4izmu-Bv-c,74719784
+nvidia_cudnn_cu11-8.7.0.84.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_cudnn_cu11-8.7.0.84.dist-info/License.txt,sha256=Sc95vbNXNLUv5iAwE7O9dZ-B6ZjNMqosZcUduaiMYdI,18174
+nvidia_cudnn_cu11-8.7.0.84.dist-info/METADATA,sha256=O2zEaG1qbOXQi2Bl-n2ZQJxpqcBjMBGxlDqIKisoC6Y,1570
+nvidia_cudnn_cu11-8.7.0.84.dist-info/RECORD,,
+nvidia_cudnn_cu11-8.7.0.84.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+nvidia_cudnn_cu11-8.7.0.84.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
tuning-competition-baseline/.venv/lib/python3.11/site-packages/nvidia_cudnn_cu11-8.7.0.84.dist-info/WHEEL
ADDED
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux1_x86_64
+
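The Tag line in a WHEEL file is a compressed interpreter-abi-platform triple. A sketch expanding the tag above with the packaging library (assumed installed, not part of this diff):

# Sketch: expand the compressed wheel tag from the WHEEL file above.
from packaging.tags import parse_tag

for tag in sorted(parse_tag("py3-none-manylinux1_x86_64"), key=str):
    print(tag.interpreter, tag.abi, tag.platform)  # py3 none manylinux1_x86_64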
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/AUTHORS.txt
ADDED
@@ -0,0 +1,799 @@
+@Switch01
+A_Rog
+Aakanksha Agrawal
+Abhinav Sagar
+ABHYUDAY PRATAP SINGH
+abs51295
+AceGentile
+Adam Chainz
+Adam Tse
+Adam Wentz
+admin
+Adolfo Ochagavía
+Adrien Morison
+Agus
+ahayrapetyan
+Ahilya
+AinsworthK
+Akash Srivastava
+Alan Yee
+Albert Tugushev
+Albert-Guan
+albertg
+Alberto Sottile
+Aleks Bunin
+Ales Erjavec
+Alethea Flowers
+Alex Gaynor
+Alex Grönholm
+Alex Hedges
+Alex Loosley
+Alex Morega
+Alex Stachowiak
+Alexander Shtyrov
+Alexandre Conrad
+Alexey Popravka
+Aleš Erjavec
+Alli
+Ami Fischman
+Ananya Maiti
+Anatoly Techtonik
+Anders Kaseorg
+Andre Aguiar
+Andreas Lutro
+Andrei Geacar
+Andrew Gaul
+Andrew Shymanel
+Andrey Bienkowski
+Andrey Bulgakov
+Andrés Delfino
+Andy Freeland
+Andy Kluger
+Ani Hayrapetyan
+Aniruddha Basak
+Anish Tambe
+Anrs Hu
+Anthony Sottile
+Antoine Musso
+Anton Ovchinnikov
+Anton Patrushev
+Anton Zelenov
+Antonio Alvarado Hernandez
+Antony Lee
+Antti Kaihola
+Anubhav Patel
+Anudit Nagar
+Anuj Godase
+AQNOUCH Mohammed
+AraHaan
+arena
+arenasys
+Arindam Choudhury
+Armin Ronacher
+Arnon Yaari
+Artem
+Arun Babu Neelicattu
+Ashley Manton
+Ashwin Ramaswami
+atse
+Atsushi Odagiri
+Avinash Karhana
+Avner Cohen
+Awit (Ah-Wit) Ghirmai
+Baptiste Mispelon
+Barney Gale
+barneygale
+Bartek Ogryczak
+Bastian Venthur
+Ben Bodenmiller
+Ben Darnell
+Ben Hoyt
+Ben Mares
+Ben Rosser
+Bence Nagy
+Benjamin Peterson
+Benjamin VanEvery
+Benoit Pierre
+Berker Peksag
+Bernard
+Bernard Tyers
+Bernardo B. Marques
+Bernhard M. Wiedemann
+Bertil Hatt
+Bhavam Vidyarthi
+Blazej Michalik
+Bogdan Opanchuk
+BorisZZZ
+Brad Erickson
+Bradley Ayers
+Branch Vincent
+Brandon L. Reiss
+Brandt Bucher
+Brannon Dorsey
+Brett Randall
+Brett Rosen
+Brian Cristante
+Brian Rosner
+briantracy
+BrownTruck
+Bruno Oliveira
+Bruno Renié
+Bruno S
+Bstrdsmkr
+Buck Golemon
+burrows
+Bussonnier Matthias
+bwoodsend
+c22
+Caleb Martinez
+Calvin Smith
+Carl Meyer
+Carlos Liam
+Carol Willing
+Carter Thayer
+Cass
+Chandrasekhar Atina
+Charlie Marsh
+Chih-Hsuan Yen
+Chris Brinker
+Chris Hunt
+Chris Jerdonek
+Chris Kuehl
+Chris Markiewicz
+Chris McDonough
+Chris Pawley
+Chris Pryer
+Chris Wolfe
+Christian Clauss
+Christian Heimes
+Christian Oudard
+Christoph Reiter
+Christopher Hunt
+Christopher Snyder
+chrysle
+cjc7373
+Clark Boylan
+Claudio Jolowicz
+Clay McClure
+Cody
+Cody Soyland
+Colin Watson
+Collin Anderson
+Connor Osborn
+Cooper Lees
+Cooper Ry Lees
+Cory Benfield
+Cory Wright
+Craig Kerstiens
+Cristian Sorinel
+Cristina
+Cristina Muñoz
+ctg123
+Curtis Doty
+cytolentino
+Daan De Meyer
+Dale
+Damian
+Damian Quiroga
+Damian Shaw
+Dan Black
+Dan Savilonis
+Dan Sully
+Dane Hillard
+daniel
+Daniel Collins
+Daniel Hahler
+Daniel Holth
+Daniel Jost
+Daniel Katz
+Daniel Shaulov
+Daniele Esposti
+Daniele Nicolodi
+Daniele Procida
+Daniil Konovalenko
+Danny Hermes
+Danny McClanahan
+Darren Kavanagh
+Dav Clark
+Dave Abrahams
+Dave Jones
+David Aguilar
+David Black
+David Bordeynik
+David Caro
+David D Lowe
+David Evans
+David Hewitt
+David Linke
+David Poggi
+David Poznik
+David Pursehouse
+David Runge
+David Tucker
+David Wales
+Davidovich
+ddelange
+Deepak Sharma
+Deepyaman Datta
+Denise Yu
+dependabot[bot]
+derwolfe
+Desetude
+Devesh Kumar Singh
+devsagul
+Diego Caraballo
+Diego Ramirez
+DiegoCaraballo
+Dimitri Merejkowsky
+Dimitri Papadopoulos
+Dimitri Papadopoulos Orfanos
+Dirk Stolle
+Dmitry Gladkov
+Dmitry Volodin
+Domen Kožar
+Dominic Davis-Foster
+Donald Stufft
+Dongweiming
+doron zarhi
+Dos Moonen
+Douglas Thor
+DrFeathers
+Dustin Ingram
+Dustin Rodrigues
+Dwayne Bailey
+Ed Morley
+Edgar Ramírez
+Edgar Ramírez Mondragón
+Ee Durbin
+Efflam Lemaillet
+efflamlemaillet
+Eitan Adler
+ekristina
+elainechan
+Eli Schwartz
+Elisha Hollander
+Ellen Marie Dash
+Emil Burzo
+Emil Styrke
+Emmanuel Arias
+Endoh Takanao
+enoch
+Erdinc Mutlu
+Eric Cousineau
+Eric Gillingham
+Eric Hanchrow
+Eric Hopper
+Erik M. Bray
+Erik Rose
+Erwin Janssen
+Eugene Vereshchagin
+everdimension
+Federico
+Felipe Peter
+Felix Yan
+fiber-space
+Filip Kokosiński
+Filipe Laíns
+Finn Womack
+finnagin
+Flavio Amurrio
+Florian Briand
+Florian Rathgeber
+Francesco
+Francesco Montesano
+Fredrik Orderud
+Frost Ming
+Gabriel Curio
+Gabriel de Perthuis
+Garry Polley
+gavin
+gdanielson
+Geoffrey Sneddon
+George Song
+Georgi Valkov
+Georgy Pchelkin
+ghost
+Giftlin Rajaiah
+gizmoguy1
+gkdoc
+Godefroid Chapelle
+Gopinath M
+GOTO Hayato
+gousaiyang
+gpiks
+Greg Roodt
+Greg Ward
+Guilherme Espada
+Guillaume Seguin
+gutsytechster
+Guy Rozendorn
+Guy Tuval
+gzpan123
+Hanjun Kim
+Hari Charan
+Harsh Vardhan
+harupy
+Harutaka Kawamura
+hauntsaninja
+Henrich Hartzer
+Henry Schreiner
+Herbert Pfennig
+Holly Stotelmyer
+Honnix
+Hsiaoming Yang
+Hugo Lopes Tavares
+Hugo van Kemenade
+Hugues Bruant
+Hynek Schlawack
+Ian Bicking
+Ian Cordasco
+Ian Lee
+Ian Stapleton Cordasco
+Ian Wienand
+Igor Kuzmitshov
+Igor Sobreira
+Ikko Ashimine
+Ilan Schnell
+Illia Volochii
+Ilya Baryshev
+Inada Naoki
+Ionel Cristian Mărieș
+Ionel Maries Cristian
+Itamar Turner-Trauring
+Ivan Pozdeev
+J. Nick Koston
+Jacob Kim
+Jacob Walls
+Jaime Sanz
+jakirkham
+Jakub Kuczys
+Jakub Stasiak
+Jakub Vysoky
+Jakub Wilk
+James Cleveland
+James Curtin
+James Firth
+James Gerity
+James Polley
+Jan Pokorný
+Jannis Leidel
+Jarek Potiuk
+jarondl
+Jason Curtis
+Jason R. Coombs
+JasonMo
+JasonMo1
+Jay Graves
+Jean Abou Samra
+Jean-Christophe Fillion-Robin
+Jeff Barber
+Jeff Dairiki
+Jeff Widman
+Jelmer Vernooij
+jenix21
+Jeremy Fleischman
+Jeremy Stanley
+Jeremy Zafran
+Jesse Rittner
+Jiashuo Li
+Jim Fisher
+Jim Garrison
+Jinzhe Zeng
+Jiun Bae
+Jivan Amara
+Joe Bylund
+Joe Michelini
+John Paton
+John Sirois
+John T. Wodder II
+John-Scott Atlakson
+johnthagen
+Jon Banafato
+Jon Dufresne
+Jon Parise
+Jonas Nockert
+Jonathan Herbert
+Joonatan Partanen
+Joost Molenaar
+Jorge Niedbalski
+Joseph Bylund
+Joseph Long
+Josh Bronson
+Josh Cannon
+Josh Hansen
+Josh Schneier
+Joshua
+Juan Luis Cano Rodríguez
+Juanjo Bazán
+Judah Rand
+Julian Berman
+Julian Gethmann
+Julien Demoor
+Jussi Kukkonen
+jwg4
+Jyrki Pulliainen
+Kai Chen
+Kai Mueller
+Kamal Bin Mustafa
+kasium
+kaustav haldar
+keanemind
+Keith Maxwell
+Kelsey Hightower
+Kenneth Belitzky
+Kenneth Reitz
+Kevin Burke
+Kevin Carter
+Kevin Frommelt
+Kevin R Patterson
+Kexuan Sun
+Kit Randel
+Klaas van Schelven
+KOLANICH
+konstin
+kpinc
+Krishna Oza
+Kumar McMillan
+Kuntal Majumder
+Kurt McKee
+Kyle Persohn
+lakshmanaram
+Laszlo Kiss-Kollar
+Laurent Bristiel
+Laurent LAPORTE
+Laurie O
+Laurie Opperman
+layday
+Leon Sasson
+Lev Givon
+Lincoln de Sousa
+Lipis
+lorddavidiii
+Loren Carvalho
+Lucas Cimon
+Ludovic Gasc
+Luis Medel
+Lukas Geiger
+Lukas Juhrich
+Luke Macken
+Luo Jiebin
+luojiebin
+luz.paz
+László Kiss Kollár
+M00nL1ght
+Marc Abramowitz
+Marc Tamlyn
+Marcus Smith
+Mariatta
+Mark Kohler
+Mark McLoughlin
+Mark Williams
+Markus Hametner
+Martey Dodoo
+Martin Fischer
+Martin Häcker
+Martin Pavlasek
+Masaki
+Masklinn
+Matej Stuchlik
+Mathew Jennings
+Mathieu Bridon
+Mathieu Kniewallner
+Matt Bacchi
+Matt Good
+Matt Maker
+Matt Robenolt
+Matt Wozniski
+matthew
+Matthew Einhorn
+Matthew Feickert
+Matthew Gilliard
+Matthew Hughes
+Matthew Iversen
+Matthew Treinish
+Matthew Trumbell
+Matthew Willson
+Matthias Bussonnier
+mattip
+Maurits van Rees
+Max W Chase
+Maxim Kurnikov
+Maxime Rouyrre
+mayeut
+mbaluna
+mdebi
+memoselyk
+meowmeowcat
+Michael
+Michael Aquilina
+Michael E. Karpeles
+Michael Klich
+Michael Mintz
+Michael Williamson
+michaelpacer
+Michał Górny
+Mickaël Schoentgen
+Miguel Araujo Perez
+Mihir Singh
+Mike
+Mike Hendricks
+Min RK
+MinRK
+Miro Hrončok
+Monica Baluna
+montefra
+Monty Taylor
+morotti
+mrKazzila
+Muha Ajjan
+Nadav Wexler
+Nahuel Ambrosini
+Nate Coraor
+Nate Prewitt
+Nathan Houghton
+Nathaniel J. Smith
+Nehal J Wani
+Neil Botelho
+Nguyễn Gia Phong
+Nicholas Serra
+Nick Coghlan
+Nick Stenning
+Nick Timkovich
+Nicolas Bock
+Nicole Harris
+Nikhil Benesch
+Nikhil Ladha
+Nikita Chepanov
+Nikolay Korolev
+Nipunn Koorapati
+Nitesh Sharma
+Niyas Sait
+Noah
+Noah Gorny
+Nowell Strite
+NtaleGrey
+nvdv
+OBITORASU
+Ofek Lev
+ofrinevo
+Oliver Freund
+Oliver Jeeves
+Oliver Mannion
+Oliver Tonnhofer
+Olivier Girardot
+Olivier Grisel
+Ollie Rutherfurd
+OMOTO Kenji
+Omry Yadan
+onlinejudge95
+Oren Held
+Oscar Benjamin
+Oz N Tiram
+Pachwenko
+Patrick Dubroy
+Patrick Jenkins
+Patrick Lawson
+patricktokeeffe
+Patrik Kopkan
+Paul Ganssle
+Paul Kehrer
+Paul Moore
+Paul Nasrat
+Paul Oswald
+Paul van der Linden
+Paulus Schoutsen
+Pavel Safronov
+Pavithra Eswaramoorthy
+Pawel Jasinski
+Paweł Szramowski
+Pekka Klärck
+Peter Gessler
+Peter Lisák
+Peter Shen
+Peter Waller
+Petr Viktorin
+petr-tik
+Phaneendra Chiruvella
+Phil Elson
+Phil Freo
+Phil Pennock
+Phil Whelan
+Philip Jägenstedt
+Philip Molloy
+Philippe Ombredanne
+Pi Delport
+Pierre-Yves Rofes
+Pieter Degroote
+pip
+Prabakaran Kumaresshan
+Prabhjyotsing Surjit Singh Sodhi
+Prabhu Marappan
+Pradyun Gedam
+Prashant Sharma
+Pratik Mallya
+pre-commit-ci[bot]
+Preet Thakkar
+Preston Holmes
+Przemek Wrzos
+Pulkit Goyal
+q0w
+Qiangning Hong
+Qiming Xu
+Quentin Lee
+Quentin Pradet
+R. David Murray
+Rafael Caricio
+Ralf Schmitt
+Ran Benita
+Razzi Abuissa
+rdb
+Reece Dunham
+Remi Rampin
+Rene Dudfield
+Riccardo Magliocchetti
+Riccardo Schirone
+Richard Jones
+Richard Si
+Ricky Ng-Adam
+Rishi
+rmorotti
+RobberPhex
+Robert Collins
+Robert McGibbon
+Robert Pollak
+Robert T. McGibbon
+robin elisha robinson
+Roey Berman
+Rohan Jain
+Roman Bogorodskiy
+Roman Donchenko
+Romuald Brunet
+ronaudinho
+Ronny Pfannschmidt
+Rory McCann
+Ross Brattain
+Roy Wellington Ⅳ
+Ruairidh MacLeod
+Russell Keith-Magee
+Ryan Shepherd
+Ryan Wooden
+ryneeverett
+S. Guliaev
+Sachi King
+Salvatore Rinchiera
+sandeepkiran-js
+Sander Van Balen
+Savio Jomton
+schlamar
+Scott Kitterman
+Sean
+seanj
+Sebastian Jordan
+Sebastian Schaetz
+Segev Finer
+SeongSoo Cho
+Sergey Vasilyev
+Seth Michael Larson
+Seth Woodworth
+Shahar Epstein
+Shantanu
+shenxianpeng
+shireenrao
+Shivansh-007
+Shixian Sheng
+Shlomi Fish
+Shovan Maity
+Simeon Visser
+Simon Cross
+Simon Pichugin
+sinoroc
+sinscary
+snook92
+socketubs
+Sorin Sbarnea
+Srinivas Nyayapati
+Srishti Hegde
+Stavros Korokithakis
+Stefan Scherfke
+Stefano Rivera
+Stephan Erb
+Stephen Rosen
+stepshal
+Steve (Gadget) Barnes
+Steve Barnes
+Steve Dower
+Steve Kowalik
+Steven Myint
+Steven Silvester
+stonebig
+studioj
+Stéphane Bidoul
+Stéphane Bidoul (ACSONE)
+Stéphane Klein
+Sumana Harihareswara
+Surbhi Sharma
+Sviatoslav Sydorenko
+Sviatoslav Sydorenko (Святослав Сидоренко)
+Swat009
+Sylvain
+Takayuki SHIMIZUKAWA
+Taneli Hukkinen
+tbeswick
+Thiago
+Thijs Triemstra
+Thomas Fenzl
+Thomas Grainger
+Thomas Guettler
+Thomas Johansson
+Thomas Kluyver
+Thomas Smith
+Thomas VINCENT
+Tim D. Smith
+Tim Gates
+Tim Harder
+Tim Heap
+tim smith
+tinruufu
+Tobias Hermann
+Tom Forbes
+Tom Freudenheim
+Tom V
+Tomas Hrnciar
+Tomas Orsava
+Tomer Chachamu
+Tommi Enenkel | AnB
+Tomáš Hrnčiar
+Tony Beswick
+Tony Narlock
+Tony Zhaocheng Tan
+TonyBeswick
+toonarmycaptain
+Toshio Kuratomi
+toxinu
+Travis Swicegood
+Tushar Sadhwani
+Tzu-ping Chung
+Valentin Haenel
+Victor Stinner
+victorvpaulo
+Vikram - Google
+Viktor Szépe
+Ville Skyttä
+Vinay Sajip
+Vincent Philippon
+Vinicyus Macedo
+Vipul Kumar
+Vitaly Babiy
+Vladimir Fokow
+Vladimir Rutsky
+W. Trevor King
+Wil Tan
+Wilfred Hughes
+William Edwards
+William ML Leslie
+William T Olson
+William Woodruff
+Wilson Mo
+wim glenn
+Winson Luk
+Wolfgang Maier
+Wu Zhenyu
+XAMES3
+Xavier Fernandez
+Xianpeng Shen
+xoviat
+xtreak
+YAMAMOTO Takashi
+Yen Chi Hsuan
+Yeray Diaz Diaz
+Yoval P
+Yu Jian
+Yuan Jing Vincent Yan
+Yusuke Hayashi
+Zearin
+Zhiping Deng
+ziebam
+Zvezdan Petkovic
+Łukasz Langa
+Роман Донченко
+Семён Марьясин
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/INSTALLER
ADDED
@@ -0,0 +1 @@
+pip
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/LICENSE.txt
ADDED
@@ -0,0 +1,20 @@
+Copyright (c) 2008-present The pip developers (see AUTHORS.txt file)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/METADATA
ADDED
@@ -0,0 +1,90 @@
+Metadata-Version: 2.1
+Name: pip
+Version: 24.3.1
+Summary: The PyPA recommended tool for installing Python packages.
+Author-email: The pip developers <distutils-sig@python.org>
+License: MIT
+Project-URL: Homepage, https://pip.pypa.io/
+Project-URL: Documentation, https://pip.pypa.io
+Project-URL: Source, https://github.com/pypa/pip
+Project-URL: Changelog, https://pip.pypa.io/en/stable/news/
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Topic :: Software Development :: Build Tools
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Requires-Python: >=3.8
+Description-Content-Type: text/x-rst
+License-File: LICENSE.txt
+License-File: AUTHORS.txt
+
+pip - The Python Package Installer
+==================================
+
+.. |pypi-version| image:: https://img.shields.io/pypi/v/pip.svg
+   :target: https://pypi.org/project/pip/
+   :alt: PyPI
+
+.. |python-versions| image:: https://img.shields.io/pypi/pyversions/pip
+   :target: https://pypi.org/project/pip
+   :alt: PyPI - Python Version
+
+.. |docs-badge| image:: https://readthedocs.org/projects/pip/badge/?version=latest
+   :target: https://pip.pypa.io/en/latest
+   :alt: Documentation
+
+|pypi-version| |python-versions| |docs-badge|
+
+pip is the `package installer`_ for Python. You can use pip to install packages from the `Python Package Index`_ and other indexes.
+
+Please take a look at our documentation for how to install and use pip:
+
+* `Installation`_
+* `Usage`_
+
+We release updates regularly, with a new version every 3 months. Find more details in our documentation:
+
+* `Release notes`_
+* `Release process`_
+
+If you find bugs, need help, or want to talk to the developers, please use our mailing lists or chat rooms:
+
+* `Issue tracking`_
+* `Discourse channel`_
+* `User IRC`_
+
+If you want to get involved head over to GitHub to get the source code, look at our development documentation and feel free to jump on the developer mailing lists and chat rooms:
+
+* `GitHub page`_
+* `Development documentation`_
+* `Development IRC`_
+
+Code of Conduct
+---------------
+
+Everyone interacting in the pip project's codebases, issue trackers, chat
+rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
+
+.. _package installer: https://packaging.python.org/guides/tool-recommendations/
+.. _Python Package Index: https://pypi.org
+.. _Installation: https://pip.pypa.io/en/stable/installation/
+.. _Usage: https://pip.pypa.io/en/stable/
+.. _Release notes: https://pip.pypa.io/en/stable/news.html
+.. _Release process: https://pip.pypa.io/en/latest/development/release-process/
+.. _GitHub page: https://github.com/pypa/pip
+.. _Development documentation: https://pip.pypa.io/en/latest/development
+.. _Issue tracking: https://github.com/pypa/pip/issues
+.. _Discourse channel: https://discuss.python.org/c/packaging
+.. _User IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa
+.. _Development IRC: https://kiwiirc.com/nextclient/#ircs://irc.libera.chat:+6697/pypa-dev
+.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip-24.3.1.dist-info/REQUESTED
ADDED
(empty file)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/__init__.py
ADDED
@@ -0,0 +1,13 @@
+from typing import List, Optional
+
+__version__ = "24.3.1"
+
+
+def main(args: Optional[List[str]] = None) -> int:
+    """This is an internal API only meant for use by pip's own console scripts.
+
+    For additional details, see https://github.com/pypa/pip/issues/7498.
+    """
+    from pip._internal.utils.entrypoints import _wrapper
+
+    return _wrapper(args)
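As the docstring above notes, pip.main is an internal API; pip's documentation recommends driving pip in a subprocess instead. A minimal sketch:

# Sketch: the supported way to run pip programmatically is a subprocess,
# since main() above is internal-only (see pip issue 7498).
import subprocess
import sys

subprocess.run([sys.executable, "-m", "pip", "install", "networkx"], check=True)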
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/__pycache__/__pip-runner__.cpython-311.pyc
ADDED
Binary file (2.53 kB)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/main.py
ADDED
@@ -0,0 +1,12 @@
+from typing import List, Optional
+
+
+def main(args: Optional[List[str]] = None) -> int:
+    """This is preserved for old console scripts that may still be referencing
+    it.
+
+    For additional details, see https://github.com/pypa/pip/issues/7498.
+    """
+    from pip._internal.utils.entrypoints import _wrapper
+
+    return _wrapper(args)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/__pycache__/check.cpython-311.pyc
ADDED
Binary file (8.22 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/build_tracker.cpython-311.pyc
ADDED
Binary file (8.82 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/metadata_legacy.cpython-311.pyc
ADDED
Binary file (3.69 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/wheel_editable.cpython-311.pyc
ADDED
Binary file (2.41 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/__pycache__/wheel_legacy.cpython-311.pyc
ADDED
Binary file (4.44 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata.py
ADDED
@@ -0,0 +1,39 @@
"""Metadata generation logic for source distributions.
"""

import os

from pip._vendor.pyproject_hooks import BuildBackendHookCaller

from pip._internal.build_env import BuildEnvironment
from pip._internal.exceptions import (
    InstallationSubprocessError,
    MetadataGenerationFailed,
)
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.temp_dir import TempDirectory


def generate_metadata(
    build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str
) -> str:
    """Generate metadata using mechanisms described in PEP 517.

    Returns the generated metadata directory.
    """
    metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True)

    metadata_dir = metadata_tmpdir.path

    with build_env:
        # Note that BuildBackendHookCaller implements a fallback for
        # prepare_metadata_for_build_wheel, so we don't have to
        # consider the possibility that this hook doesn't exist.
        runner = runner_with_spinner_message("Preparing metadata (pyproject.toml)")
        with backend.subprocess_runner(runner):
            try:
                distinfo_dir = backend.prepare_metadata_for_build_wheel(metadata_dir)
            except InstallationSubprocessError as error:
                raise MetadataGenerationFailed(package_details=details) from error

    return os.path.join(metadata_dir, distinfo_dir)
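For context, the same PEP 517 hook can be driven outside pip with the standalone pyproject-hooks package (the library pip vendors here). This is a minimal sketch under assumptions: the project path and build backend name are placeholders, and error handling is omitted.

import tempfile

from pyproject_hooks import BuildBackendHookCaller

hooks = BuildBackendHookCaller(
    source_dir="/path/to/project",          # assumed project checkout
    build_backend="setuptools.build_meta",  # assumed backend from pyproject.toml
)
with tempfile.TemporaryDirectory() as metadata_dir:
    # Returns the name of the generated .dist-info directory, just like
    # prepare_metadata_for_build_wheel in the pip wrapper above.
    distinfo_dir = hooks.prepare_metadata_for_build_wheel(metadata_dir)
    print(distinfo_dir)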
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/metadata_editable.py
ADDED
@@ -0,0 +1,41 @@
"""Metadata generation logic for source distributions.
"""

import os

from pip._vendor.pyproject_hooks import BuildBackendHookCaller

from pip._internal.build_env import BuildEnvironment
from pip._internal.exceptions import (
    InstallationSubprocessError,
    MetadataGenerationFailed,
)
from pip._internal.utils.subprocess import runner_with_spinner_message
from pip._internal.utils.temp_dir import TempDirectory


def generate_editable_metadata(
    build_env: BuildEnvironment, backend: BuildBackendHookCaller, details: str
) -> str:
    """Generate metadata using mechanisms described in PEP 660.

    Returns the generated metadata directory.
    """
    metadata_tmpdir = TempDirectory(kind="modern-metadata", globally_managed=True)

    metadata_dir = metadata_tmpdir.path

    with build_env:
        # Note that BuildBackendHookCaller implements a fallback for
        # prepare_metadata_for_build_wheel/editable, so we don't have to
        # consider the possibility that this hook doesn't exist.
        runner = runner_with_spinner_message(
            "Preparing editable metadata (pyproject.toml)"
        )
        with backend.subprocess_runner(runner):
            try:
                distinfo_dir = backend.prepare_metadata_for_build_editable(metadata_dir)
            except InstallationSubprocessError as error:
                raise MetadataGenerationFailed(package_details=details) from error

    return os.path.join(metadata_dir, distinfo_dir)
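The PEP 660 metadata hook has the same shape; a minimal sketch with the standalone pyproject-hooks package follows (paths and backend are again assumptions, and pyproject-hooks falls back to a full wheel build when the backend lacks the editable hook).

import tempfile

from pyproject_hooks import BuildBackendHookCaller

hooks = BuildBackendHookCaller(
    source_dir="/path/to/project",          # assumed
    build_backend="setuptools.build_meta",  # assumed
)
with tempfile.TemporaryDirectory() as metadata_dir:
    distinfo_dir = hooks.prepare_metadata_for_build_editable(metadata_dir)
    print(distinfo_dir)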
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_editable.py
ADDED
@@ -0,0 +1,46 @@
import logging
import os
from typing import Optional

from pip._vendor.pyproject_hooks import BuildBackendHookCaller, HookMissing

from pip._internal.utils.subprocess import runner_with_spinner_message

logger = logging.getLogger(__name__)


def build_wheel_editable(
    name: str,
    backend: BuildBackendHookCaller,
    metadata_directory: str,
    tempd: str,
) -> Optional[str]:
    """Build one InstallRequirement using the PEP 660 build process.

    Returns path to wheel if successfully built. Otherwise, returns None.
    """
    assert metadata_directory is not None
    try:
        logger.debug("Destination directory: %s", tempd)

        runner = runner_with_spinner_message(
            f"Building editable for {name} (pyproject.toml)"
        )
        with backend.subprocess_runner(runner):
            try:
                wheel_name = backend.build_editable(
                    tempd,
                    metadata_directory=metadata_directory,
                )
            except HookMissing as e:
                logger.error(
                    "Cannot build editable %s because the build "
                    "backend does not have the %s hook",
                    name,
                    e,
                )
                return None
    except Exception:
        logger.error("Failed building editable for %s", name)
        return None
    return os.path.join(tempd, wheel_name)
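Because build_editable is optional for a backend, callers have to handle HookMissing, as the pip code above does. A minimal standalone sketch with pyproject-hooks (project path and backend are assumptions):

import tempfile

from pyproject_hooks import BuildBackendHookCaller, HookMissing

hooks = BuildBackendHookCaller(
    source_dir="/path/to/project",          # assumed
    build_backend="setuptools.build_meta",  # assumed
)
with tempfile.TemporaryDirectory() as tempd:
    try:
        # Returns the filename of the built editable wheel inside tempd.
        wheel_name = hooks.build_editable(tempd)
        print("built", wheel_name)
    except HookMissing as exc:
        print("backend does not implement the hook:", exc)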
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/build/wheel_legacy.py
ADDED
@@ -0,0 +1,102 @@
import logging
import os.path
from typing import List, Optional

from pip._internal.cli.spinners import open_spinner
from pip._internal.utils.setuptools_build import make_setuptools_bdist_wheel_args
from pip._internal.utils.subprocess import call_subprocess, format_command_args

logger = logging.getLogger(__name__)


def format_command_result(
    command_args: List[str],
    command_output: str,
) -> str:
    """Format command information for logging."""
    command_desc = format_command_args(command_args)
    text = f"Command arguments: {command_desc}\n"

    if not command_output:
        text += "Command output: None"
    elif logger.getEffectiveLevel() > logging.DEBUG:
        text += "Command output: [use --verbose to show]"
    else:
        if not command_output.endswith("\n"):
            command_output += "\n"
        text += f"Command output:\n{command_output}"

    return text


def get_legacy_build_wheel_path(
    names: List[str],
    temp_dir: str,
    name: str,
    command_args: List[str],
    command_output: str,
) -> Optional[str]:
    """Return the path to the wheel in the temporary build directory."""
    # Sort for determinism.
    names = sorted(names)
    if not names:
        msg = f"Legacy build of wheel for {name!r} created no files.\n"
        msg += format_command_result(command_args, command_output)
        logger.warning(msg)
        return None

    if len(names) > 1:
        msg = (
            f"Legacy build of wheel for {name!r} created more than one file.\n"
            f"Filenames (choosing first): {names}\n"
        )
        msg += format_command_result(command_args, command_output)
        logger.warning(msg)

    return os.path.join(temp_dir, names[0])


def build_wheel_legacy(
    name: str,
    setup_py_path: str,
    source_dir: str,
    global_options: List[str],
    build_options: List[str],
    tempd: str,
) -> Optional[str]:
    """Build one unpacked package using the "legacy" build process.

    Returns path to wheel if successfully built. Otherwise, returns None.
    """
    wheel_args = make_setuptools_bdist_wheel_args(
        setup_py_path,
        global_options=global_options,
        build_options=build_options,
        destination_dir=tempd,
    )

    spin_message = f"Building wheel for {name} (setup.py)"
    with open_spinner(spin_message) as spinner:
        logger.debug("Destination directory: %s", tempd)

        try:
            output = call_subprocess(
                wheel_args,
                command_desc="python setup.py bdist_wheel",
                cwd=source_dir,
                spinner=spinner,
            )
        except Exception:
            spinner.finish("error")
            logger.error("Failed building wheel for %s", name)
            return None

        names = os.listdir(tempd)
        wheel_path = get_legacy_build_wheel_path(
            names=names,
            temp_dir=tempd,
            name=name,
            command_args=wheel_args,
            command_output=output,
        )
        return wheel_path
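To make the wheel-picking behavior above concrete: with two files in the build directory, get_legacy_build_wheel_path warns and returns the lexicographically first name. Importing from pip._internal is shown for demonstration only (it is not a supported API), and the paths and names below are made up.

from pip._internal.operations.build.wheel_legacy import get_legacy_build_wheel_path

path = get_legacy_build_wheel_path(
    names=["demo-1.0-py3-none-any.whl", "demo-1.0.tar.gz"],  # hypothetical files
    temp_dir="/tmp/build",                                   # hypothetical directory
    name="demo",
    command_args=["python", "setup.py", "bdist_wheel"],
    command_output="",
)
print(path)  # /tmp/build/demo-1.0-py3-none-any.whl (and a warning is logged)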
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/install/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (299 Bytes).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/operations/install/__pycache__/wheel.cpython-311.pyc
ADDED
Binary file (40.6 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_internal/self_outdated_check.py
ADDED
@@ -0,0 +1,244 @@
import datetime
import functools
import hashlib
import json
import logging
import optparse
import os.path
import sys
from dataclasses import dataclass
from typing import Any, Callable, Dict, Optional

from pip._vendor.packaging.version import Version
from pip._vendor.packaging.version import parse as parse_version
from pip._vendor.rich.console import Group
from pip._vendor.rich.markup import escape
from pip._vendor.rich.text import Text

from pip._internal.index.collector import LinkCollector
from pip._internal.index.package_finder import PackageFinder
from pip._internal.metadata import get_default_environment
from pip._internal.models.selection_prefs import SelectionPreferences
from pip._internal.network.session import PipSession
from pip._internal.utils.compat import WINDOWS
from pip._internal.utils.entrypoints import (
    get_best_invocation_for_this_pip,
    get_best_invocation_for_this_python,
)
from pip._internal.utils.filesystem import adjacent_tmp_file, check_path_owner, replace
from pip._internal.utils.misc import ensure_dir

_WEEK = datetime.timedelta(days=7)

logger = logging.getLogger(__name__)


def _get_statefile_name(key: str) -> str:
    key_bytes = key.encode()
    name = hashlib.sha224(key_bytes).hexdigest()
    return name


def _convert_date(isodate: str) -> datetime.datetime:
    """Convert an ISO format string to a date.

    Handles the format 2020-01-22T14:24:01Z (trailing Z)
    which is not supported by older versions of fromisoformat.
    """
    return datetime.datetime.fromisoformat(isodate.replace("Z", "+00:00"))


class SelfCheckState:
    def __init__(self, cache_dir: str) -> None:
        self._state: Dict[str, Any] = {}
        self._statefile_path = None

        # Try to load the existing state
        if cache_dir:
            self._statefile_path = os.path.join(
                cache_dir, "selfcheck", _get_statefile_name(self.key)
            )
            try:
                with open(self._statefile_path, encoding="utf-8") as statefile:
                    self._state = json.load(statefile)
            except (OSError, ValueError, KeyError):
                # Explicitly suppressing exceptions, since we don't want to
                # error out if the cache file is invalid.
                pass

    @property
    def key(self) -> str:
        return sys.prefix

    def get(self, current_time: datetime.datetime) -> Optional[str]:
        """Check if we have a not-outdated version loaded already."""
        if not self._state:
            return None

        if "last_check" not in self._state:
            return None

        if "pypi_version" not in self._state:
            return None

        # Determine if we need to refresh the state
        last_check = _convert_date(self._state["last_check"])
        time_since_last_check = current_time - last_check
        if time_since_last_check > _WEEK:
            return None

        return self._state["pypi_version"]

    def set(self, pypi_version: str, current_time: datetime.datetime) -> None:
        # If we do not have a path to cache in, don't bother saving.
        if not self._statefile_path:
            return

        # Check to make sure that we own the directory
        if not check_path_owner(os.path.dirname(self._statefile_path)):
            return

        # Now that we've ensured the directory is owned by this user, we'll go
        # ahead and make sure that all our directories are created.
        ensure_dir(os.path.dirname(self._statefile_path))

        state = {
            # Include the key so it's easy to tell which pip wrote the
            # file.
            "key": self.key,
            "last_check": current_time.isoformat(),
            "pypi_version": pypi_version,
        }

        text = json.dumps(state, sort_keys=True, separators=(",", ":"))

        with adjacent_tmp_file(self._statefile_path) as f:
            f.write(text.encode())

        try:
            # Since we have a prefix-specific state file, we can just
            # overwrite whatever is there, no need to check.
            replace(f.name, self._statefile_path)
        except OSError:
            # Best effort.
            pass


@dataclass
class UpgradePrompt:
    old: str
    new: str

    def __rich__(self) -> Group:
        if WINDOWS:
            pip_cmd = f"{get_best_invocation_for_this_python()} -m pip"
        else:
            pip_cmd = get_best_invocation_for_this_pip()

        notice = "[bold][[reset][blue]notice[reset][bold]][reset]"
        return Group(
            Text(),
            Text.from_markup(
                f"{notice} A new release of pip is available: "
                f"[red]{self.old}[reset] -> [green]{self.new}[reset]"
            ),
            Text.from_markup(
                f"{notice} To update, run: "
                f"[green]{escape(pip_cmd)} install --upgrade pip"
            ),
        )


def was_installed_by_pip(pkg: str) -> bool:
    """Checks whether pkg was installed by pip

    This is used not to display the upgrade message when pip is in fact
    installed by system package manager, such as dnf on Fedora.
    """
    dist = get_default_environment().get_distribution(pkg)
    return dist is not None and "pip" == dist.installer


def _get_current_remote_pip_version(
    session: PipSession, options: optparse.Values
) -> Optional[str]:
    # Lets use PackageFinder to see what the latest pip version is
    link_collector = LinkCollector.create(
        session,
        options=options,
        suppress_no_index=True,
    )

    # Pass allow_yanked=False so we don't suggest upgrading to a
    # yanked version.
    selection_prefs = SelectionPreferences(
        allow_yanked=False,
        allow_all_prereleases=False,  # Explicitly set to False
    )

    finder = PackageFinder.create(
        link_collector=link_collector,
        selection_prefs=selection_prefs,
    )
    best_candidate = finder.find_best_candidate("pip").best_candidate
    if best_candidate is None:
        return None

    return str(best_candidate.version)


def _self_version_check_logic(
    *,
    state: SelfCheckState,
    current_time: datetime.datetime,
    local_version: Version,
    get_remote_version: Callable[[], Optional[str]],
) -> Optional[UpgradePrompt]:
    remote_version_str = state.get(current_time)
    if remote_version_str is None:
        remote_version_str = get_remote_version()
        if remote_version_str is None:
            logger.debug("No remote pip version found")
            return None
        state.set(remote_version_str, current_time)

    remote_version = parse_version(remote_version_str)
    logger.debug("Remote version of pip: %s", remote_version)
    logger.debug("Local version of pip: %s", local_version)

    pip_installed_by_pip = was_installed_by_pip("pip")
    logger.debug("Was pip installed by pip? %s", pip_installed_by_pip)
    if not pip_installed_by_pip:
        return None  # Only suggest upgrade if pip is installed by pip.

    local_version_is_older = (
        local_version < remote_version
        and local_version.base_version != remote_version.base_version
    )
    if local_version_is_older:
        return UpgradePrompt(old=str(local_version), new=remote_version_str)

    return None


def pip_self_version_check(session: PipSession, options: optparse.Values) -> None:
    """Check for an update for pip.

    Limit the frequency of checks to once per week. State is stored either in
    the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix
    of the pip script path.
    """
    installed_dist = get_default_environment().get_distribution("pip")
    if not installed_dist:
        return

    upgrade_prompt = _self_version_check_logic(
        state=SelfCheckState(cache_dir=options.cache_dir),
        current_time=datetime.datetime.now(datetime.timezone.utc),
        local_version=installed_dist.version,
        get_remote_version=functools.partial(
            _get_current_remote_pip_version, session, options
        ),
    )
    if upgrade_prompt is not None:
        logger.warning("%s", upgrade_prompt, extra={"rich": True})
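The heart of SelfCheckState.get is a weekly freshness window: a cached PyPI version is reused only if the last check happened within _WEEK. A self-contained sketch of that rule (cache_is_fresh is a hypothetical helper mirroring _convert_date and the comparison above):

import datetime

_WEEK = datetime.timedelta(days=7)

def cache_is_fresh(last_check_iso: str, now: datetime.datetime) -> bool:
    # Mirror _convert_date: tolerate a trailing "Z" on the timestamp.
    last_check = datetime.datetime.fromisoformat(last_check_iso.replace("Z", "+00:00"))
    return (now - last_check) <= _WEEK

now = datetime.datetime(2020, 1, 25, tzinfo=datetime.timezone.utc)
print(cache_is_fresh("2020-01-22T14:24:01Z", now))  # True: checked under a week ago
print(cache_is_fresh("2020-01-10T00:00:00Z", now))  # False: older than a week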
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/py.typed
ADDED
@@ -0,0 +1,4 @@
pip is a command line program. While it is implemented in Python, and so is
available for import, you must not use pip's internal APIs in this way. Typing
information is provided as a convenience only and is not a guarantee. Expect
unannounced changes to the API and types in releases.
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/dispatcher.cpython-311.pyc
ADDED
Binary file (4.32 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/functionalization.cpython-311.pyc
ADDED
Binary file (6.79 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/native.cpython-311.pyc
ADDED
Binary file (6.43 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/python.cpython-311.pyc
ADDED
Binary file (53.3 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/ufunc.cpython-311.pyc
ADDED
Binary file (8.47 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/__pycache__/unboxing.cpython-311.pyc
ADDED
Binary file (8.18 kB).
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/autograd.py
ADDED
|
@@ -0,0 +1,853 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
from dataclasses import dataclass
|
| 3 |
+
from typing import cast, Dict, List, Match, Optional, Sequence, Set, Tuple
|
| 4 |
+
|
| 5 |
+
from torchgen import local
|
| 6 |
+
|
| 7 |
+
from torchgen.api import cpp
|
| 8 |
+
from torchgen.api.types import BaseCType, Binding, NamedCType, tensorListT
|
| 9 |
+
from torchgen.model import (
|
| 10 |
+
BaseTy,
|
| 11 |
+
BaseType,
|
| 12 |
+
FunctionSchema,
|
| 13 |
+
ListType,
|
| 14 |
+
NativeFunction,
|
| 15 |
+
NativeFunctionsViewGroup,
|
| 16 |
+
SchemaKind,
|
| 17 |
+
Type,
|
| 18 |
+
)
|
| 19 |
+
from torchgen.utils import IDENT_REGEX
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
# Represents a saved attribute involved in backward calculation.
|
| 23 |
+
# Note that it can be a derived property of an input argument, e.g.:
|
| 24 |
+
# we could save `other.scalar_type()` instead of the entire `other` tensor.
|
| 25 |
+
@dataclass(frozen=True)
|
| 26 |
+
class SavedAttribute:
|
| 27 |
+
# The NamedCType holds the updated name and cpp type of the attribute
|
| 28 |
+
# for the name, Suffix is appended if it's derived property, e.g.: `other_scalar_type`
|
| 29 |
+
nctype: NamedCType
|
| 30 |
+
|
| 31 |
+
# The expression to read the derived property at save time, e.g.:
|
| 32 |
+
# `other.scalar_type()`.
|
| 33 |
+
expr: str
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
# Represents a backward formula that calculates derivatives for one
|
| 37 |
+
# or more tensors.
|
| 38 |
+
@dataclass(frozen=True)
|
| 39 |
+
class Derivative:
|
| 40 |
+
# The formula string (legit C++ expression).
|
| 41 |
+
# Note that expressions against input arguments have been replaced with the
|
| 42 |
+
# corresponding saved attributes.
|
| 43 |
+
# E.g.:
|
| 44 |
+
# raw formula: `mul_tensor_backward(grad, self, other.scalar_type())`
|
| 45 |
+
# here: `mul_tensor_backward(grad, self, other_scalar_type)`
|
| 46 |
+
formula: str
|
| 47 |
+
|
| 48 |
+
# The formula string before input argument replacement
|
| 49 |
+
original_formula: str
|
| 50 |
+
|
| 51 |
+
# Names of the arguments for which this formula calculates derivatives.
|
| 52 |
+
var_names: Tuple[str, ...]
|
| 53 |
+
|
| 54 |
+
# Saved inputs that are referenced by the formula.
|
| 55 |
+
saved_inputs: Tuple[SavedAttribute, ...]
|
| 56 |
+
|
| 57 |
+
# Saved outputs that are referenced by the formula.
|
| 58 |
+
saved_outputs: Tuple[SavedAttribute, ...]
|
| 59 |
+
|
| 60 |
+
# Gradients that are referenced by name in the formula.
|
| 61 |
+
named_gradients: Set[str]
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# Represents a forward formula that calculates forward derivatives
|
| 65 |
+
# for one tensor.
|
| 66 |
+
@dataclass(frozen=True)
|
| 67 |
+
class ForwardDerivative:
|
| 68 |
+
# The formula string (legit C++ expression).
|
| 69 |
+
# Note that special keywords such as "linear" or "element_wise" have been
|
| 70 |
+
# replaced by the automatically generated formula.
|
| 71 |
+
formula: str
|
| 72 |
+
|
| 73 |
+
# Name of the output arguments for which this formula calculates forward
|
| 74 |
+
# derivatives
|
| 75 |
+
var_names: Tuple[str, ...]
|
| 76 |
+
|
| 77 |
+
# Type of the output arguments for which this formula calculates forward
|
| 78 |
+
# derivatives
|
| 79 |
+
var_types: Tuple[Type, ...]
|
| 80 |
+
|
| 81 |
+
# Inputs for which the forward derivatives are required for this formula
|
| 82 |
+
required_inputs_fw_grad: Optional[Tuple[str, ...]]
|
| 83 |
+
|
| 84 |
+
# Inputs for which the primal is required for this formula
|
| 85 |
+
required_inputs_primal: Optional[Tuple[str, ...]]
|
| 86 |
+
|
| 87 |
+
# Flag to specify if this formula requires the original value of self
|
| 88 |
+
# This is only used by inplace operations
|
| 89 |
+
required_original_self_value: bool
|
| 90 |
+
|
| 91 |
+
# If this formula is specified in derivatives.yaml or if we are re-using the
|
| 92 |
+
# out of place formula for inplace
|
| 93 |
+
is_reusing_outplace_formula: bool
|
| 94 |
+
|
| 95 |
+
|
| 96 |
+
# Represents differentiability info for a NativeFunction.
|
| 97 |
+
@dataclass(frozen=True)
|
| 98 |
+
class DifferentiabilityInfo:
|
| 99 |
+
# The base name read from derivatives.yaml.
|
| 100 |
+
name: str
|
| 101 |
+
|
| 102 |
+
# The matching native function.
|
| 103 |
+
#
|
| 104 |
+
# There can be multiple NativeFunction having the same base name:
|
| 105 |
+
# - different overloads with different types of input arguments;
|
| 106 |
+
# - in-place/out/functional variants of the same function;
|
| 107 |
+
#
|
| 108 |
+
# We first use the schema string (under the 'name' key) in derivatives.yaml
|
| 109 |
+
# to find the NativeFunction having the same schema string.
|
| 110 |
+
# Then we find the in-place/out/functional variants of the matching function.
|
| 111 |
+
# Among these variants, we choose the one having the same name as the
|
| 112 |
+
# derivatives.yaml entry. If there is no exact match, then we choose the
|
| 113 |
+
# in-place variant.
|
| 114 |
+
# TODO: maybe the logic to search for all variants is no longer necessary?
|
| 115 |
+
func: NativeFunction
|
| 116 |
+
|
| 117 |
+
# The name of the generated autograd function.
|
| 118 |
+
# It's set only if we will calculate a derivative, i.e.
|
| 119 |
+
# 'args_with_derivatives' is not empty.
|
| 120 |
+
op: Optional[str]
|
| 121 |
+
|
| 122 |
+
# The derivatives formulae for this function.
|
| 123 |
+
# Note that the length of this sequence is the number of differentiable inputs
|
| 124 |
+
derivatives: Sequence[Derivative]
|
| 125 |
+
|
| 126 |
+
# The forward derivatives formulae for this function.
|
| 127 |
+
# Note that the length of this sequence is the number of differentiable outputs
|
| 128 |
+
forward_derivatives: Sequence[ForwardDerivative]
|
| 129 |
+
|
| 130 |
+
# The union of 'saved_inputs' of all 'derivatives'.
|
| 131 |
+
all_saved_inputs: Sequence[SavedAttribute]
|
| 132 |
+
|
| 133 |
+
# The union of 'saved_outputs' of all 'derivatives'.
|
| 134 |
+
all_saved_outputs: Sequence[SavedAttribute]
|
| 135 |
+
|
| 136 |
+
# All named gradients that are available for use, in the same
|
| 137 |
+
# order as in the grads vector.
|
| 138 |
+
available_named_gradients: Sequence[str]
|
| 139 |
+
|
| 140 |
+
# The named gradients that are used in any of the derivatives.
|
| 141 |
+
# Invariant: all(name in available_named_gradients for name in used_named_gradients)
|
| 142 |
+
used_named_gradients: Set[str]
|
| 143 |
+
|
| 144 |
+
# The function's input arguments for which it calculates derivatives.
|
| 145 |
+
# It's the union of 'var_names' of all 'derivatives', sorted by the
|
| 146 |
+
# argument order in the function schema.
|
| 147 |
+
args_with_derivatives: Sequence[Binding]
|
| 148 |
+
|
| 149 |
+
# Names of arguments whose derivative formula is 'non_differentiable'.
|
| 150 |
+
non_differentiable_arg_names: Sequence[str]
|
| 151 |
+
|
| 152 |
+
# Raw data read from derivatives.yaml.
|
| 153 |
+
output_differentiability: Optional[List[bool]]
|
| 154 |
+
|
| 155 |
+
# output_differentiability in derivatives.yaml can be a list of
|
| 156 |
+
# conditions that express if the output is differentiable. In this case,
|
| 157 |
+
# the number of conditions must match the number of outputs
|
| 158 |
+
# (NB: we only support one condition right now).
|
| 159 |
+
# output_differentiability gets populated with True for each condition,
|
| 160 |
+
# while output_differentiability_conditions gets populated with the conditions
|
| 161 |
+
output_differentiability_conditions: Optional[List[str]]
|
| 162 |
+
|
| 163 |
+
@property
|
| 164 |
+
def has_derivatives(self) -> bool:
|
| 165 |
+
return len(self.args_with_derivatives) > 0
|
| 166 |
+
|
| 167 |
+
# Generates a new DifferentiabilityInfo using the exact same set of derivative information,
|
| 168 |
+
# but with a new operator name.
|
| 169 |
+
# This is used when generating "copy" variants of view ops,
|
| 170 |
+
# which are able to use the exact same derivative formula as the original view op
|
| 171 |
+
# See Note [Codegen'd {view}_copy Operators]
|
| 172 |
+
def create_view_copy_from_view_derivative(
|
| 173 |
+
self, g: NativeFunctionsViewGroup
|
| 174 |
+
) -> Optional["DifferentiabilityInfo"]:
|
| 175 |
+
if g.view_copy is None:
|
| 176 |
+
return None
|
| 177 |
+
f = g.view_copy
|
| 178 |
+
|
| 179 |
+
name_split_by_period = self.name.split(".", maxsplit=2)
|
| 180 |
+
# Append a "_copy" to the base name of the operator (but keep the overload name the same)
|
| 181 |
+
view_copy_name = f"{name_split_by_period[0]}_copy." + ".".join(
|
| 182 |
+
name_split_by_period[1:]
|
| 183 |
+
)
|
| 184 |
+
view_copy_op_name = None if self.op is None else f"{self.op}_copy"
|
| 185 |
+
|
| 186 |
+
return DifferentiabilityInfo(
|
| 187 |
+
# Use the "_copy" version of name/func/op
|
| 188 |
+
name=view_copy_name,
|
| 189 |
+
func=f,
|
| 190 |
+
op=view_copy_op_name,
|
| 191 |
+
# But keep all derivative info the same
|
| 192 |
+
derivatives=self.derivatives,
|
| 193 |
+
forward_derivatives=self.forward_derivatives,
|
| 194 |
+
all_saved_inputs=self.all_saved_inputs,
|
| 195 |
+
all_saved_outputs=self.all_saved_outputs,
|
| 196 |
+
available_named_gradients=self.available_named_gradients,
|
| 197 |
+
used_named_gradients=self.used_named_gradients,
|
| 198 |
+
args_with_derivatives=self.args_with_derivatives,
|
| 199 |
+
non_differentiable_arg_names=self.non_differentiable_arg_names,
|
| 200 |
+
output_differentiability=self.output_differentiability,
|
| 201 |
+
output_differentiability_conditions=self.output_differentiability_conditions,
|
| 202 |
+
)
|
| 203 |
+
|
| 204 |
+
|
| 205 |
+
def uses_ident(info: Optional[DifferentiabilityInfo], ident: str) -> bool:
|
| 206 |
+
if info is None:
|
| 207 |
+
return False
|
| 208 |
+
for derivative in info.derivatives:
|
| 209 |
+
formula = derivative.formula
|
| 210 |
+
if re.search(IDENT_REGEX.format(ident), formula):
|
| 211 |
+
return True
|
| 212 |
+
return False
|
| 213 |
+
|
| 214 |
+
|
| 215 |
+
def uses_retain_variables(info: Optional[DifferentiabilityInfo]) -> bool:
|
| 216 |
+
return uses_ident(info, "retain_variables")
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def uses_single_grad(info: Optional[DifferentiabilityInfo]) -> bool:
|
| 220 |
+
return uses_ident(info, "grad")
|
| 221 |
+
|
| 222 |
+
|
| 223 |
+
# Represents a differentiable `Argument`.
|
| 224 |
+
# How is it different from the `Argument` type?
|
| 225 |
+
# - It's processed Arguments which are differentiable and only used in the
|
| 226 |
+
# context of the autograd codegen;
|
| 227 |
+
# - It can represent SelfArgument or regular Argument but not TensorOptionsArgument;
|
| 228 |
+
@dataclass(frozen=True)
|
| 229 |
+
class DifferentiableInput:
|
| 230 |
+
name: str
|
| 231 |
+
type: Type
|
| 232 |
+
|
| 233 |
+
# TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
|
| 234 |
+
cpp_type: str
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
# Represents a differentiable `Return`.
|
| 238 |
+
# How it it different from the `Return` type?
|
| 239 |
+
# - The name in `Return` is optional. Here it is always populated using the same
|
| 240 |
+
# `cpp.return_names()` method.
|
| 241 |
+
# TODO: some cpp naming logic (e.g. resolving name conflict) might be irrelevant?
|
| 242 |
+
# - It's processed Returns which are differentiable, in compliance with the
|
| 243 |
+
# `output_differentiability` field defined in derivatives.yaml (if specified),
|
| 244 |
+
# and are only used in the context of the autograd codegen;
|
| 245 |
+
@dataclass(frozen=True)
|
| 246 |
+
class DifferentiableOutput:
|
| 247 |
+
name: str
|
| 248 |
+
type: Type
|
| 249 |
+
|
| 250 |
+
# TODO: only to keep it byte-for-byte compatible with the old codegen, should remove.
|
| 251 |
+
cpp_type: str
|
| 252 |
+
|
| 253 |
+
|
| 254 |
+
@dataclass(frozen=True)
|
| 255 |
+
class NativeFunctionWithDifferentiabilityInfo:
|
| 256 |
+
func: NativeFunction
|
| 257 |
+
info: Optional[Dict[str, DifferentiabilityInfo]]
|
| 258 |
+
fw_derivatives: Optional[Dict[str, Sequence[ForwardDerivative]]]
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
# TODO: Update comment below since it is out of date.
|
| 262 |
+
def dispatch_strategy(fn: NativeFunctionWithDifferentiabilityInfo) -> str:
|
| 263 |
+
"""How are we going to call the underlying implementation of a
|
| 264 |
+
declaration? There are two strategies:
|
| 265 |
+
- use_derived: we want to call the implementation on CPUDoubleType
|
| 266 |
+
(or a similar, derived Type instance). Because these derived
|
| 267 |
+
instances deal in Tensors, not Variables (it's a completely different
|
| 268 |
+
object, so it doesn't dispatch back to VariableType), code on
|
| 269 |
+
this dispatch path needs to wrap/unwrap tensors. If the
|
| 270 |
+
derived implementation takes and returns tensors, the
|
| 271 |
+
implementation is usually differentiable (although we also use
|
| 272 |
+
the derived dispatch path for non-differentiable functions
|
| 273 |
+
that we still want to dispatch on the derived Type instance;
|
| 274 |
+
e.g., size())
|
| 275 |
+
- use_type: we want to call the implementation on Type, because
|
| 276 |
+
it is implemented concretely, and the functions it invokes will
|
| 277 |
+
get dispatched back to VariableType (which will ensure that they
|
| 278 |
+
are differentiable.)
|
| 279 |
+
"""
|
| 280 |
+
# fn is derived as long as any of its per-key differentiability infos
|
| 281 |
+
# has_derivatives. dispatch_strategy() is used to guard generation of fns in VariableType
|
| 282 |
+
# and ADInplaceOrViewType. We want to generate these functions as long as a
|
| 283 |
+
# derivative is defined for ANY dispatch key.
|
| 284 |
+
if fn.func.is_abstract or (
|
| 285 |
+
fn.info is not None and any(info.has_derivatives for info in fn.info.values())
|
| 286 |
+
):
|
| 287 |
+
# If the function is abstract (not implemented on at::Type), we must
|
| 288 |
+
# call the implementation on the derived type with unpacked tensors.
|
| 289 |
+
|
| 290 |
+
# If the function has a derivative specified and is concrete, we could
|
| 291 |
+
# call either implementation. We prefer the calling the derived
|
| 292 |
+
# type's implementation with unpacked tensors because it is more
|
| 293 |
+
# performant in some cases: any internal calls to other ATen functions
|
| 294 |
+
# won't have the history tracked.
|
| 295 |
+
|
| 296 |
+
# If the function has a type dispatched argument (i.e. is a factory),
|
| 297 |
+
# we prefer calling the derived type's implementation both because it is
|
| 298 |
+
# more performant and to ensure factory functions return tensors with _version
|
| 299 |
+
# of 0 (probably not strictly necessary, but nice to have to keeps versions simple
|
| 300 |
+
# to understand.
|
| 301 |
+
|
| 302 |
+
return "use_derived"
|
| 303 |
+
else:
|
| 304 |
+
# If the function is concrete (we don't have to override it) and we
|
| 305 |
+
# didn't declare it in derivatives.yaml, we'll assume that it is
|
| 306 |
+
# actually implemented out of differentiable functions. (This
|
| 307 |
+
# assumption might not hold, but then you'll see gradcheck fail.)
|
| 308 |
+
return "use_type"
|
| 309 |
+
|
| 310 |
+
|
| 311 |
+
def is_foreach_func(f: NativeFunction) -> bool:
|
| 312 |
+
return f.func.name.name.base.startswith("_foreach_")
|
| 313 |
+
|
| 314 |
+
|
| 315 |
+
# note(crcrpar): Most foreach functions can reference an out-place `torch` function whose schema kind
|
| 316 |
+
# is functional for their backward derivatives (and forward derivatives in the future), i.e.,
|
| 317 |
+
# they would find such one in `functional_info_by_signature`. There however are some exceptions:
|
| 318 |
+
_foreach_with_inplace_ref = {"_foreach_zero_"}
|
| 319 |
+
_foreach_with_tensor_overload = {
|
| 320 |
+
"_foreach_add.Tensor",
|
| 321 |
+
"_foreach_mul.Tensor",
|
| 322 |
+
"_foreach_div.Tensor",
|
| 323 |
+
}
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
# Checks if `function_schema` is a native, non-foreach function which `f`, a foreach function
|
| 327 |
+
# reference to generate derivatives.
|
| 328 |
+
def is_reference_for_foreach(
|
| 329 |
+
f: NativeFunction,
|
| 330 |
+
function_schema: FunctionSchema,
|
| 331 |
+
) -> bool:
|
| 332 |
+
return (
|
| 333 |
+
f.func.name.name.base.split("_foreach_")[-1] == function_schema.name.name.base
|
| 334 |
+
and (
|
| 335 |
+
not function_schema.name.name.inplace
|
| 336 |
+
or str(f.func.name) in _foreach_with_inplace_ref
|
| 337 |
+
)
|
| 338 |
+
and all(
|
| 339 |
+
ref_arg.type in (arg.type, getattr(arg.type, "elem", None))
|
| 340 |
+
for arg, ref_arg in zip(
|
| 341 |
+
f.func.arguments.flat_non_out,
|
| 342 |
+
function_schema.arguments.flat_non_out,
|
| 343 |
+
)
|
| 344 |
+
)
|
| 345 |
+
)
|
| 346 |
+
|
| 347 |
+
|
| 348 |
+
# TODO(crcrpar): Avoid hard coding "Default" ideally.
|
| 349 |
+
def gen_foreach_derivativeinfo(
|
| 350 |
+
foreach_function: NativeFunction,
|
| 351 |
+
functional_info_by_signature: Dict[
|
| 352 |
+
FunctionSchema, Dict[str, DifferentiabilityInfo]
|
| 353 |
+
],
|
| 354 |
+
non_functional_info_by_signature: Dict[
|
| 355 |
+
FunctionSchema, Dict[str, DifferentiabilityInfo]
|
| 356 |
+
],
|
| 357 |
+
dispatch_key: str = "Default",
|
| 358 |
+
) -> Tuple[Optional[DifferentiabilityInfo], bool]:
|
| 359 |
+
"""Generate DifferentiabilityInfo for out-place foreach function, return the existing one for in-place.
|
| 360 |
+
|
| 361 |
+
The second return value indicates whether the info is generated in this function.
|
| 362 |
+
"""
|
| 363 |
+
ref_diff_info: Optional[DifferentiabilityInfo] = None
|
| 364 |
+
|
| 365 |
+
for function_schema, diff_info in functional_info_by_signature.items():
|
| 366 |
+
if not is_reference_for_foreach(foreach_function, function_schema):
|
| 367 |
+
continue
|
| 368 |
+
ref_diff_info = diff_info[dispatch_key]
|
| 369 |
+
if ref_diff_info is not None:
|
| 370 |
+
break
|
| 371 |
+
# note(crcrpar): It seems like `zero`'s info isn't available in functional_info_by_signature
|
| 372 |
+
# while the info of `zero_` is in non_functional_info_by_signature
|
| 373 |
+
if (
|
| 374 |
+
ref_diff_info is None
|
| 375 |
+
and foreach_function.func.kind() == SchemaKind.inplace
|
| 376 |
+
and str(foreach_function.func.name) in _foreach_with_inplace_ref
|
| 377 |
+
):
|
| 378 |
+
for function_schema, diff_info in non_functional_info_by_signature.items():
|
| 379 |
+
if not is_reference_for_foreach(foreach_function, function_schema):
|
| 380 |
+
continue
|
| 381 |
+
ref_diff_info = diff_info[dispatch_key]
|
| 382 |
+
if ref_diff_info is not None:
|
| 383 |
+
break
|
| 384 |
+
if ref_diff_info is None:
|
| 385 |
+
return None, False
|
| 386 |
+
|
| 387 |
+
# non out-place uses the existing Derivative.
|
| 388 |
+
if foreach_function.func.kind() == SchemaKind.inplace:
|
| 389 |
+
return ref_diff_info, False
|
| 390 |
+
|
| 391 |
+
map_refarg2foreacharg, map_name2arg = {}, {}
|
| 392 |
+
for i, (arg, ref_arg) in enumerate(
|
| 393 |
+
zip(
|
| 394 |
+
foreach_function.func.arguments.flat_non_out,
|
| 395 |
+
function_schema.arguments.flat_non_out,
|
| 396 |
+
)
|
| 397 |
+
):
|
| 398 |
+
map_refarg2foreacharg[ref_arg.name] = arg.name
|
| 399 |
+
map_name2arg[arg.name] = arg
|
| 400 |
+
|
| 401 |
+
all_saved_inputs, all_saved_outputs, all_var_names = [], [], []
|
| 402 |
+
modified_derivative_formulas = []
|
| 403 |
+
for i, derivative in enumerate(ref_diff_info.derivatives):
|
| 404 |
+
modified_formula = derivative.formula.replace("grad", "grads[i]").replace(
|
| 405 |
+
"result", "result[i]"
|
| 406 |
+
)
|
| 407 |
+
saved_inputs, saved_outputs = [], []
|
| 408 |
+
# note(crcrpar): This context seems necessary to call `cpp.argument_type`
|
| 409 |
+
with local.parametrize(
|
| 410 |
+
use_const_ref_for_mutable_tensors=foreach_function.use_const_ref_for_mutable_tensors,
|
| 411 |
+
use_ilistref_for_tensor_lists=foreach_function.part_of_structured_group,
|
| 412 |
+
):
|
| 413 |
+
for ref_input in derivative.saved_inputs:
|
| 414 |
+
ref_input_jit_name = ref_input.expr.split(".")[0]
|
| 415 |
+
mapped_name = map_refarg2foreacharg[ref_input_jit_name]
|
| 416 |
+
if isinstance(map_name2arg[mapped_name].type, ListType):
|
| 417 |
+
mapped_expr = mapped_name + "[i]"
|
| 418 |
+
else:
|
| 419 |
+
mapped_expr = mapped_name
|
| 420 |
+
new_expr = ref_input.expr.replace(ref_input_jit_name, mapped_expr)
|
| 421 |
+
modified_formula = modified_formula.replace(
|
| 422 |
+
cast(str, ref_input.nctype.name), new_expr
|
| 423 |
+
)
|
| 424 |
+
|
| 425 |
+
nctype = cpp.argument_type(map_name2arg[mapped_name], binds=mapped_name)
|
| 426 |
+
canonical_nctype = NamedCType(
|
| 427 |
+
nctype.name, nctype.type.remove_const_ref()
|
| 428 |
+
)
|
| 429 |
+
saved_inputs.append(
|
| 430 |
+
SavedAttribute(nctype=canonical_nctype, expr=mapped_name)
|
| 431 |
+
)
|
| 432 |
+
for ref_output in derivative.saved_outputs:
|
| 433 |
+
if ref_output.nctype.name == "result":
|
| 434 |
+
saved_outputs.append(
|
| 435 |
+
SavedAttribute(
|
| 436 |
+
nctype=NamedCType(
|
| 437 |
+
name="result", type=BaseCType(tensorListT)
|
| 438 |
+
),
|
| 439 |
+
expr="result",
|
| 440 |
+
)
|
| 441 |
+
)
|
| 442 |
+
else:
|
| 443 |
+
raise RuntimeError("")
|
| 444 |
+
var_names = [map_refarg2foreacharg[var] for var in derivative.var_names]
|
| 445 |
+
all_var_names.extend(var_names)
|
| 446 |
+
all_saved_inputs.extend(saved_inputs)
|
| 447 |
+
all_saved_outputs.extend(saved_outputs)
|
| 448 |
+
modified_derivative = Derivative(
|
| 449 |
+
formula=modified_formula,
|
| 450 |
+
original_formula=derivative.formula,
|
| 451 |
+
var_names=tuple(var_names),
|
| 452 |
+
saved_inputs=tuple(saved_inputs),
|
| 453 |
+
saved_outputs=tuple(saved_outputs),
|
| 454 |
+
named_gradients=set(),
|
| 455 |
+
)
|
| 456 |
+
modified_derivative_formulas.append(modified_derivative)
|
| 457 |
+
|
| 458 |
+
with local.parametrize(
|
| 459 |
+
use_const_ref_for_mutable_tensors=foreach_function.use_const_ref_for_mutable_tensors,
|
| 460 |
+
use_ilistref_for_tensor_lists=foreach_function.part_of_structured_group,
|
| 461 |
+
):
|
| 462 |
+
args_with_derivatives = [
|
| 463 |
+
Binding(
|
| 464 |
+
name=arg.name,
|
| 465 |
+
nctype=cpp.argument_type(arg, binds=arg.name),
|
| 466 |
+
argument=arg,
|
| 467 |
+
default=None,
|
| 468 |
+
)
|
| 469 |
+
for arg in foreach_function.func.arguments.flat_non_out
|
| 470 |
+
if arg.name in all_var_names
|
| 471 |
+
]
|
| 472 |
+
|
| 473 |
+
forward_derivatives: List[ForwardDerivative] = []
|
| 474 |
+
fw_derivative: ForwardDerivative
|
| 475 |
+
for fw_derivative in ref_diff_info.forward_derivatives:
|
| 476 |
+
var_names: List[str] = list(fw_derivative.var_names) # type: ignore[no-redef]
|
| 477 |
+
var_types: List[Type] = list(fw_derivative.var_types)
|
| 478 |
+
required_inputs_fw_grad: List[str] = []
|
| 479 |
+
required_inputs_primal: List[str] = []
|
| 480 |
+
if fw_derivative.required_inputs_fw_grad is not None:
|
| 481 |
+
required_inputs_fw_grad = list(fw_derivative.required_inputs_fw_grad)
|
| 482 |
+
if fw_derivative.required_inputs_primal:
|
| 483 |
+
required_inputs_primal = list(fw_derivative.required_inputs_primal)
|
| 484 |
+
modified_formula = fw_derivative.formula
|
| 485 |
+
|
| 486 |
+
# Foreach's result is TensorList
|
| 487 |
+
if "result" in modified_formula:
|
| 488 |
+
modified_formula = fw_derivative.formula.replace("result", "result[i]")
|
| 489 |
+
|
| 490 |
+
for foreach_arg, ref_arg in zip(
|
| 491 |
+
foreach_function.func.arguments.flat_non_out,
|
| 492 |
+
ref_diff_info.func.func.arguments.flat_non_out,
|
| 493 |
+
):
|
| 494 |
+
# Modify reference forward formula
|
| 495 |
+
if (
|
| 496 |
+
isinstance(foreach_arg.type, ListType)
|
| 497 |
+
and not foreach_arg.type.is_tensor_like()
|
| 498 |
+
):
|
| 499 |
+
# Assuming ScalarList
|
| 500 |
+
modified_formula = modified_formula.replace(
|
| 501 |
+
ref_arg.name, foreach_arg.name + "[i]"
|
| 502 |
+
)
|
| 503 |
+
elif foreach_arg.type.is_tensor_like():
|
| 504 |
+
# Assuming TensorList / Tensor
|
| 505 |
+
# assert isinstance(foreach_arg.type, ListType), f"{foreach_function.func.name}, {foreach_arg.type}"
|
| 506 |
+
assert isinstance(foreach_arg.type, ListType) or (
|
| 507 |
+
foreach_arg.type == BaseType(BaseTy.Tensor)
|
| 508 |
+
and str(foreach_function.func.name) in _foreach_with_tensor_overload
|
| 509 |
+
), f"{foreach_function.func.name}, {foreach_arg.type}"
|
| 510 |
+
for suffix in ("_p", "_t"):
|
| 511 |
+
curr_expr = ref_arg.name + suffix
|
| 512 |
+
if curr_expr in modified_formula:
|
| 513 |
+
new_expr = foreach_arg.name + suffix
|
| 514 |
+
modified_formula = modified_formula.replace(curr_expr, new_expr)
|
| 515 |
+
else:
|
| 516 |
+
# Assuming Scalar
|
| 517 |
+
if foreach_arg.name != ref_arg.name:
|
| 518 |
+
modified_formula = modified_formula.replace(
|
| 519 |
+
ref_arg.name, foreach_arg.name
|
| 520 |
+
)
|
| 521 |
+
|
| 522 |
+
# note(crcrpar): there should exist a cooler way...
|
| 523 |
+
for i, name in enumerate(var_names):
|
| 524 |
+
if name == ref_arg.name:
|
| 525 |
+
var_names[i] = foreach_arg.name
|
| 526 |
+
var_types[i] = foreach_arg.type
|
| 527 |
+
for i, name in enumerate(required_inputs_fw_grad):
|
| 528 |
+
if name == ref_arg.name:
|
| 529 |
+
required_inputs_fw_grad[i] = foreach_arg.name
|
| 530 |
+
for i, name in enumerate(required_inputs_primal):
|
| 531 |
+
if name == ref_arg.name:
|
| 532 |
+
required_inputs_primal[i] = foreach_arg.name
|
| 533 |
+
forward_derivatives.append(
|
| 534 |
+
ForwardDerivative(
|
| 535 |
+
formula=modified_formula,
|
| 536 |
+
var_names=tuple(var_names),
|
| 537 |
+
var_types=tuple(var_types),
|
| 538 |
+
required_inputs_fw_grad=tuple(required_inputs_fw_grad),
|
| 539 |
+
required_inputs_primal=tuple(required_inputs_primal),
|
| 540 |
+
required_original_self_value=fw_derivative.required_original_self_value,
|
| 541 |
+
is_reusing_outplace_formula=fw_derivative.is_reusing_outplace_formula,
|
| 542 |
+
)
|
| 543 |
+
)
|
| 544 |
+
|
| 545 |
+
return (
|
| 546 |
+
DifferentiabilityInfo(
|
| 547 |
+
name=foreach_function.func.name.name.base,
|
| 548 |
+
func=foreach_function,
|
| 549 |
+
op=f"Foreach{ref_diff_info.op}{foreach_function.func.name.overload_name}",
|
| 550 |
+
derivatives=modified_derivative_formulas,
|
| 551 |
+
forward_derivatives=forward_derivatives,
|
| 552 |
+
all_saved_inputs=tuple(set(all_saved_inputs)),
|
| 553 |
+
all_saved_outputs=tuple(set(all_saved_outputs)),
|
| 554 |
+
available_named_gradients=(),
|
| 555 |
+
used_named_gradients=set(),
|
| 556 |
+
args_with_derivatives=args_with_derivatives,
|
| 557 |
+
non_differentiable_arg_names=[],
|
| 558 |
+
output_differentiability=None,
|
| 559 |
+
output_differentiability_conditions=None,
|
| 560 |
+
),
|
| 561 |
+
True,
|
| 562 |
+
)
|
| 563 |
+
|
| 564 |
+
|
| 565 |
+
def match_differentiability_info(
    native_functions: List[NativeFunction],
    differentiability_infos: Dict[FunctionSchema, Dict[str, DifferentiabilityInfo]],
) -> List[NativeFunctionWithDifferentiabilityInfo]:
    """Sets the "derivative" key on declarations to matching autograd function.
    In-place functions will use the out-of-place derivative definition if there
    is no in-place specific derivative.
    """

    functional_info_by_signature = {
        schema.signature(strip_default=True): info_dict
        for schema, info_dict in differentiability_infos.items()
        if schema.kind() == SchemaKind.functional
    }
    non_functional_info_by_signature = {
        schema.signature(strip_default=True): info_dict
        for schema, info_dict in differentiability_infos.items()
        if schema.kind() != SchemaKind.functional
    }

    def find_info(
        f: NativeFunction,
    ) -> Tuple[Optional[Dict[str, DifferentiabilityInfo]], bool]:
        # Don't bother matching info to generated out= variants
        if "generated" in f.tags and f.func.kind() == SchemaKind.out:
            return None, False

        # (1) Check for an exact match
        if f.func in differentiability_infos:
            return differentiability_infos[f.func], True

        # (2) If no exact match, check if the out-of-place variant
        # of this operator has a match.
        # i.e. mul() for mul_() or mul_out()
        # note(crcrpar): Check foreach or not because in-place foreach functions use backward defined for the existing
        # native functions instead of the out-place counterparts.
        f_sig = f.func.signature(strip_default=True)
        if f_sig in functional_info_by_signature and not is_foreach_func(f):
            return functional_info_by_signature[f_sig], False

        # (3) Some operators have a derivative explicitly defined for the mutable
        # variant, but get a code-generated out-of-place variant which does *not*
        # come with a derivative formula.
        # For the generated out-of-place variant, use the mutable variant's formula
        # if it exists.
        if "generated" in f.tags and f_sig in non_functional_info_by_signature:
            info_dict = non_functional_info_by_signature[f_sig]
            # See https://github.com/pytorch/pytorch/pull/76320/files#r874816389
            assert not any(
                any("self" in str(inpt.nctype.name) for inpt in info.all_saved_inputs)
                for info in info_dict.values()
            ), f"""\
Attempted to convert a derivative formula for a mutable operator
to be used automatically by its functional variant ("{str(f.func)}").
This is not currently supported (we'd need to fix up the formula in the codegen)."""
            return info_dict, False

        # (4) Generate derivative information of foreach functions if none is defined in `derivatives.yaml`
        if is_foreach_func(f):
            assert f.func not in differentiability_infos
            diff_info, is_generated = gen_foreach_derivativeinfo(
                f,
                functional_info_by_signature,
                non_functional_info_by_signature,
            )
            if diff_info is None:
                return None, False
            # TODO(crcrpar): Avoid hard coding "Default" ideally.
            diff_info_dict = {"Default": diff_info}
            if is_generated:
                differentiability_infos[f.func] = diff_info_dict
                functional_info_by_signature[f.func] = diff_info_dict
            return diff_info_dict, is_generated

        return None, False

    result: List[NativeFunctionWithDifferentiabilityInfo] = []
    for f in native_functions:
        info_dict, is_exact_match = find_info(f)

        # Currently, the '.strides()' to 'strides_or_error' replacement does not support
        # 'self' derivatives of an inplace function, so we must check for this case.
        if f.func.kind() == SchemaKind.inplace and (info_dict is not None):
            for info in info_dict.values():
                for derivative in info.derivatives:
                    if "self" in derivative.var_names:
                        for saved_input in derivative.saved_inputs:
                            assert "strides_or_error" not in saved_input.expr, (
                                "Calling '.strides()' in the 'self' derivative formula of an "
                                f"in-place function is not supported: {f.func}"
                            )

        if not info_dict:
            result.append(
                NativeFunctionWithDifferentiabilityInfo(
                    func=f, info=None, fw_derivatives=None
                )
            )
            continue

        fw_derivative_dict: Dict[str, Sequence[ForwardDerivative]] = {}
        for key, info in info_dict.items():
            if not info.forward_derivatives:
                fw_derivative_dict[key] = []
                continue

            forward_derivatives = info.forward_derivatives

            # For functions that have a single def for out-of-place and inplace (like abs())
            if f.func.kind() == SchemaKind.inplace:
                # For inplace functions there is a little bit of work to do:
                #  1) Validate the formula and make sure the input that is modified is not used:
                #    - If there is a formula for the inplace variant of the function (is_exact_match == True) then
                #      we make sure that the original value of the input that is being modified inplace (self_p) is
                #      not used in the formula. Note that the formula can use "original_self_p" here and that would
                #      trigger a clone of the original input.
                #    - If we are re-using the out of place formula (is_exact_match == False) then we replace every
                #      occurrence of self_p and self_t by original_self_p and original_self_t. These will be
                #      populated by a cloned version of the original input (either the clone done by the backward AD
                #      logic if self is also used in a backward formula or a special clone that we add).
                #  2) At this point, there cannot be a self_p in the formula.
                #  3) Change "result" into "self_p" as by design, in the inplace function codegen, the result is
                #     simply called self (as it is modified inplace).
                #  4) Update the required primals data in case it used to contain "result" but should now contain
                #     "self"
                #  5) If it is not an exact match, the user formula is not modifying the existing forward grad
                #     inplace as it should. So add some code that makes sure that we do so if the forward grad
                #     already exists.

                assert (
                    len(info.forward_derivatives) == 1
                )  # Only single output inplace should exist
                fw_info = info.forward_derivatives[0]
                formula = fw_info.formula

                def replace_self_with_original_self(formula: str, postfix: str) -> str:
                    def repl(m: Match[str]) -> str:
                        return f"{m.group(1)}original_self{postfix}{m.group(2)}"

                    return re.sub(IDENT_REGEX.format(f"self{postfix}"), repl, formula)

                if re.search(IDENT_REGEX.format("self_p"), formula):
                    if is_exact_match:
                        # For manually defined formulas, don't allow the original value to be used
                        raise RuntimeError(
                            f'The formula for "{f.func.name}" is using the original value of self '
                            "that is being modified inplace. This would lead to wrong forward gradients. "
                            'Please use "result" in the formula only.'
                        )
                    else:
                        # When the original formula is out of place, we save a clone of the primal
                        # value to be able to access this value if needed
                        # replace "self_p"/"self_t" from the formula by "original_self_p"/"original_self_t"
                        formula = replace_self_with_original_self(formula, "_p")
                        formula = replace_self_with_original_self(formula, "_t")

                # replace "result" from the formula by "self_p"
                def repl(m: Match[str]) -> str:
                    return f"{m.group(1)}self_p{m.group(2)}"

                formula = re.sub(IDENT_REGEX.format("result"), repl, formula)

                required_primals = fw_info.required_inputs_primal
                if re.search(IDENT_REGEX.format("self_p"), formula):
                    required_primals = (
                        required_primals + ("self",) if required_primals else ("self",)
                    )

                if not is_exact_match:
                    # NOTE [In-place forward AD formula Optimization]
                    #
                    # This optimization transforms the formula to directly do inplace, i.e.
                    # instead of self_t.copy_(self_t.op()) we do self_t.op_() when the following are met:
                    #
                    # 1) the formula satisfies the pattern: "self_t.op(*args)"
                    # 2) "op" in (1) needs to be the same as the op the derivative is for
                    #
                    # (2) may seem too strict, but currently the only ops that satisfy (1) also satisfy (2)
                    # If there is a need, we can relax (2) to allow any op that has an in-place variant
                    is_single_method_on_self_t = False
                    directly_do_inplace = False
                    op_name: Optional[str] = None
                    between_parens: Optional[str] = None
                    match = re.fullmatch(r"self_t.([\w]*)\((.*)\)", formula)
                    if match:
                        op_name, between_parens = match.group(1), match.group(2)

                        # We want to...
                        #   Match: self_t.op1(other_p.op2(arg))
                        #   Avoid: self_t.op1(args) + self_t.op2(args)
                        #   Avoid: self_t.op1(other_p.op2(arg)) + self_t.op2(args)
                        def check_parens_nest_level_gt_zero(s: str) -> bool:
                            level = 1
                            for ch in s:
                                if ch == ")":
                                    level -= 1
                                    if level == 0:
                                        return False
                                if ch == "(":
                                    level += 1
                            return True

                        is_single_method_on_self_t = check_parens_nest_level_gt_zero(
                            between_parens
                        )
                        directly_do_inplace = (
                            is_single_method_on_self_t and op_name == info.name
                        )

                    if directly_do_inplace:
                        assert op_name is not None
                        assert between_parens is not None
                        formula = f"self_t_raw.defined() ? self_t_raw.{op_name}_({between_parens}) : {formula}"
                    else:
                        # Make sure that the forward grad is modified inplace when the original formula
                        # is out of place
                        formula = f"self_t_raw.defined() ? self_t_raw.copy_({formula}) : {formula}"

                required_original_self_value = bool(
                    re.search(IDENT_REGEX.format("original_self_p"), formula)
                ) or bool(re.search(IDENT_REGEX.format("original_self_t"), formula))

                forward_derivatives = [
                    ForwardDerivative(
                        formula=formula,
                        var_names=("self",),
                        var_types=fw_info.var_types,
                        required_inputs_fw_grad=fw_info.required_inputs_fw_grad,
                        required_inputs_primal=required_primals,
                        required_original_self_value=required_original_self_value,
                        is_reusing_outplace_formula=not is_exact_match,
                    ),
                ]

            fw_derivative_dict[key] = forward_derivatives

        result.append(
            NativeFunctionWithDifferentiabilityInfo(
                func=f, info=info_dict, fw_derivatives=fw_derivative_dict
            )
        )

    return result


def is_differentiable(
    name: str, type: Type, info: Optional[DifferentiabilityInfo]
) -> bool:
    return type.is_tensor_like() and (
        info is None or name not in info.non_differentiable_arg_names
    )


def gen_differentiable_outputs(
    fn: NativeFunctionWithDifferentiabilityInfo, key: str = "Default"
) -> List[DifferentiableOutput]:
    f = fn.func
    info = fn.info[key] if fn.info else None
    outputs: List[DifferentiableOutput] = [
        DifferentiableOutput(
            name=name,
            type=ret.type,
            cpp_type=cpp.return_type(ret, symint=True).cpp_type(),
        )
        for name, ret in zip(cpp.return_names(f), f.func.returns)
    ]
    output_differentiability = info.output_differentiability if info else None
    if output_differentiability is not None:
        if len(output_differentiability) != len(outputs):
            raise RuntimeError(
                f"The length of output_differentiability ({len(output_differentiability)}), "
                f"does not match the number of outputs ({len(outputs)})."
            )
        differentiable_outputs: List[DifferentiableOutput] = []
        if False in output_differentiability and f.func.kind() == SchemaKind.inplace:
            raise RuntimeError(
                "output_differentiability=False for inplace operation (version_counter won't get updated)"
            )
        for differentiable, output in zip(output_differentiability, outputs):
            if differentiable:
                differentiable_outputs.append(output)
        return differentiable_outputs
    candidate_differentiable_outputs = list(
        filter(lambda r: is_differentiable(r.name, r.type, info), outputs)
    )
    if uses_single_grad(info):
        return candidate_differentiable_outputs[:1]
    else:
        return candidate_differentiable_outputs
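The inplace rewriting above relies on whole-identifier regex substitution so that, for example, replacing self_p leaves original_self_p untouched. IDENT_REGEX is defined elsewhere in torchgen; the pattern in this standalone sketch is an assumption consistent with how it is used in this file:

import re

# Assumed whole-identifier pattern: match the name only when it is not part
# of a longer identifier, capturing the non-word neighbors on either side.
IDENT_REGEX = r"(^|\W){}($|\W)"

def replace_ident(formula: str, old: str, new: str) -> str:
    # Re-emit the captured neighbors around the new name.
    return re.sub(
        IDENT_REGEX.format(old), lambda m: f"{m.group(1)}{new}{m.group(2)}", formula
    )

print(replace_ident("original_self_p + self_p.mul(self_p)", "self_p", "result"))
# original_self_p + result.mul(result)
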
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/cpp.py
ADDED
@@ -0,0 +1,467 @@
from typing import List, Optional, Sequence, Set, Union

from torchgen import local
from torchgen.api.types import (
    ArgName,
    ArrayCType,
    ArrayRefCType,
    BaseCType,
    BaseTypeToCppMapping,
    Binding,
    boolT,
    ConstRefCType,
    CType,
    dimnameListT,
    intArrayRefT,
    iTensorListRefT,
    ListCType,
    longT,
    MutRefCType,
    NamedCType,
    OptionalCType,
    optionalIntArrayRefT,
    optionalSymIntArrayRefT,
    scalarT,
    SpecialArgName,
    symIntArrayRefT,
    SymIntT,
    tensorListT,
    tensorOptionsT,
    tensorT,
    TupleCType,
    VectorCType,
    voidT,
)
from torchgen.model import (
    Argument,
    Arguments,
    BaseTy,
    BaseType,
    FunctionSchema,
    ListType,
    NativeFunction,
    OptionalType,
    Return,
    SelfArgument,
    TensorOptionsArguments,
    Type,
)
from torchgen.utils import assert_never

# This file describes the translation of JIT schema to the public C++
# API, which is what people use when they call functions like at::add.
#
# Prominent characteristics of the C++ API:
#
#   - dtype, layout, device and pin_memory are collected into
#     a single C++ type TensorOptions (the native functions API
#     also has this, but tensor options is really most relevant
#     for the C++ API; it makes calling kwarg factory functions
#     pleasant)
#
#   - defaulting lives here (in fact, the dispatcher is completely
#     oblivious of defaults!)
#
# BTW: policy on name collisions: we try not to have types with
# collisions, but functions are fair game to collide


def name(
    func: FunctionSchema,
    *,
    faithful_name_for_out_overloads: bool = False,
    symint_overload: bool = False,
) -> str:
    name = str(func.name.name)
    if symint_overload:
        name += "_symint"
    if func.is_out_fn():
        if faithful_name_for_out_overloads:
            name += "_outf"
        else:
            name += "_out"

    return name


# Translation of "value types" in JIT schema to C++ API type. Value
# types look the same no matter if they are argument types or return
# types. Returns None if the type in question is not a value type.
def valuetype_type(
    t: Type,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = False,
) -> Optional[NamedCType]:
    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor or t.name == BaseTy.Scalar:
            return None
        elif str(t) == "SymInt":
            if symint:
                return NamedCType(binds, BaseCType(SymIntT))
            else:
                return NamedCType(binds, BaseCType(longT))
        if remove_non_owning_ref_types:
            if t.name == BaseTy.str:
                raise AssertionError(
                    "string ref->value conversion: not implemented yet"
                )
        # All other BaseType currently map directly to BaseCppTypes.
        return NamedCType(binds, BaseCType(BaseTypeToCppMapping[t.name]))
    elif isinstance(t, OptionalType):
        elem = valuetype_type(t.elem, binds=binds, symint=symint)
        if elem is None:
            return None
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        if str(t.elem) == "bool":
            assert t.size is not None
            return NamedCType(binds, ArrayCType(BaseCType(boolT), t.size))
        else:
            return None
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")


# Translation of types occurring in JIT arguments to a C++ argument type.
# If remove_non_owning_ref_types is set, we'll guarantee that the outputted CType is not a non-owning reference type.
# For example, we'll return std::vector<int> instead of IntArrayRef.
# See Note [translation from C++ reference to value types]
def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = False,
) -> NamedCType:
    # If it's a value type, do the value type translation
    r = valuetype_type(
        t,
        binds=binds,
        symint=symint,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(binds, MutRefCType(BaseCType(tensorT)))
            else:
                return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if str(t.elem) == "Tensor":
            if mutable and not local.use_const_ref_for_mutable_tensors():
                return NamedCType(
                    binds, MutRefCType(BaseCType(tensorT))
                )  # TODO: fix this discrepancy
            else:
                return NamedCType(
                    binds, ConstRefCType(OptionalCType(BaseCType(tensorT)))
                )
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
            return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "SymInt":
            if symint:
                return NamedCType(binds, BaseCType(optionalSymIntArrayRefT))
            else:
                return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        # TODO: remove these special cases, ArrayRef fallthrough works fine
        if str(t.elem) == "int":
            if remove_non_owning_ref_types:
                return NamedCType(binds, VectorCType(BaseCType(longT)))
            else:
                return NamedCType(binds, BaseCType(intArrayRefT))
        if str(t.elem) == "SymInt":
            if remove_non_owning_ref_types:
                if symint:
                    return NamedCType(binds, VectorCType(BaseCType(SymIntT)))
                else:
                    return NamedCType(binds, VectorCType(BaseCType(longT)))
            else:
                if symint:
                    return NamedCType(binds, BaseCType(symIntArrayRefT))
                else:
                    return NamedCType(binds, BaseCType(intArrayRefT))
        if str(t.elem) == "Tensor":
            if local.use_ilistref_for_tensor_lists():
                return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT)))
            else:
                return NamedCType(binds, BaseCType(tensorListT))
        elif str(t.elem) == "Scalar":
            return NamedCType(binds, ArrayRefCType(BaseCType(scalarT)))
        elif str(t.elem) == "Dimname":
            return NamedCType(binds, BaseCType(dimnameListT))
        elif str(t.elem) == "Tensor?":
            return NamedCType(
                binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
            )
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds, symint=symint)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")


# Translate a JIT argument into its C++ type
def argument_type(a: Argument, *, binds: ArgName, symint: bool = False) -> NamedCType:
    return argumenttype_type(a.type, mutable=a.is_write, symint=symint, binds=binds)


# Translation of a (non-multi) return type from JIT to C++
# N.B: returntype_type returns a CType, not a NamedCType.
# This is mostly because of the mismatch between return types and return names.
# e.g. a function with a return type of 'void' has 0 return names,
# and a function with a return type of 'std::tuple' has >1 return name.
def returntype_type(t: Type, *, mutable: bool, symint: bool = False) -> CType:
    # placeholder is ignored
    # NB: symint is ALWAYS respected for return types. So symint argument
    # here is IGNORED
    r = valuetype_type(t, binds="__placeholder__", symint=True)
    if r is not None:
        return r.type

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            if mutable:
                if local.use_const_ref_for_mutable_tensors():
                    return ConstRefCType(BaseCType(tensorT))
                else:
                    return MutRefCType(BaseCType(tensorT))
            else:
                # Note [Tensor Copy Returns]
                # Currently, we use "Argument.is_write" to determine
                # whether or not Tensor return types should be copies or references.
                # If that ever changes, take a look at other locations of this note!
                return BaseCType(tensorT)
        elif t.name == BaseTy.Scalar:
            return BaseCType(scalarT)
    elif isinstance(t, ListType):
        assert (
            not mutable
        ), "Native functions should never return a mutable tensor list. They should return void."
        elem = returntype_type(t.elem, mutable=False)
        assert t.size is None, f"fixed size list returns not supported: {t}"
        return VectorCType(elem)
    elif isinstance(t, OptionalType):
        elem = returntype_type(t.elem, mutable=mutable)
        if str(t.elem) == "Tensor":
            return OptionalCType(elem)

    raise AssertionError(f"unrecognized return type {t}")


# Translation of a single return to its C++ type
def return_type(r: Return, *, symint: bool = False) -> CType:
    return returntype_type(r.type, mutable=r.is_write, symint=symint)


# Translation of a full (possibly multi) return from JIT to its C++ type
def returns_type(rs: Sequence[Return], *, symint: bool = False) -> CType:
    if len(rs) == 0:
        return BaseCType(voidT)
    elif len(rs) == 1:
        return return_type(rs[0], symint=symint)
    else:
        return TupleCType([return_type(r, symint=symint) for r in rs])


def return_names(f: NativeFunction, *, fallback_name: str = "result") -> Sequence[str]:
    returns: List[str] = []
    for i, r in enumerate(f.func.returns):
        # If we have an inplace function, the return argument is
        # implicitly named self.
        # TODO: Consider incorporating this into the data model
        if f.func.name.name.inplace:
            assert i == 0, "illegal inplace function with multiple returns"
            name = "self"
        # If we are an out function, the name is the name of the
        # corresponding output argument (r.name will get recorded
        # in field_name later.)
        elif f.func.is_out_fn():
            name = f.func.arguments.out[i].name
        # If the return argument is explicitly named...
        elif r.name:
            name_conflict = any(
                r.name == a.name for a in f.func.schema_order_arguments()
            )
            if name_conflict and not f.func.is_out_fn():
                name = f"{r.name}_return"
            else:
                name = r.name
        # If there is no explicit name and no fallback name was passed in, we just name the output result,
        # unless it's a multi-return, in which case it's result0,
        # result1, etc (zero-indexed)
        else:
            name = fallback_name if len(f.func.returns) == 1 else f"{fallback_name}{i}"
        returns.append(name)
    return returns


JIT_TO_CPP_DEFAULT = {
    "False": "false",
    "True": "true",
    "None": "c10::nullopt",  # UGH this one is type directed
    "Mean": "at::Reduction::Mean",
    "[]": "{}",
    "contiguous_format": "MemoryFormat::Contiguous",
    "long": "at::kLong",
}


# Convert a JIT default into C++ expression representing the default
def default_expr(d: str, t: Type, *, symint: bool) -> str:
    if d == "None" and str(t) == "Tensor?":
        return "{}"
    if isinstance(t, BaseType) and t.name is BaseTy.str:
        # Schema allows single quotes but C++ needs double
        if len(d) >= 2 and d[0] == "'" and d[-1] == "'":
            s = ""
            i = 1
            while i + 1 < len(d):
                if d[i] != "\\":
                    if d[i] == '"':
                        s += '\\"'
                    else:
                        s += d[i]
                    i += 1
                else:
                    if d[i + 1] == "'":
                        s += "'"
                    else:
                        s += d[i : i + 2]
                    i += 2

            return f'"{s}"'

    if isinstance(t, OptionalType):
        if d == "None":
            return "c10::nullopt"

        return default_expr(d, t.elem, symint=symint)

    if isinstance(t, ListType):
        if d.startswith("[") and d.endswith("]"):
            return "{" + d[1:-1] + "}"
        elif symint and d.isdigit() and str(t.elem) == "SymInt":
            return f"c10::SymInt({d})"
        elif t.size is None:
            # NOTE: Sized lists can have scalar defaults
            raise ValueError(f"Expected a list default '[...]' but found: '{d}'")

    return JIT_TO_CPP_DEFAULT.get(d, d)

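A few illustrative calls to default_expr above, as a sketch assuming torchgen is importable; Type.parse is torchgen's parser for schema type strings:

from torchgen.api.cpp import default_expr
from torchgen.model import Type

print(default_expr("True", Type.parse("bool"), symint=False))       # true
print(default_expr("None", Type.parse("Tensor?"), symint=False))    # {}
print(default_expr("[0, 1]", Type.parse("int[]"), symint=False))    # {0, 1}
print(default_expr("'constant'", Type.parse("str"), symint=False))  # "constant"
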
# Convert an argument into its C++ API form


def argument(
    a: Union[Argument, TensorOptionsArguments, SelfArgument],
    *,
    cpp_no_default_args: Set[str],
    method: bool,
    faithful: bool,
    symint: bool = False,
    has_tensor_options: bool,
) -> List[Binding]:
    def sub_argument(
        a: Union[Argument, TensorOptionsArguments, SelfArgument]
    ) -> List[Binding]:
        return argument(
            a,
            cpp_no_default_args=cpp_no_default_args,
            method=method,
            faithful=faithful,
            symint=symint,
            has_tensor_options=has_tensor_options,
        )

    if isinstance(a, Argument):
        binds: ArgName
        if a.name == "memory_format" and has_tensor_options:
            binds = SpecialArgName.possibly_redundant_memory_format
        else:
            binds = a.name
        default: Optional[str] = None
        if a.name not in cpp_no_default_args and a.default is not None:
            default = default_expr(a.default, a.type, symint=symint)
        return [
            Binding(
                nctype=argument_type(a, binds=binds, symint=symint),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, TensorOptionsArguments):
        if faithful:
            return (
                sub_argument(a.dtype)
                + sub_argument(a.layout)
                + sub_argument(a.device)
                + sub_argument(a.pin_memory)
            )
        else:
            default = None
            # Enforced by NativeFunction.__post_init__
            assert "options" not in cpp_no_default_args
            if all(x.default == "None" for x in a.all()):
                default = "{}"
            elif a.dtype.default == "long":
                default = "at::kLong"  # TODO: this is wrong
            return [
                Binding(
                    nctype=NamedCType("options", BaseCType(tensorOptionsT)),
                    name="options",
                    default=default,
                    argument=a,
                )
            ]
    elif isinstance(a, SelfArgument):
        if method:
            # Caller is responsible for installing implicit this in context!
            return []
        else:
            return sub_argument(a.argument)
    else:
        assert_never(a)


def arguments(
    arguments: Arguments,
    *,
    faithful: bool,
    symint: bool = False,
    method: bool,
    cpp_no_default_args: Set[str],
) -> List[Binding]:
    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
    if faithful:
        args.extend(arguments.non_out)
        args.extend(arguments.out)
    else:
        args.extend(arguments.out)
        args.extend(arguments.non_out)
    return [
        r.no_default() if faithful else r
        for a in args
        for r in argument(
            a,
            faithful=faithful,
            symint=symint,
            method=method,
            has_tensor_options=arguments.tensor_options is not None,
            cpp_no_default_args=cpp_no_default_args,
        )
    ]
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/dispatcher.py
ADDED
@@ -0,0 +1,118 @@
import itertools
from typing import List, Sequence, Union

from torchgen.api import cpp

from torchgen.api.types import ArgName, Binding, CType, NamedCType
from torchgen.model import (
    Argument,
    FunctionSchema,
    Return,
    SelfArgument,
    TensorOptionsArguments,
    Type,
)
from torchgen.utils import assert_never, concatMap

# This file describes the translation of JIT schema to the dispatcher
# API, the *unboxed* calling convention by which invocations through
# the dispatcher are made. Historically, the dispatcher API matched
# the C++ API, but with the establishment of the boxed API, we've
# made changes to the dispatcher API so that the unboxed API
# better aligns with the boxed API. The dispatcher API hooks heavily
# into our template based boxing/unboxing machinery, so changes
# to this convention will usually need template updates too.
#
# Prominent characteristics of the dispatcher API:
#
#   - dtype, layout, device and pin_memory are represented as separate
#     arguments.
#


def name(func: FunctionSchema) -> str:
    return cpp.name(func)


def argumenttype_type(
    t: Type,
    *,
    mutable: bool,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = True,
) -> NamedCType:
    # This is a faux amis. If it makes sense in the future to add
    # more special cases here, or invert things so cpp.argument_type
    # calls this, or just completely inline the function, please do
    # it.
    return cpp.argumenttype_type(
        t,
        mutable=mutable,
        binds=binds,
        symint=symint,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
    )


def argument_type(
    a: Argument,
    *,
    binds: ArgName,
    remove_non_owning_ref_types: bool = False,
    symint: bool = True,
) -> NamedCType:
    return argumenttype_type(
        a.type,
        mutable=a.is_write,
        binds=binds,
        remove_non_owning_ref_types=remove_non_owning_ref_types,
        symint=symint,
    )


def returns_type(rs: Sequence[Return], *, symint: bool = True) -> CType:
    # At present, there is no difference. But there could be!
    return cpp.returns_type(rs, symint=symint)


def jit_arguments(func: FunctionSchema) -> List[Argument]:
    def to_argument(
        a: Union[Argument, TensorOptionsArguments, SelfArgument]
    ) -> List[Argument]:
        if isinstance(a, Argument):
            return [a]
        elif isinstance(a, SelfArgument):
            return [a.argument]
        elif isinstance(a, TensorOptionsArguments):
            return [a.dtype, a.layout, a.device, a.pin_memory]
        else:
            assert_never(a)

    return list(
        concatMap(
            to_argument,
            itertools.chain(
                func.arguments.positional, func.arguments.kwarg_only, func.arguments.out
            ),
        )
    )


def argument(
    a: Argument, *, remove_non_owning_ref_types: bool = False, symint: bool = True
) -> Binding:
    return Binding(
        nctype=argument_type(
            a,
            binds=a.name,
            remove_non_owning_ref_types=remove_non_owning_ref_types,
            symint=symint,
        ),
        name=a.name,
        argument=a,
    )


def arguments(func: FunctionSchema, *, symint: bool = True) -> List[Binding]:
    return [argument(a, symint=symint) for a in jit_arguments(func)]
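A sketch of the flattening performed by jit_arguments above, assuming torchgen is importable. A factory schema groups dtype/layout/device/pin_memory into a TensorOptionsArguments; the dispatcher re-expands them into four separate arguments, in schema order:

from torchgen.api.dispatcher import jit_arguments
from torchgen.model import FunctionSchema

schema = FunctionSchema.parse(
    "ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, "
    "Device? device=None, bool? pin_memory=None) -> Tensor"
)
# TensorOptionsArguments is re-expanded into its four component arguments:
print([a.name for a in jit_arguments(schema)])
# ['size', 'dtype', 'layout', 'device', 'pin_memory']
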
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/lazy.py
ADDED
@@ -0,0 +1,464 @@
from typing import Any, Dict, List, Optional, Tuple, Union

from torchgen.api.types import (
    BaseCppType,
    BaseCType,
    boolT,
    CType,
    deviceT,
    doubleT,
    generatorT,
    layoutT,
    ListCType,
    longT,
    memoryFormatT,
    NamedCType,
    OptionalCType,
    scalarT,
    scalarTypeT,
    stringT,
    SymIntT,
    VectorCType,
)

from torchgen.model import (
    Argument,
    BaseTy,
    BaseType,
    FunctionSchema,
    ListType,
    OperatorName,
    OptionalType,
    Return,
    TensorOptionsArguments,
    Type,
)


_valueT: Optional[BaseCppType] = None


# A ValueT is an IR type which represents the computation of a Tensor. In other
# words, a PyTorch user will do operations on lazy tensors, and each output lazy
# tensor internally tracks a ValueT representing the IR node that would have
# actually produced the value of this tensor for real.
#
# This is configurable because different lazy tensor backends (LTC vs XLA) will
# have different IR representations. (Though, arguably, after unification they
# shouldn't!)
def getValueT() -> BaseCppType:
    global _valueT
    if not _valueT:
        raise NotImplementedError(
            "The value type needs to be set with setValueT() in run_gen_lazy_tensor()"
        )

    return _valueT


def setValueT(val: BaseCppType) -> None:
    global _valueT
    _valueT = val


# this is a bad hack. I need to refactor the data model to represent each arg in the schema as an object,
# making it easier to represent special properties of an arg.
tensorListValueT = BaseCppType("torch::lazy", "Value")


def process_ir_type(
    typ: Type, properties: "LazyIrProperties", *, symint: bool
) -> Union[BaseCType, VectorCType, OptionalCType, ListCType]:
    """
    This function takes a type from NativeFunctions and converts it for use with
    lazy tensor codegen.

    Type conversion for lazy currently consists of
     (1) changing at::Tensors into lazy::Values
     (2) wrapping everything in a BaseCType
     (3) making cpp-reference types into cpp-value types (e.g. vector instead of IntArrayRef)

    (1) converts at::Tensors to lazy::Values (which wrap lazy::Nodes, with which Lazy IR represents tensors.)
    There is special handling for Optional[Tensor] or List[Tensor], etc- hence 'tensor-like'

    This is incomplete- there are assertions in places that it's expected to need to add
    more types as the codegen is used with more operators.
    """
    if isinstance(typ, BaseType):
        if typ.name == BaseTy.Tensor:
            return BaseCType(getValueT())
        elif typ.name == BaseTy.Scalar:
            if properties.TreatScalarsAsConstants:
                return BaseCType(scalarT)
            # at::scalar has special handling,
            # and is wrapped in an lazy::Value just like at::tensor
            return BaseCType(getValueT())
        elif typ.name == BaseTy.ScalarType:
            return BaseCType(scalarTypeT)
        elif typ.name == BaseTy.int:
            return BaseCType(longT)
        elif typ.name == BaseTy.SymInt:
            if symint:
                return BaseCType(getValueT())
            else:
                return BaseCType(longT)
        elif typ.name == BaseTy.bool:
            return BaseCType(boolT)
        elif typ.name == BaseTy.float:
            return BaseCType(doubleT)
        elif typ.name == BaseTy.str:
            return BaseCType(stringT)
        elif typ.name == BaseTy.Device:
            return BaseCType(deviceT)
        elif typ.name == BaseTy.Generator:
            return BaseCType(generatorT)
        elif typ.name == BaseTy.Layout:
            return BaseCType(layoutT)
        elif typ.name == BaseTy.MemoryFormat:
            return BaseCType(memoryFormatT)
        else:
            raise AssertionError(f"TODO add support for type {repr(typ)}")
    elif isinstance(typ, OptionalType):
        return OptionalCType(process_ir_type(typ.elem, properties, symint=symint))
    elif isinstance(typ, ListType):
        if str(typ.elem) == "Tensor?":
            # TODO(whc) is this actually correct? or should it use a Vector like above
            return ListCType(OptionalCType(BaseCType(getValueT())))
        elif str(typ.elem) == "Tensor":
            # this is a TensorList which comes in from GetTensorList as a Value
            return BaseCType(tensorListValueT)
        elif typ.elem == BaseType(BaseTy.SymInt):
            # TODO: return a value type. The problem here is analogous to
            # the problem with tensorListValueT: if you have SymInt[] you
            # cannot conveniently save the list of Value directly, as nodes
            # expect to save values as a vector for ALL arguments. So you
            # need a separate IR node that represents all of the size nodes
            # assembled into a list. I'm not an LTC dev so I don't want to
            # figure it out right now. Y'all figure it out...
            return VectorCType(BaseCType(longT))

        else:
            return VectorCType(process_ir_type(typ.elem, properties, symint=symint))
    else:
        raise AssertionError(f"unrecognized type {repr(typ)}")


# TODO: Determining this based off of CType is bad; this should be computed
# from Type directly; then the same logic as process_ir_type can be used
#
# Invariant: passed typ should be an *owning* CType (e.g., we will report
# that ArrayRef<Value> is NOT a value type)
def isValueType(typ: CType, properties: "Optional[LazyIrProperties]" = None) -> bool:
    """
    Given a type, determine if it is a Value-like type. This is equivalent to
    being Tensor-like, but assumes the type has already been transformed.
    """
    if isinstance(typ, BaseCType):
        # I am regretting my naming conventions, but now we are wrapping at::scalar in
        # lazy value, while preserving other 'scalar' types as scalars in the IR
        treat_scalars_as_constants = properties and properties.TreatScalarsAsConstants
        return (
            typ.type == getValueT()
            or (typ.type == scalarT and not treat_scalars_as_constants)
            or typ.type == SymIntT
        )
    elif typ == VectorCType(BaseCType(SymIntT)):
        # TODO: report True for this
        return False
    elif isinstance(typ, (OptionalCType, ListCType, VectorCType)):
        return isValueType(typ.elem, properties)
    return False


def isSymIntType(typ: Type) -> bool:
    return isinstance(typ, BaseType) and typ.name == BaseTy.SymInt


def isWrappedScalarType(typ: Type) -> bool:
    """
    Given a type, determine if it is a c10::scalar which we will wrap in a lazy Value.
    Since we literally change the type from scalarT to valueT, information is lost.
    This function helps build a list of wrapped scalars to save that information
    """
    if isinstance(typ, BaseType):
        # I am regretting my naming conventions, but now we are wrapping at::scalar in
        # lazy value, while preserving other 'scalar' types as scalars in the IR
        return typ.name == BaseTy.Scalar
    elif isinstance(typ, (OptionalType, ListType)):
        return isWrappedScalarType(typ.elem)
    return False


# TODO: dedupe with Type.is_generator_like
def isGeneratorType(typ: Type) -> bool:
    if isinstance(typ, BaseType):
        return typ.name == BaseTy.Generator
    elif isinstance(typ, (OptionalType)):
        return isGeneratorType(typ.elem)
    return False


# This class caches a few derived properties computed from an Argument
# and LazyIrProperties
class LazyArgument:
    name: str
    orig_type: Type
    lazy_type_: Optional[CType]
    is_wrapped_scalar: bool
    is_generator: bool
    # TODO: this is lies, it is false for symint list
    is_symint_or_list: bool

    # Whether or not we are treating this as symint or not
    symint: bool

    # true if this argument is or contains a lazy IR value
    is_lazy_value: bool

    def __init__(self, arg: Argument, properties: "LazyIrProperties", *, symint: bool):
        self.name = arg.name
        self.orig_type = arg.type
        self.symint = symint
        self.is_optional = isinstance(arg.type, OptionalType)
        self.is_generator = isGeneratorType(arg.type)
        self.lazy_type_ = process_ir_type(arg.type, properties, symint=symint)
        self.is_wrapped_scalar = isWrappedScalarType(arg.type)
        self.is_symint_or_list = symint and (
            isSymIntType(arg.type)
            or (isinstance(arg.type, OptionalType) and isSymIntType(arg.type.elem))
            # TODO: lists of symints are not currently treated as value types
            # or (isinstance(arg.type, ListType) and isSymIntType(arg.type.elem))
        )

        self.is_lazy_value = isValueType(self.lazy_type, properties)

    @property
    def lazy_type(self) -> CType:
        assert (
            self.lazy_type_ is not None
        ), f"Attempted to access lazy_type for invalid argument {self.name}"
        return self.lazy_type_


class LazyIrProperties:
    """Collection of properties for an IR node

    The property groups are listed below. Each group is mutually
    exclusive, meaning that only one property from each group can be True
    at any one time. The properties can be accessed as if they were normal
    attributes. The mutual exclusivity is automatically handled.
    """

    Properties: Tuple[Tuple[str, ...], ...] = (
        (
            "ShapePrecompute",  # Assume shape has been precomputed
            "ShapeCompute",  # Need to compute the shape on construction
            "ShapeCache",  # Utilize the shape cache to defer computation
        ),
        (
            "Lower",  # Codegen full lower function
            "LowerDeclOnly",  # Codegen only lower function declaration
        ),
        (
            "CanBeReused",  # Codegen full reuse function
            "CanBeReusedDeclOnly",  # Codegen only reuse function declaration
        ),
        (
            "CreateFn",  # Codegen full create function
            "CreateFnDeclOnly",  # Codegen only create function declaration
        ),
        (
            "TreatScalarsAsConstants",  # Treat Scalars as constants instead of handling like values
        ),
    )

    def __init__(self, *default_properties: str):
        properties: Dict[Tuple[str, ...], Optional[str]] = dict.fromkeys(
            LazyIrProperties.Properties
        )
        self.__dict__["properties"] = properties
        for p in default_properties:
            setattr(self, p, True)

    def __getattr__(self, key: str) -> Any:
        properties = self.__dict__["properties"]
        for values in LazyIrProperties.Properties:
            if key in values:
                return properties[values] == key

        return self.__getattribute__(key)

    def __setattr__(self, key: str, value: Any) -> Any:
        properties = self.__dict__["properties"]
        for values in LazyIrProperties.Properties:
            if key in values:
                properties[values] = key if value else None
                return value

        raise KeyError(f"Invalid property: {key}")


# Inspired by a FunctionSchema object, a LazyIrSchema holds the schema of a Lazy IR node.
# Unlike a FunctionSchema, it has no round-trippable string form (relating to the YAML),
# but carries type information from a native FunctionSchema modified for use with IR nodes,
# and preserving original argument names.
#
# TODO: This is not idiomatic with how other torchgen APIs transform on schema.
class LazyIrSchema:
    # The name of the operator this function schema describes.
    name: "OperatorName"

    positional_args: Tuple[LazyArgument, ...]
    keyword_args: Tuple[LazyArgument, ...]

    # TODO: Need to handle collisions with argument names at some point
    returns: Tuple["Return", ...]

    # if this schema has a Generator arg, list its orig ctype/name but don't
    # build a LazyArgument since lazy IR doesn't support it
    generator_arg: Optional[NamedCType] = None

    # original function schema
    func: FunctionSchema

    # Whether or not we are code-genning for SymInt or not
    symint: bool

    properties: LazyIrProperties = LazyIrProperties(
        # default properties
        "ShapePrecompute",
        "Lower",
        "CanBeReused",
    )
    opkind: Optional[str] = None

    def __init__(
        self,
        func: FunctionSchema,
        properties: Optional[LazyIrProperties] = None,
        *,
        symint: bool,
    ):
        if properties:
            self.properties = properties

        self.func = func
        self.symint = symint
        positional_args: List[LazyArgument] = []
        for arg_field in ["pre_self_positional", "self_arg", "post_self_positional"]:
            if arg_field == "self_arg" and func.arguments.self_arg is not None:
                arg = func.arguments.self_arg.argument
                positional_args.append(
                    LazyArgument(arg, self.properties, symint=symint)
                )
            elif getattr(func.arguments, arg_field) is not None:
                positional_args.extend(
                    LazyArgument(arg, self.properties, symint=symint)
                    for arg in getattr(func.arguments, arg_field)
                )
        self.positional_args = tuple(positional_args)

        keyword_args: List[LazyArgument] = []
        for arg_field in [
            "pre_tensor_options_kwarg_only",
            "tensor_options",
            "post_tensor_options_kwarg_only",
            "out",
        ]:
            curr_args = getattr(func.arguments, arg_field)
            if curr_args is not None:
                if isinstance(curr_args, TensorOptionsArguments):
                    curr_args = curr_args.all()
                for arg in curr_args:
                    if isGeneratorType(arg.type):
                        assert (
                            self.generator_arg is None
                        ), "We expect there is only one generator arg"
                        self.generator_arg = NamedCType(
                            arg.name, arg.type  # type:ignore[arg-type]
                        )
                keyword_args.extend(
                    LazyArgument(arg, self.properties, symint=symint)
                    for arg in curr_args
                )
        self.keyword_args = tuple(keyword_args)
        self.name = func.name
        self.returns = func.returns

    @property
    def node_name(self) -> str:
        """
        Return camel-case version of op in node.

        Note: This function also appends any `overload_name` in the operation.
        For example, if the op is `bitwise_and.Tensor`, the returned name
        will be `BitwiseAndTensor`.
        """
        op_name = f"{self.name.name}_{self.name.overload_name}".lower()
        return "".join(word.capitalize() or "" for word in op_name.split("_"))

    @property
    def aten_name(self) -> str:
        return str(self.name.name)

    @property
    def base_name(self) -> str:
        return f"{self.name.name.base}"

    def filtered_args(
+
self,
|
| 410 |
+
positional: bool = True,
|
| 411 |
+
keyword: bool = True,
|
| 412 |
+
values: bool = True,
|
| 413 |
+
scalars: bool = True,
|
| 414 |
+
generator: bool = True,
|
| 415 |
+
) -> List[LazyArgument]:
|
| 416 |
+
# This function maintains the sorted order of arguments but provides different filtered views.
|
| 417 |
+
# Some parts of the code care about kwargs vs args (TS lowerings),
|
| 418 |
+
# other parts care about whether they need to wrap the arg in a lazy value or leave it alone.
|
| 419 |
+
# Generators are special cased, as they are needed for fallback/shape-inference but not supported
|
| 420 |
+
# in TS lowerings and therefore also omitted from lazy IR.
|
| 421 |
+
args: List[LazyArgument] = []
|
| 422 |
+
if positional:
|
| 423 |
+
args.extend(self.positional_args)
|
| 424 |
+
if keyword:
|
| 425 |
+
args.extend(self.keyword_args)
|
| 426 |
+
|
| 427 |
+
if values and scalars and generator:
|
| 428 |
+
return args
|
| 429 |
+
elif values and scalars:
|
| 430 |
+
return [a for a in args if not a.is_generator]
|
| 431 |
+
elif values:
|
| 432 |
+
return [a for a in args if a.is_lazy_value]
|
| 433 |
+
elif scalars:
|
| 434 |
+
return [
|
| 435 |
+
a
|
| 436 |
+
for a in args
|
| 437 |
+
if not a.is_lazy_value and (generator or not a.is_generator)
|
| 438 |
+
]
|
| 439 |
+
|
| 440 |
+
return []
|
| 441 |
+
|
| 442 |
+
@property
|
| 443 |
+
def positional_values(self) -> List[LazyArgument]:
|
| 444 |
+
return self.filtered_args(
|
| 445 |
+
positional=True, keyword=False, values=True, scalars=False
|
| 446 |
+
)
|
| 447 |
+
|
| 448 |
+
@property
|
| 449 |
+
def positional_scalars(self) -> List[LazyArgument]:
|
| 450 |
+
return self.filtered_args(
|
| 451 |
+
positional=True, keyword=False, values=False, scalars=True
|
| 452 |
+
)
|
| 453 |
+
|
| 454 |
+
@property
|
| 455 |
+
def keyword_values(self) -> List[LazyArgument]:
|
| 456 |
+
return self.filtered_args(
|
| 457 |
+
positional=False, keyword=True, values=True, scalars=False
|
| 458 |
+
)
|
| 459 |
+
|
| 460 |
+
@property
|
| 461 |
+
def keyword_scalars(self) -> List[LazyArgument]:
|
| 462 |
+
return self.filtered_args(
|
| 463 |
+
positional=False, keyword=True, values=False, scalars=True
|
| 464 |
+
)
|
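
The LazyIrProperties docstring above promises automatic mutual exclusivity within each property group. A minimal sketch of that behavior (an editor's illustration, assuming this environment's torchgen is importable; all names are as defined in the file above):

# Enabling one member of a group clears its siblings automatically.
from torchgen.api.lazy import LazyIrProperties

props = LazyIrProperties("ShapePrecompute", "Lower")
assert props.ShapePrecompute and props.Lower
props.ShapeCache = True           # switch the Shape group to ShapeCache
assert props.ShapeCache
assert not props.ShapePrecompute  # the previous Shape property is cleared
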
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/meta.py
ADDED
@@ -0,0 +1,12 @@
from torchgen.model import NativeFunctionsGroup

# Follows dispatcher calling convention, but:
#   - Mutable arguments not allowed. Meta functions are always
#     written in functional form. Look at FunctionSchema.signature()
#   - No tensor returns; instead we return a TensorMeta describing
#     the tensor in question


def name(g: NativeFunctionsGroup) -> str:
    # use the overload name from the functional version
    return str(g.functional.func.name).replace(".", "_")
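
For illustration, name() above is a pure string flattening of the functional overload's operator name; a self-contained sketch (the operator string is a hypothetical example):

# meta.name() applies this replace() to str(g.functional.func.name).
functional_op = "add.Tensor"   # hypothetical operator name
assert functional_op.replace(".", "_") == "add_Tensor"
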
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/native.py
ADDED
@@ -0,0 +1,153 @@
from typing import List, Optional, Sequence, Union

from torchgen import local
from torchgen.api import cpp

from torchgen.api.types import (
    ArgName,
    BaseCType,
    Binding,
    boolT,
    ConstRefCType,
    CType,
    deviceT,
    layoutT,
    ListCType,
    MutRefCType,
    NamedCType,
    OptionalCType,
    scalarT,
    scalarTypeT,
    tensorT,
)
from torchgen.model import (
    Argument,
    FunctionSchema,
    Return,
    SelfArgument,
    TensorOptionsArguments,
    Type,
)
from torchgen.utils import assert_never

# This file describes the translation of JIT schema to the native functions API.
# This looks a lot like the C++ API (which makes historical sense, because the
# idea was you wrote native functions to implement functions in the C++ API),
# but over time we have evolved the C++ API without actually changing our
# native:: kernels. The intention is to make native API and dispatcher API
# line up as closely as possible, since this results in the least overhead
# (no translation is needed from dispatcher API to native API).
#
# NB: this is symint aware, you will get the non-SymInt variant for some
# dispatch entries and SymInt for others.


def name(func: FunctionSchema) -> str:
    name = str(func.name.name)
    # TODO: delete this!
    if func.is_out_fn():
        name += "_out"
    if func.name.overload_name:
        name += f"_{func.name.overload_name}"
    return name


def argumenttype_type(
    t: Type, *, mutable: bool, binds: ArgName, symint: bool
) -> NamedCType:
    if str(t) == "Tensor?":
        tensor_type: OptionalCType = OptionalCType(BaseCType(tensorT))
        if mutable and not local.use_const_ref_for_mutable_tensors():
            return NamedCType(binds, MutRefCType(tensor_type))
        else:
            return NamedCType(binds, ConstRefCType(tensor_type))
    elif str(t) == "Tensor?[]":
        return NamedCType(
            binds, ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT))))
        )
    elif str(t) == "Scalar":
        return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
    elif str(t) == "Scalar?":
        return NamedCType(binds, ConstRefCType(OptionalCType(BaseCType(scalarT))))
    return cpp.argumenttype_type(t, mutable=mutable, binds=binds, symint=symint)


def returns_type(rs: Sequence[Return], *, symint: bool) -> CType:
    return cpp.returns_type(rs, symint=symint)


def argument_type(a: Argument, *, binds: ArgName, symint: bool) -> NamedCType:
    return argumenttype_type(a.type, mutable=a.is_write, binds=binds, symint=symint)


def argument(
    a: Union[Argument, SelfArgument, TensorOptionsArguments],
    *,
    is_out: bool,
    symint: bool,
) -> List[Binding]:
    # Ideally, we NEVER default native functions. However, there are a number
    # of functions that call native:: directly and rely on the defaulting
    # existing. So for BC, we generate defaults for non-out variants (but not
    # for out variants, where it is impossible to generate an appropriate
    # default)
    should_default = not is_out
    if isinstance(a, Argument):
        default: Optional[str] = None
        if should_default and a.default is not None:
            default = cpp.default_expr(a.default, a.type, symint=symint)
        return [
            Binding(
                nctype=argument_type(a, binds=a.name, symint=symint),
                name=a.name,
                default=default,
                argument=a,
            )
        ]
    elif isinstance(a, SelfArgument):
        # Erase SelfArgument from the distinction
        return argument(a.argument, is_out=is_out, symint=symint)
    elif isinstance(a, TensorOptionsArguments):
        default = None
        if should_default:
            default = "{}"
        # TODO: Not sure why the arguments assigned here are for
        # TensorOptionsArguments and not the constituent pieces. It seems
        # to matter
        return [
            Binding(
                nctype=NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))),
                name="dtype",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("layout", OptionalCType(BaseCType(layoutT))),
                name="layout",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("device", OptionalCType(BaseCType(deviceT))),
                name="device",
                default=default,
                argument=a,
            ),
            Binding(
                nctype=NamedCType("pin_memory", OptionalCType(BaseCType(boolT))),
                name="pin_memory",
                default=default,
                argument=a,
            ),
        ]
    else:
        assert_never(a)


def arguments(func: FunctionSchema, *, symint: bool) -> List[Binding]:
    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
    args.extend(func.arguments.non_out)
    args.extend(func.arguments.out)
    return [
        r for arg in args for r in argument(arg, symint=symint, is_out=func.is_out_fn())
    ]
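
A minimal sketch of name()'s suffix rules above (an editor's illustration, assuming torchgen is importable; the schema string is a simplified, hypothetical example): out= variants gain "_out" and overloaded ops gain "_<overload_name>".

from torchgen.api import native
from torchgen.model import FunctionSchema

# Simplified schema for illustration only.
f = FunctionSchema.parse("add.Tensor(Tensor self, Tensor other) -> Tensor")
print(native.name(f))  # -> "add_Tensor"
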
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/structured.py
ADDED
@@ -0,0 +1,157 @@
from typing import List, Union

from torchgen.api import cpp

from torchgen.api.types import (
    ArgName,
    ArrayRefCType,
    BaseCType,
    Binding,
    ConstRefCType,
    dimnameListT,
    intArrayRefT,
    iOptTensorListRefT,
    iTensorListRefT,
    NamedCType,
    OptionalCType,
    optionalIntArrayRefT,
    optionalScalarRefT,
    optionalTensorRefT,
    scalarT,
    tensorT,
)
from torchgen.model import (
    Argument,
    BaseTy,
    BaseType,
    ListType,
    NativeFunctionsGroup,
    OptionalType,
    SelfArgument,
    TensorOptionsArguments,
    Type,
)
from torchgen.utils import assert_never

# This file describes the translation of JIT schema to the structured functions API.
# This is similar to native API, but a number of historical problems with native
# API have been fixed.


# Translation of types occurring in JIT arguments to a C++ argument type.
# NB: For now, mutable doesn't do anything; but it could if we make
# some more nominal types
def argumenttype_type(t: Type, *, mutable: bool, binds: ArgName) -> NamedCType:
    # If it's a value type, do the value type translation
    # NB: structured kernels ALWAYS have symint off, since they involve actual
    # kernels that require real ints. The one exception is the
    # CompositeExplicitAutograd and the meta function (which could
    # hypothetically be SymInt), but for simplicity we plan for these to just
    # be handled in Python
    r = cpp.valuetype_type(t, symint=False, binds=binds)
    if r is not None:
        return r

    if isinstance(t, BaseType):
        if t.name == BaseTy.Tensor:
            return NamedCType(binds, ConstRefCType(BaseCType(tensorT)))
        elif t.name == BaseTy.Scalar:
            return NamedCType(binds, ConstRefCType(BaseCType(scalarT)))
        else:
            raise AssertionError(f"base type should have been value type {t}")
    elif isinstance(t, OptionalType):
        if t.elem == BaseType(BaseTy.Tensor):
            return NamedCType(binds, BaseCType(optionalTensorRefT))
        elif t.elem == BaseType(BaseTy.Scalar):
            return NamedCType(binds, BaseCType(optionalScalarRefT))
        elif isinstance(t.elem, ListType) and str(t.elem.elem) == "int":
            return NamedCType(binds, BaseCType(optionalIntArrayRefT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, OptionalCType(elem.type))
    elif isinstance(t, ListType):
        if t.elem == BaseType(BaseTy.Tensor):
            return NamedCType(binds, ConstRefCType(BaseCType(iTensorListRefT)))
        elif t.elem == OptionalType(BaseType(BaseTy.Tensor)):
            return NamedCType(binds, BaseCType(iOptTensorListRefT))
        # TODO: delete these special cases; see torchgen.api.cpp--these
        # must be changed in tandem, but there are problems; see
        # https://github.com/pytorch/pytorch/pull/51485
        elif str(t.elem) == "int":
            return NamedCType(binds, BaseCType(intArrayRefT))
        elif str(t.elem) == "Dimname":
            return NamedCType(binds, BaseCType(dimnameListT))
        elem = argumenttype_type(t.elem, mutable=mutable, binds=binds)
        return NamedCType(binds, ArrayRefCType(elem.type))
    else:
        raise AssertionError(f"unrecognized type {repr(t)}")


def argument_type(a: Argument, *, binds: ArgName) -> NamedCType:
    return argumenttype_type(a.type, mutable=a.is_write, binds=binds)


# returns_type intentionally omitted, because structured kernels never "return";
# instead, they always indirectly report their outputs (in the case of a meta
# function, by calling set_output; in the case of an impl function, by writing
# directly into the provided out argument).


# Structured kernels are never defaulted
def argument(a: Union[Argument, SelfArgument, TensorOptionsArguments]) -> List[Binding]:
    if isinstance(a, Argument):
        return [
            Binding(
                nctype=argument_type(a, binds=a.name),
                name=a.name,
                default=None,
                argument=a,
            )
        ]
    elif isinstance(a, SelfArgument):
        return argument(a.argument)
    elif isinstance(a, TensorOptionsArguments):
        raise AssertionError("structured kernels don't support TensorOptions yet")
    else:
        assert_never(a)


def impl_arguments(g: NativeFunctionsGroup) -> List[Binding]:
    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []

    if g.out.precomputed:
        # A list of parameters for the impl function with
        # certain parameters replaced with precomputed counterparts
        # as specified in native_functions.yaml.
        non_out_args_replaced: List[
            Union[Argument, TensorOptionsArguments, SelfArgument]
        ] = []
        for a in g.out.func.arguments.non_out:
            if isinstance(a, Argument) and a.name in g.out.precomputed.replace:
                # If a is in precompute.replace, append the parameters
                # that should replace it onto non_out_args_replaced.
                non_out_args_replaced.extend(g.out.precomputed.replace[a.name])
            else:
                # If not, push a as it is.
                non_out_args_replaced.append(a)

        args.extend(non_out_args_replaced)
        # g.out.precomputed.add is the list of parameters that are added
        # without replacement after the non out args and just before the out args
        args.extend(g.out.precomputed.add)
    else:
        args.extend(g.out.func.arguments.non_out)

    args.extend(g.out.func.arguments.out)
    return [r for arg in args for r in argument(arg)]


def meta_arguments(g: NativeFunctionsGroup) -> List[Binding]:
    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
    args.extend(g.functional.func.arguments.non_out)
    return [r for arg in args for r in argument(arg)]


def out_arguments(g: NativeFunctionsGroup) -> List[Binding]:
    args: List[Union[Argument, TensorOptionsArguments, SelfArgument]] = []
    args.extend(g.out.func.arguments.out)
    return [r for arg in args for r in argument(arg)]
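
A minimal sketch of the structured type translation above (an editor's illustration, assuming torchgen is importable; the "weight" binding name is hypothetical): optional tensors become at::OptionalTensorRef rather than c10::optional<Tensor>.

from torchgen.api.structured import argumenttype_type
from torchgen.model import Type

nct = argumenttype_type(Type.parse("Tensor?"), mutable=False, binds="weight")
print(nct.cpp_type())  # -> "at::OptionalTensorRef"
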
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/translate.py
ADDED
@@ -0,0 +1,430 @@
from typing import Dict, List, NoReturn, Sequence, Union

from torchgen.api.types import (
    ArrayRefCType,
    BaseCType,
    Binding,
    boolT,
    ConstRefCType,
    deviceT,
    Expr,
    intArrayRefT,
    iOptTensorListRefT,
    layoutT,
    ListCType,
    longT,
    memoryFormatT,
    MutRefCType,
    NamedCType,
    opmath_t,
    OptionalCType,
    optionalIntArrayRefT,
    optionalScalarRefT,
    optionalSymIntArrayRefT,
    optionalTensorRefT,
    scalar_t,
    scalarT,
    scalarTypeT,
    SpecialArgName,
    symIntArrayRefT,
    SymIntT,
    tensorOptionsT,
    tensorT,
    VectorCType,
)

# This file implements a small program synthesis engine that implements
# conversions between one API to another.
#
# The key data type in this file is NamedCType, short for Named C++ semantic type. A NamedCType
# represents a C++ type, plus semantic information about what it represents.
# For example, consider the argument "bool pin_memory"; its normal C++ type is
# "bool", but its C++ semantic type also keeps track that this represents a
# "pin_memory"; you can't just use a random other boolean in a context where you
# need a "pin_memory"!
#
# The translator takes a list of needed NamedCTypes, and then figures out how
# to construct expressions with these NamedCTypes from the given bindings. Many
# of these expressions are trivial (I need a Tensor other; there's a Tensor
# other scope); others are more nontrivial and may require packing/unpacking.
# Some examples of non-trivial action:
#
#   - Need the "dtype" binding? Well, maybe "dtype" isn't available
#     in the context, instead, "options" is, and you need to extract
#     it from there. (Gather)
#
#   - Need the "context" binding? Well, maybe "context" isn't available
#     in the context, and you need to construct it from "dtype", "device",
#     etc. (Scatter)
#
#   - Need the "memory_format" binding? Well, actually, it's available
#     from both "memory_format" and "options", so you had better make sure
#     they are consistent. (Join)

options_ctype = NamedCType("options", ConstRefCType(BaseCType(tensorOptionsT)))

out_tensor_ctype = NamedCType("out", ConstRefCType(BaseCType(tensorT)))

longVec_ctype = VectorCType(BaseCType(longT))
longSymVec_ctype = VectorCType(BaseCType(SymIntT))
optionalLongVec_ctype = OptionalCType(VectorCType(BaseCType(longT)))
optionalScalar_ctype = OptionalCType(BaseCType(scalarT))
optionalTensor_ctype = OptionalCType(BaseCType(tensorT))


class UnsatError(RuntimeError):
    pass


# Given a set of in-scope bindings and a set of target bindings, synthesize
# a list of expressions that uses only the in-scope bindings (bindings) that
# have all of the types of goals. You may want to use this function if
# you're generating code for a function like:
#
#   void f({args}) {
#     g({exprs}); // g is a different API
#   }
#
# and you need to generate "exprs".
#
# Typically, a list of Bindings is convenient to get (you usually call something
# like arguments() to get them); but technically you only need less information:
# for 'bindings' an (un-ordered) list of Exprs is sufficient; similarly, for
# 'goals', an (ordered) list of NamedCType goals is sufficient. If you are doing
# something more complicated, e.g., tracking the set of bindings in a context,
# you may find using these smaller types more convenient.
def translate(
    bindings: Sequence[Union[Expr, Binding]],
    goals: Sequence[Union[NamedCType, Binding]],
    *,
    method: bool = False,
    allow_expensive_conversions: bool = False,
) -> List[Expr]:
    binding_exprs: List[Expr] = []
    for b in bindings:
        if isinstance(b, Binding):
            binding_exprs.append(
                Expr(
                    expr=b.name,
                    type=b.nctype,
                )
            )
        else:
            binding_exprs.append(b)

    goal_ctypes: List[NamedCType] = []
    for g in goals:
        if isinstance(g, Binding):
            goal_ctypes.append(g.nctype)
        else:
            goal_ctypes.append(g)

    # Add all the bindings to the context
    ctx: Dict[NamedCType, str] = {}
    for b in binding_exprs:
        ctx[b.type] = b.expr

        # While we're at it, do some simple forward inference, looking through
        # constructors.
        #
        # NB: When should you do forward inference versus backward inference?
        # The general idea:
        #
        #   - Backward inference WHEN the goal gets smaller
        #   - Forward inference WHEN the hypothesis gets smaller
        #
        # This helps ensure termination: backward inference starts with a goal
        # and tries to make it simpler and simpler until it's trivial; if the
        # goal can grow in size, we blow up to a really huge goal size.
        # Similarly, with forward inference we take hypotheses and decompose
        # them into simpler hypotheses; if hypotheses could expand in size,
        # we also have potential nontermination. (In the code below, forward
        # inference is only ever carried out at a single step, but you could
        # imagine repeated application of forward inference being profitable.)
        #
        # A good starting point in the literature for exploring more about proof
        # search are these lecture notes
        # https://www.cs.cmu.edu/~fp/courses/oregon-m10/04-focusing.pdf
        #
        # TODO: My kingdom for a pattern matcher
        # https://www.python.org/dev/peps/pep-0634/
        #
        # TODO: This could get us in recomputation trouble if b.expr is nontrivial.
        # Fix this by implementing some sort of sharing so that if multiple
        # goals share the same expression, we only compute it once. This seems
        # to matter in practice as compiler is often unwilling to CSE nontrivial
        # expressions like scalar.to<scalar_t>()
        t = b.type
        if (
            isinstance(t, ConstRefCType)
            and isinstance(t.elem, OptionalCType)
            and isinstance(t.elem.elem, BaseCType)
            and str(t.elem.elem.type) == "at::Tensor"
        ):
            ctx[
                NamedCType(t.elem.elem.name, ConstRefCType(BaseCType(tensorT)))
            ] = f"({b.expr}.has_value() ? *{b.expr} : at::Tensor())"

        if t.type == ConstRefCType(OptionalCType(BaseCType(tensorT))):
            ctx[
                NamedCType(t.name, BaseCType(optionalTensorRefT))
            ] = f"(({b.expr}.has_value() && (*{b.expr}).defined()) ? at::OptionalTensorRef(*{b.expr}) : at::OptionalTensorRef())"

        if t.type == ConstRefCType(BaseCType(scalarT)):
            ctx[NamedCType(t.name, BaseCType(opmath_t))] = f"({b.expr}).to<opmath_t>()"

        if t.type == ConstRefCType(OptionalCType(BaseCType(scalarT))):
            ctx[
                NamedCType(t.name, BaseCType(optionalScalarRefT))
            ] = f"({b.expr}.has_value() ? at::OptionalScalarRef(&({b.expr}.value())) : at::OptionalScalarRef())"

        if t.type == BaseCType(scalar_t):
            ctx[
                NamedCType(t.name, BaseCType(opmath_t))
            ] = f"static_cast<opmath_t>({b.expr})"

        # [Note: IOptTensorListRef]
        if t.type == ConstRefCType(ListCType(OptionalCType(BaseCType(tensorT)))):
            ctx[
                NamedCType(t.name, BaseCType(iOptTensorListRefT))
            ] = f"at::IOptTensorListRef({b.expr})"

    # Add implicit bindings if the generated code is inside a Tensor method
    if method:
        ctx[
            NamedCType("self", MutRefCType(BaseCType(tensorT)))
        ] = "const_cast<Tensor&>(*this)"
        ctx[
            NamedCType("self", ConstRefCType(BaseCType(tensorT)))
        ] = "const_cast<Tensor&>(*this)"
        # This is better! Byte-for-byte compat
        # ctx[NamedCType("self", ConstRefCType(BaseCType(tensorT)))] = "*this"

    def unsat(goal: NamedCType) -> NoReturn:
        ctx_desc = "\n".join(
            f"  {t.cpp_type()} {t.name}; // {e}" for t, e in ctx.items()
        )
        raise UnsatError(
            f"""
Failed to synthesize the expression "{goal.cpp_type()} {goal.name}".
When I failed, the following bindings were available in the context:

{ctx_desc}

This probably means there is a missing rule in the rules of torchgen.api.translate.
Check this module for more information.
"""
        )

    # A shitty backtracking search implementation. It's shitty because it
    # does backtracking via stack (bad idea!) and for the most part tries to
    # avoid backtracking. In particular, if
    # direct=True, we won't try to do any fancy synthesis, just trivial
    # conversions (e.g., "T a" is OK for "const T& a"). So all of the
    # existing rules in this function simply try to solve immediately,
    # and bail if things don't work out.
    def solve(goal: NamedCType, *, direct: bool) -> str:
        def direct_solve(goal: NamedCType) -> str:
            return solve(goal, direct=True)

        if goal in ctx:
            # Trivial
            return ctx[goal]

        # const & is satisfied with mutable &
        if isinstance(goal.type, ConstRefCType):
            try:
                # WARNING: not strictly decreasing; be careful not
                # to add a direct conversion that satisfies
                # mutable& with const&
                return solve(
                    NamedCType(goal.name, MutRefCType(goal.type.elem)), direct=direct
                )
            except UnsatError:
                pass

        # mutable & is satisfied with value
        if isinstance(goal.type, MutRefCType):
            try:
                return solve(NamedCType(goal.name, goal.type.elem), direct=direct)
            except UnsatError:
                pass

        # TODO: These are referentially equal, shouldn't have to do this;
        # ensuring we don't use type synonym IntArrayRef in codegen would
        # help
        if goal.type == ArrayRefCType(BaseCType(longT)):
            return solve(NamedCType(goal.name, BaseCType(intArrayRefT)), direct=direct)

        if direct:
            unsat(goal)

        # For now, all of these rules are mutually exclusive.
        if goal == NamedCType("memory_format", OptionalCType(BaseCType(memoryFormatT))):
            memory_format = direct_solve(
                NamedCType(
                    SpecialArgName.possibly_redundant_memory_format,
                    OptionalCType(BaseCType(memoryFormatT)),
                )
            )
            # No need to join "memory_format" and "options" if the target API takes "options" directly.
            # Otherwise it will cause the redundant memory_format error.
            if options_ctype in goal_ctypes:
                return memory_format
            try:
                options = direct_solve(options_ctype)
                return f"c10::impl::check_tensor_options_and_extract_memory_format({options}, {memory_format})"
            except UnsatError:
                return memory_format
        elif goal == NamedCType("options", BaseCType(tensorOptionsT)):
            dtype = direct_solve(
                NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))
            )
            pin_memory = direct_solve(
                NamedCType("pin_memory", OptionalCType(BaseCType(boolT)))
            )
            device = direct_solve(
                NamedCType("device", OptionalCType(BaseCType(deviceT)))
            )
            layout = direct_solve(
                NamedCType("layout", OptionalCType(BaseCType(layoutT)))
            )
            return f"TensorOptions().dtype({dtype}).layout({layout}).device({device}).pinned_memory({pin_memory})"

        elif goal == NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT))):
            try:
                options = direct_solve(options_ctype)
                return f"c10::optTypeMetaToScalarType({options}.dtype_opt())"
            except UnsatError:
                out_tensor = direct_solve(out_tensor_ctype)
                return f"{out_tensor}.scalar_type()"

        elif goal == NamedCType("layout", OptionalCType(BaseCType(layoutT))):
            try:
                options = direct_solve(options_ctype)
                return f"{options}.layout_opt()"
            except UnsatError:
                out_tensor = direct_solve(out_tensor_ctype)
                return f"{out_tensor}.layout()"

        elif goal == NamedCType("device", OptionalCType(BaseCType(deviceT))):
            try:
                options = direct_solve(options_ctype)
                return f"{options}.device_opt()"
            except UnsatError:
                out_tensor = direct_solve(out_tensor_ctype)
                return f"{out_tensor}.device()"

        elif goal == NamedCType("pin_memory", OptionalCType(BaseCType(boolT))):
            try:
                options = direct_solve(options_ctype)
                return f"{options}.pinned_memory_opt()"
            except UnsatError:
                # If we're calling a factory op from its out= variant,
                # we don't actually care about the value of pin_memory.
                out_tensor = direct_solve(out_tensor_ctype)
                return "c10::nullopt"

        # We can always do translations from value types to reference types, like vector<int> -> IntArrayRef
        elif goal.type == BaseCType(intArrayRefT):
            try:
                return direct_solve(NamedCType(goal.name, longVec_ctype))
            except UnsatError:
                # We can also go SymIntArrayRef -> IntArrayRef
                symIntArrayRef_type = direct_solve(
                    NamedCType(goal.name, BaseCType(symIntArrayRefT))
                )
                return f"C10_AS_INTARRAYREF_SLOW({symIntArrayRef_type})"
        elif goal.type == BaseCType(symIntArrayRefT):
            try:
                r = direct_solve(NamedCType(goal.name, BaseCType(intArrayRefT)))
                return f"c10::fromIntArrayRefSlow({r})"
            except UnsatError:
                return direct_solve(NamedCType(goal.name, longSymVec_ctype))
        elif goal.type == BaseCType(SymIntT):
            return direct_solve(NamedCType(goal.name, BaseCType(longT)))
        elif goal.type == OptionalCType(BaseCType(SymIntT)):
            argname = direct_solve(
                NamedCType(goal.name, OptionalCType(BaseCType(longT)))
            )
            return f"{argname}.has_value() ? c10::make_optional(c10::SymInt(*{argname})) : c10::nullopt"
        elif goal.type == BaseCType(longT):
            symInt_type = direct_solve(NamedCType(goal.name, BaseCType(SymIntT)))
            return f"{symInt_type}.guard_int(__FILE__, __LINE__)"
        elif goal.type == OptionalCType(BaseCType(longT)):
            argname = direct_solve(
                NamedCType(goal.name, OptionalCType(BaseCType(SymIntT)))
            )
            return f"{argname}.has_value() ? c10::make_optional({argname}->guard_int(__FILE__, __LINE__)) : c10::nullopt"
        elif goal.type == BaseCType(optionalIntArrayRefT):
            try:
                return direct_solve(NamedCType(goal.name, optionalLongVec_ctype))
            except UnsatError:
                argname = direct_solve(
                    NamedCType(goal.name, BaseCType(optionalSymIntArrayRefT))
                )
                return f"{argname}.has_value() ? c10::make_optional(C10_AS_INTARRAYREF_SLOW(*{argname})) : c10::nullopt"
        elif goal.type == BaseCType(optionalSymIntArrayRefT):
            # TODO: You might also want to solve this from longSymVec_ctype or
            # an optional version of it
            argname = direct_solve(
                NamedCType(goal.name, BaseCType(optionalIntArrayRefT))
            )
            return f"{argname}.has_value() ? c10::make_optional(c10::fromIntArrayRefSlow(*{argname})) : c10::nullopt"
        elif goal.type == BaseCType(optionalScalarRefT):
            return direct_solve(NamedCType(goal.name, optionalScalar_ctype))
        elif goal.type == BaseCType(optionalTensorRefT):
            return direct_solve(NamedCType(goal.name, optionalTensor_ctype))

        # Note [translation from C++ reference to value types]
        # The below cases are all for when we have an argument with a reference type,
        # and a corresponding goal with a value type.
        # These are needed when we populate the inputs to a lambda capture and we need
        # to guarantee the lifetime of each captured argument.
        # We guard it with an explicit kwarg because converting to a value type is
        # expensive (e.g., O(n) to convert from IntArrayRef to vector<int>),
        # so the caller of translate() should be explicit that they need it.
        if allow_expensive_conversions:
            if goal.type == VectorCType(BaseCType(longT)):
                intArrayRef_ctype = NamedCType(goal.name, BaseCType(intArrayRefT))
                argname = direct_solve(intArrayRef_ctype)
                return f"{argname}.vec()"
            if goal.type == VectorCType(BaseCType(SymIntT)):
                symIntArrayRef_ctype = NamedCType(goal.name, BaseCType(symIntArrayRefT))
                argname = direct_solve(symIntArrayRef_ctype)
                return f"{argname}.vec()"
            elif goal.type == OptionalCType(VectorCType(BaseCType(longT))):
                optionalIntArrayRef_ctype = NamedCType(
                    goal.name, BaseCType(optionalIntArrayRefT)
                )
                argname = direct_solve(optionalIntArrayRef_ctype)
                return f"{argname}.has_value() ? c10::make_optional({argname}->vec()) : c10::nullopt"
            elif goal.type == OptionalCType(BaseCType(scalarT)):
                optionalScalarRef_ctype = NamedCType(
                    goal.name, BaseCType(optionalScalarRefT)
                )
                argname = direct_solve(optionalScalarRef_ctype)
                return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
            elif goal.type == OptionalCType(BaseCType(scalarT)):
                optionalTensorRef_ctype = NamedCType(
                    goal.name, BaseCType(optionalTensorRefT)
                )
                argname = direct_solve(optionalTensorRef_ctype)
                return f"{argname}.has_value() ? c10::make_optional({argname}) : c10::nullopt"
            # Technically, we also need to handle cases of C++ containers holding reference types.
            # But there currently aren't any ops that require lambda capture codegen
            # with arguments like std::vector<IntArrayRef>.
            # If that changes, we'll have to add the translation here.

        # We allow const casting on tensors, since const-correctness is a bit broken for at::Tensor.
        # We could probably generalize this to non-tensor types too.
        if goal.type == MutRefCType(BaseCType(tensorT)):
            const_ref_tensor_ctype = NamedCType(
                goal.name, ConstRefCType(BaseCType(tensorT))
            )
            argname = direct_solve(const_ref_tensor_ctype)
            return f"const_cast<Tensor&>({argname})"

        unsat(goal)

    return [Expr(solve(g, direct=False), g) for g in goal_ctypes]
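
A minimal sketch of the Gather rule described above (an editor's illustration, assuming torchgen is importable): with only an "options" binding in scope, a "dtype" goal is synthesized by extracting it from the TensorOptions expression.

from torchgen.api.translate import options_ctype, translate
from torchgen.api.types import BaseCType, Expr, NamedCType, OptionalCType, scalarTypeT

bindings = [Expr(expr="options", type=options_ctype)]
goal = NamedCType("dtype", OptionalCType(BaseCType(scalarTypeT)))
print(translate(bindings, [goal])[0].expr)
# -> "c10::optTypeMetaToScalarType(options.dtype_opt())"
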
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/__init__.cpython-311.pyc
ADDED
Binary file (313 Bytes)
tuning-competition-baseline/.venv/lib/python3.11/site-packages/torchgen/api/types/__pycache__/types.cpython-311.pyc
ADDED
Binary file (10.2 kB)