diff --git a/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..e1a9a27f56e8ab864801962bbd8c89b71c1818b4 --- /dev/null +++ b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/METADATA @@ -0,0 +1,159 @@ +Metadata-Version: 2.4 +Name: exceptiongroup +Version: 1.3.1 +Summary: Backport of PEP 654 (exception groups) +Author-email: Alex Grönholm +Requires-Python: >=3.7 +Description-Content-Type: text/x-rst +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Typing :: Typed +License-File: LICENSE +Requires-Dist: typing-extensions >= 4.6.0; python_version < '3.13' +Requires-Dist: pytest >= 6 ; extra == "test" +Project-URL: Changelog, https://github.com/agronholm/exceptiongroup/blob/main/CHANGES.rst +Project-URL: Issue Tracker, https://github.com/agronholm/exceptiongroup/issues +Project-URL: Source code, https://github.com/agronholm/exceptiongroup +Provides-Extra: test + +.. image:: https://github.com/agronholm/exceptiongroup/actions/workflows/test.yml/badge.svg + :target: https://github.com/agronholm/exceptiongroup/actions/workflows/test.yml + :alt: Build Status +.. image:: https://coveralls.io/repos/github/agronholm/exceptiongroup/badge.svg?branch=main + :target: https://coveralls.io/github/agronholm/exceptiongroup?branch=main + :alt: Code Coverage + +This is a backport of the ``BaseExceptionGroup`` and ``ExceptionGroup`` classes from +Python 3.11. + +It contains the following: + +* The ``exceptiongroup.BaseExceptionGroup`` and ``exceptiongroup.ExceptionGroup`` + classes +* A utility function (``exceptiongroup.catch()``) for catching exceptions possibly + nested in an exception group +* Patches to the ``TracebackException`` class that properly formats exception groups + (installed on import) +* An exception hook that handles formatting of exception groups through + ``TracebackException`` (installed on import) +* Special versions of some of the functions from the ``traceback`` module, modified to + correctly handle exception groups even when monkey patching is disabled, or blocked by + another custom exception hook: + + * ``traceback.format_exception()`` + * ``traceback.format_exception_only()`` + * ``traceback.print_exception()`` + * ``traceback.print_exc()`` +* A backported version of ``contextlib.suppress()`` from Python 3.12.1 which also + handles suppressing exceptions inside exception groups + +If this package is imported on Python 3.11 or later, the built-in implementations of the +exception group classes are used instead, ``TracebackException`` is not monkey patched +and the exception hook won't be installed. + +See the `standard library documentation`_ for more information on exception groups. + +.. 
_standard library documentation: https://docs.python.org/3/library/exceptions.html
+
+Catching exceptions
+===================
+
+Due to the lack of the ``except*`` syntax introduced by `PEP 654`_ in earlier Python
+versions, you need to use ``exceptiongroup.catch()`` to catch exceptions that are
+potentially nested inside an exception group. This function returns a context manager
+that calls the given handler for any exceptions matching the sole argument.
+
+The argument to ``catch()`` must be a dict (or any ``Mapping``) where each key is either
+an exception class or an iterable of exception classes. Each value must be a callable
+that takes a single positional argument. The handler will be called at most once, with
+an exception group as an argument which will contain all the exceptions that are any
+of the given types, or their subclasses. The exception group may contain nested groups
+containing more matching exceptions.
+
+Thus, the following Python 3.11+ code:
+
+.. code-block:: python
+
+    try:
+        ...
+    except* (ValueError, KeyError) as excgroup:
+        for exc in excgroup.exceptions:
+            print('Caught exception:', type(exc))
+    except* RuntimeError:
+        print('Caught runtime error')
+
+would be written with this backport like this:
+
+.. code-block:: python
+
+    from exceptiongroup import BaseExceptionGroup, catch
+
+    def value_key_err_handler(excgroup: BaseExceptionGroup) -> None:
+        for exc in excgroup.exceptions:
+            print('Caught exception:', type(exc))
+
+    def runtime_err_handler(exc: BaseExceptionGroup) -> None:
+        print('Caught runtime error')
+
+    with catch({
+        (ValueError, KeyError): value_key_err_handler,
+        RuntimeError: runtime_err_handler
+    }):
+        ...
+
+**NOTE**: Just like with ``except*``, you cannot handle ``BaseExceptionGroup`` or
+``ExceptionGroup`` with ``catch()``.
+
+Suppressing exceptions
+======================
+
+This library contains a backport of the ``contextlib.suppress()`` context manager from
+Python 3.12.1. It allows you to selectively ignore certain exceptions, even when they're
+inside exception groups:
+
+.. code-block:: python
+
+    from exceptiongroup import ExceptionGroup, suppress
+
+    with suppress(RuntimeError):
+        raise ExceptionGroup("", [RuntimeError("boo")])
+
+Notes on monkey patching
+========================
+
+To make exception groups render properly when an unhandled exception group is being
+printed out, this package does two things when it is imported on any Python version
+earlier than 3.11:
+
+#. The ``traceback.TracebackException`` class is monkey patched to store extra
+   information about exception groups (in ``__init__()``) and properly format them (in
+   ``format()``)
+#. An exception hook is installed at ``sys.excepthook``, provided that no other hook is
+   already present. This hook causes the exception to be formatted using
+   ``traceback.TracebackException`` rather than the built-in renderer.
+
+If ``sys.excepthook`` is found to be set to something other than the default when
+``exceptiongroup`` is imported, no monkey patching is done at all.
+
+To prevent the exception hook and patches from being installed, set the environment
+variable ``EXCEPTIONGROUP_NO_PATCH`` to ``1``.
+
+Formatting exception groups
+---------------------------
+
+Normally, the monkey patching applied by this library on import will cause exception
+groups to be printed properly in tracebacks.
But in cases when the monkey patching is +blocked by a third party exception hook, or monkey patching is explicitly disabled, +you can still manually format exceptions using the special versions of the ``traceback`` +functions, like ``format_exception()``, listed at the top of this page. They work just +like their counterparts in the ``traceback`` module, except that they use a separately +patched subclass of ``TracebackException`` to perform the rendering. + +Particularly in cases where a library installs its own exception hook, it is recommended +to use these special versions to do the actual formatting of exceptions/tracebacks. + +.. _PEP 654: https://www.python.org/dev/peps/pep-0654/ + diff --git a/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..10fd78352ebea856d3da2c6a38c1c0873026f24c --- /dev/null +++ b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/RECORD @@ -0,0 +1,18 @@ +exceptiongroup-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +exceptiongroup-1.3.1.dist-info/METADATA,sha256=gZhKUjovelIq0SvqeEqLuF7ewIBeu9D7TjUBaaNt2AI,6725 +exceptiongroup-1.3.1.dist-info/RECORD,, +exceptiongroup-1.3.1.dist-info/WHEEL,sha256=G2gURzTEtmeR8nrdXUJfNiB3VYVxigPQ-bEQujpNiNs,82 +exceptiongroup-1.3.1.dist-info/licenses/LICENSE,sha256=blBw12UDHgrUA6HL-Qrm0ZoCKPgC4yC3rP9GCqcu1Hw,3704 +exceptiongroup/__init__.py,sha256=7DHS0hDk-RIs3IQc3SbZVB0-1MhiSCJ9XgvEyEloL7M,1049 +exceptiongroup/__pycache__/__init__.cpython-310.pyc,, +exceptiongroup/__pycache__/_catch.cpython-310.pyc,, +exceptiongroup/__pycache__/_exceptions.cpython-310.pyc,, +exceptiongroup/__pycache__/_formatting.cpython-310.pyc,, +exceptiongroup/__pycache__/_suppress.cpython-310.pyc,, +exceptiongroup/__pycache__/_version.cpython-310.pyc,, +exceptiongroup/_catch.py,sha256=CaJez3E-Jkr-7B7RT3fzusdLWnuyeekooSFn7KyWt9s,4680 +exceptiongroup/_exceptions.py,sha256=wPwPsZ64SXEptuwb4XrTIa1Mc78uqF5vmCrXTdllLn4,11463 +exceptiongroup/_formatting.py,sha256=OYTuT_T6TzM8G2v3DVt8LRBwMNyNK0tNl0fKMls3chM,21063 +exceptiongroup/_suppress.py,sha256=LX11PRNpchwfNWwEMY92nYN1F_5qFenQcS8EjIONXKE,1772 +exceptiongroup/_version.py,sha256=-4u7pjQ4caDQqa-1Qgms81j5hpkXjmjUYRCVEaLmb88,704 +exceptiongroup/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..d8b9936dad9ab2513fa6979f411560d3b6b57e37 --- /dev/null +++ b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: flit 3.12.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..50d4fa5e68439ce837f6eef437b299c0dd7c8594 --- /dev/null +++ b/venv/lib/python3.10/site-packages/exceptiongroup-1.3.1.dist-info/licenses/LICENSE @@ -0,0 +1,73 @@ +The MIT License (MIT) + +Copyright (c) 2022 Alex Grönholm + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, 
including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + +This project contains code copied from the Python standard library. +The following is the required license notice for those parts. + +PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2 +-------------------------------------------- + +1. This LICENSE AGREEMENT is between the Python Software Foundation +("PSF"), and the Individual or Organization ("Licensee") accessing and +otherwise using this software ("Python") in source or binary form and +its associated documentation. + +2. Subject to the terms and conditions of this License Agreement, PSF hereby +grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce, +analyze, test, perform and/or display publicly, prepare derivative works, +distribute, and otherwise use Python alone or in any derivative version, +provided, however, that PSF's License Agreement and PSF's notice of copyright, +i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020, 2021, 2022 Python Software Foundation; +All Rights Reserved" are retained in Python alone or in any derivative version +prepared by Licensee. + +3. In the event Licensee prepares a derivative work that is based on +or incorporates Python or any part thereof, and wants to make +the derivative work available to others as provided herein, then +Licensee hereby agrees to include in any such work a brief summary of +the changes made to Python. + +4. PSF is making Python available to Licensee on an "AS IS" +basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR +IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND +DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS +FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT +INFRINGE ANY THIRD PARTY RIGHTS. + +5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON +FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS +A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON, +OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF. + +6. This License Agreement will automatically terminate upon a material +breach of its terms and conditions. + +7. Nothing in this License Agreement shall be deemed to create any +relationship of agency, partnership, or joint venture between PSF and +Licensee. This License Agreement does not grant permission to use PSF +trademarks or trade name in a trademark sense to endorse or promote +products or services of Licensee, or any third party. + +8. By copying, installing or otherwise using Python, Licensee +agrees to be bound by the terms and conditions of this License +Agreement. 
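As a usage note on the manual-formatting path the exceptiongroup README above describes, here is a minimal sketch of calling the backported ``format_exception()`` helper directly. The single-argument call shown mirrors ``traceback.format_exception()`` on Python 3.10+; treat the exact signature as an assumption based on the README's description, not a verified API reference.

.. code-block:: python

    # Sketch: manually formatting an exception group with the backported helper,
    # useful when the monkey patching is disabled or blocked by another hook.
    from exceptiongroup import ExceptionGroup, format_exception

    try:
        raise ExceptionGroup("demo", [ValueError("bad value"), KeyError("missing")])
    except BaseException as exc:
        # Like traceback.format_exception(), this returns a list of strings.
        print("".join(format_exception(exc)))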
diff --git a/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_exceptions.cpython-310.pyc b/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..eaa5372d3a948c90cc11c3221d7f2a3db3b11dce Binary files /dev/null and b/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_exceptions.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_formatting.cpython-310.pyc b/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_formatting.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5a0761d7968f8bc636ce2a0dcd64a4a014ce6ce Binary files /dev/null and b/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_formatting.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_suppress.cpython-310.pyc b/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_suppress.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b36714785b2ba36259df18109e9464098ce6a46e Binary files /dev/null and b/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_suppress.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa2c7b4fcbfc67d658372cc7127a2876f3e1a577 Binary files /dev/null and b/venv/lib/python3.10/site-packages/exceptiongroup/__pycache__/_version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/METADATA b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..7a9022a30ed6fd46c28fed49f956e581b9a2b092 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/METADATA @@ -0,0 +1,38 @@ +Metadata-Version: 2.4 +Name: filelock +Version: 3.24.3 +Summary: A platform independent file lock. 
+Project-URL: Documentation, https://py-filelock.readthedocs.io
+Project-URL: Homepage, https://github.com/tox-dev/py-filelock
+Project-URL: Source, https://github.com/tox-dev/py-filelock
+Project-URL: Tracker, https://github.com/tox-dev/py-filelock/issues
+Maintainer-email: Bernát Gábor
+License-Expression: MIT
+License-File: LICENSE
+Keywords: application,cache,directory,log,user
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
+Classifier: Programming Language :: Python :: 3.14
+Classifier: Topic :: Internet
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: System
+Requires-Python: >=3.10
+Description-Content-Type: text/markdown
+
+# filelock
+
+[![PyPI](https://img.shields.io/pypi/v/filelock)](https://pypi.org/project/filelock/)
+[![Supported Python versions](https://img.shields.io/pypi/pyversions/filelock.svg)](https://pypi.org/project/filelock/)
+[![Documentation status](https://readthedocs.org/projects/py-filelock/badge/?version=latest)](https://py-filelock.readthedocs.io/en/latest/?badge=latest)
+[![Downloads](https://static.pepy.tech/badge/filelock/month)](https://pepy.tech/project/filelock)
+[![check](https://github.com/tox-dev/py-filelock/actions/workflows/check.yaml/badge.svg)](https://github.com/tox-dev/py-filelock/actions/workflows/check.yaml)
+
+For more information, check out the [official documentation](https://py-filelock.readthedocs.io/en/latest/index.html).
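Since the bundled README above stops at a documentation pointer, here is the canonical usage sketch from the filelock documentation for the API this diff vendors in (the file names are illustrative):

```python
from filelock import FileLock

lock = FileLock("high_ground.txt.lock")
with lock:  # blocks until the lock is acquired; released on exit
    with open("high_ground.txt", "a") as f:
        f.write("You were the chosen one.")
```

Note that the lock file is left on disk after release; as the `release()` docstring further down states, only its held/not-held state matters.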
diff --git a/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/RECORD b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..a262043f331ff21553ce3b2182614c7bcac2534c --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/RECORD @@ -0,0 +1,26 @@ +filelock-3.24.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +filelock-3.24.3.dist-info/METADATA,sha256=2SzY43viecqbmVNOokZgXcYTE04sufy5Rjf6wZTMWC4,1977 +filelock-3.24.3.dist-info/RECORD,, +filelock-3.24.3.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87 +filelock-3.24.3.dist-info/licenses/LICENSE,sha256=YIyJ1QYK6ZIa3M8yNmlbxlSplG4SMj72wCHfoE4pTUg,1088 +filelock/__init__.py,sha256=12pKiL8AjSCkmWLozJhcCg6qQY70_XizYh9AHUvzZ1Q,2009 +filelock/__pycache__/__init__.cpython-310.pyc,, +filelock/__pycache__/_api.cpython-310.pyc,, +filelock/__pycache__/_error.cpython-310.pyc,, +filelock/__pycache__/_read_write.cpython-310.pyc,, +filelock/__pycache__/_soft.cpython-310.pyc,, +filelock/__pycache__/_unix.cpython-310.pyc,, +filelock/__pycache__/_util.cpython-310.pyc,, +filelock/__pycache__/_windows.cpython-310.pyc,, +filelock/__pycache__/asyncio.cpython-310.pyc,, +filelock/__pycache__/version.cpython-310.pyc,, +filelock/_api.py,sha256=l8P7bqosgleI3vPn8OVGQ0-sa2d3WVhvM_j9SCCcTx4,21159 +filelock/_error.py,sha256=mnelOh0EVyVeskG3rksL4kW3OArL4TMb2-PwmzQWIFg,788 +filelock/_read_write.py,sha256=o6pcX04--nXFspDXtV_J113EDH_-rzz-Hvp-cmH76P4,15313 +filelock/_soft.py,sha256=8aSSoyLZjBDV-ql3LJt_Ec_kg7ywHDE8ZZFcCtVWxQ8,4665 +filelock/_unix.py,sha256=DL0mk9OAE21wY-uhdClJ2ezBUFfi-vNszdBJCw7slqE,4233 +filelock/_util.py,sha256=wPf-LvzmdHWHw4DinDeVJlB0GNVOYGNUeNKSnHBQuUU,1716 +filelock/_windows.py,sha256=-CEBIFfFPwmfSSmTmbbGzgUR30E0jBacBW9j4_ER0YM,3508 +filelock/asyncio.py,sha256=NvrDsqS095NZc16l_OjBQcTY-D6xB4Vy7AK3ni8tr8A,13943 +filelock/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +filelock/version.py,sha256=cEVcBJgzKOmekaY1NFrR1sXy1-sBY4F7inrTVfovZUA,706 diff --git a/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/WHEEL b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..ae8ec1bdaa94d726ceb907542d76cbd5d38cafcd --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.28.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..291919c0b6f41d014767f6c877af9f7595fcff99 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock-3.24.3.dist-info/licenses/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2025 Bernát Gábor and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the 
Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/filelock/__init__.py b/venv/lib/python3.10/site-packages/filelock/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ce3c8e8a2df62956740156560302a56f5ba7384b --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/__init__.py @@ -0,0 +1,77 @@ +""" +A platform independent file lock that supports the with-statement. + +.. autodata:: filelock.__version__ + :no-value: + +""" + +from __future__ import annotations + +import sys +import warnings +from typing import TYPE_CHECKING + +from ._api import AcquireReturnProxy, BaseFileLock +from ._error import Timeout + +try: + from ._read_write import ReadWriteLock +except ImportError: # sqlite3 may be unavailable if Python was built without it or the C library is missing + ReadWriteLock = None # type: ignore[assignment, misc] + +from ._soft import SoftFileLock +from ._unix import UnixFileLock, has_fcntl +from ._windows import WindowsFileLock +from .asyncio import ( + AsyncAcquireReturnProxy, + AsyncSoftFileLock, + AsyncUnixFileLock, + AsyncWindowsFileLock, + BaseAsyncFileLock, +) +from .version import version + +#: version of the project as a string +__version__: str = version + + +if sys.platform == "win32": # pragma: win32 cover + _FileLock: type[BaseFileLock] = WindowsFileLock + _AsyncFileLock: type[BaseAsyncFileLock] = AsyncWindowsFileLock +else: # pragma: win32 no cover # noqa: PLR5501 + if has_fcntl: + _FileLock: type[BaseFileLock] = UnixFileLock + _AsyncFileLock: type[BaseAsyncFileLock] = AsyncUnixFileLock + else: + _FileLock = SoftFileLock + _AsyncFileLock = AsyncSoftFileLock + if warnings is not None: + warnings.warn("only soft file lock is available", stacklevel=2) + +if TYPE_CHECKING: + FileLock = SoftFileLock + AsyncFileLock = AsyncSoftFileLock +else: + #: Alias for the lock, which should be used for the current platform. 
+ FileLock = _FileLock + AsyncFileLock = _AsyncFileLock + + +__all__ = [ + "AcquireReturnProxy", + "AsyncAcquireReturnProxy", + "AsyncFileLock", + "AsyncSoftFileLock", + "AsyncUnixFileLock", + "AsyncWindowsFileLock", + "BaseAsyncFileLock", + "BaseFileLock", + "FileLock", + "ReadWriteLock", + "SoftFileLock", + "Timeout", + "UnixFileLock", + "WindowsFileLock", + "__version__", +] diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b49b029ba73ff9559ed6ee15983d9fb0067ab10 Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d5dc4673c45ba906a256e42b4487307d1de15507 Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_api.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..93faf06e88240f1a6d651e92dfcd35e6341c3bfa Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_error.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_read_write.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_read_write.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c3e3fe983835628b7324e211cae3ab8b70633534 Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_read_write.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37ae473189f71f6fe254be10f777754ca629904b Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_soft.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..028a76b1f86a5049ae89167a3454f57bbab11660 Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_unix.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a7c1213c4bc47abc3e16b31aa70882c5a64ebcd Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d71de13082e0914131080c4d6e5b478d9027f50e Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/_windows.cpython-310.pyc differ 
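The `filelock/__init__.py` above exports `Timeout` alongside the platform-appropriate `FileLock` alias, so callers can bound how long an acquisition may wait. A short sketch of that documented behavior (the path is illustrative):

```python
from filelock import FileLock, Timeout

lock = FileLock("shared.txt.lock", timeout=1)  # give up after one second
try:
    with lock:
        print("lock acquired")
except Timeout:
    print("another process currently holds the lock")
```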
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/asyncio.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/asyncio.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7c03d7193a6c1f3ea05eb18d0160989b82af9c78
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/asyncio.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc b/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88d6c525cc1174e4854a629edc9fe82088841cc8
Binary files /dev/null and b/venv/lib/python3.10/site-packages/filelock/__pycache__/version.cpython-310.pyc differ
diff --git a/venv/lib/python3.10/site-packages/filelock/_api.py b/venv/lib/python3.10/site-packages/filelock/_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bcc94cdf258f808e3f637186182c05dc961203c
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_api.py
@@ -0,0 +1,578 @@
+from __future__ import annotations
+
+import contextlib
+import inspect
+import logging
+import os
+import pathlib
+import sys
+import time
+import warnings
+from abc import ABCMeta, abstractmethod
+from dataclasses import dataclass
+from threading import local
+from typing import TYPE_CHECKING, Any, cast
+from weakref import WeakValueDictionary
+
+from ._error import Timeout
+
+#: Sentinel indicating that no explicit file permission mode was passed.
+#: When used, lock files are created with 0o666 (letting umask and default ACLs control the final permissions)
+#: and fchmod is skipped so that POSIX default ACL inheritance is preserved.
+_UNSET_FILE_MODE: int = -1
+
+if TYPE_CHECKING:
+    from collections.abc import Callable
+    from types import TracebackType
+
+    from ._read_write import ReadWriteLock
+
+    if sys.version_info >= (3, 11):  # pragma: no cover (py311+)
+        from typing import Self
+    else:  # pragma: no cover (<py311)
+        from typing_extensions import Self
+
+
+_LOGGER = logging.getLogger("filelock")
+
+
+def _canonical(path: str) -> str:
+    # Reconstructed from usage further down; the original body was lost to extraction
+    # damage. Any normalization that maps equal lock paths to a single key works here.
+    return os.path.normcase(os.path.abspath(path))
+
+
+class _ThreadLocalRegistry(local):
+    """Per-thread registry mapping canonical lock paths to the id() of the holding instance."""
+
+    def __init__(self) -> None:
+        super().__init__()
+        self.held: dict[str, int] = {}
+
+
+_registry = _ThreadLocalRegistry()
+
+
+# This is a helper class which is returned by :meth:`BaseFileLock.acquire` and wraps the lock to make sure __enter__
+# is not called twice when entering the with statement. If we would simply return *self*, the lock would be acquired
+# again in the *__enter__* method of the BaseFileLock, but not released again automatically. issue #37 (memory leak)
+class AcquireReturnProxy:
+    """A context-aware object that will release the lock file when exiting."""
+
+    def __init__(self, lock: BaseFileLock | ReadWriteLock) -> None:
+        self.lock: BaseFileLock | ReadWriteLock = lock
+
+    def __enter__(self) -> BaseFileLock | ReadWriteLock:
+        return self.lock
+
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        self.lock.release()
+
+
+@dataclass
+class FileLockContext:
+    """A dataclass which holds the context for a ``BaseFileLock`` object."""
+
+    # The context is held in a separate class to allow optional use of thread local storage via the
+    # ThreadLocalFileContext class.
+
+    #: The path to the lock file.
+    lock_file: str
+
+    #: The default timeout value.
+    timeout: float
+
+    #: The mode for the lock files
+    mode: int
+
+    #: Whether the lock should be blocking or not
+    blocking: bool
+
+    #: The default polling interval value.
+    poll_interval: float
+
+    #: The lock lifetime in seconds; ``None`` means the lock never expires.
+    lifetime: float | None = None
+
+    #: The file descriptor for the *_lock_file* as it is returned by the os.open() function, not None when lock held
+    lock_file_fd: int | None = None
+
+    #: The lock counter is used for implementing the nested locking mechanism.
+    lock_counter: int = 0  # Incremented when the lock is acquired; the lock is only released when this reaches 0
+
+
+class ThreadLocalFileContext(FileLockContext, local):
+    """A thread local version of the ``FileLockContext`` class."""
+
+
+class FileLockMeta(ABCMeta):
+    _instances: WeakValueDictionary[str, BaseFileLock]
+
+    def __call__(  # noqa: PLR0913
+        cls,
+        lock_file: str | os.PathLike[str],
+        timeout: float = -1,
+        mode: int = _UNSET_FILE_MODE,
+        thread_local: bool = True,  # noqa: FBT001, FBT002
+        *,
+        blocking: bool = True,
+        is_singleton: bool = False,
+        poll_interval: float = 0.05,
+        lifetime: float | None = None,
+        **kwargs: Any,  # capture remaining kwargs for subclasses  # noqa: ANN401
+    ) -> BaseFileLock:
+        if is_singleton:
+            instance = cls._instances.get(str(lock_file))
+            if instance:
+                params_to_check = {
+                    "thread_local": (thread_local, instance.is_thread_local()),
+                    "timeout": (timeout, instance.timeout),
+                    "mode": (mode, instance._context.mode),  # noqa: SLF001
+                    "blocking": (blocking, instance.blocking),
+                    "poll_interval": (poll_interval, instance.poll_interval),
+                    "lifetime": (lifetime, instance.lifetime),
+                }
+
+                non_matching_params = {
+                    name: (passed_param, set_param)
+                    for name, (passed_param, set_param) in params_to_check.items()
+                    if passed_param != set_param
+                }
+                if not non_matching_params:
+                    return cast("BaseFileLock", instance)
+
+                # parameters do not match; raise error
+                msg = "Singleton lock instances cannot be initialized with differing arguments"
+                msg += "\nNon-matching arguments: "
+                for param_name, (passed_param, set_param) in non_matching_params.items():
+                    msg += f"\n\t{param_name} (existing lock has {set_param} but {passed_param} was passed)"
+                raise ValueError(msg)
+
+        # Workaround to make `__init__`'s params optional in subclasses
+        # E.g. virtualenv changes the signature of the `__init__` method in the `BaseFileLock` class descendant
+        # (https://github.com/tox-dev/filelock/pull/340)
+
+        all_params = {
+            "timeout": timeout,
+            "mode": mode,
+            "thread_local": thread_local,
+            "blocking": blocking,
+            "is_singleton": is_singleton,
+            "poll_interval": poll_interval,
+            "lifetime": lifetime,
+            **kwargs,
+        }
+
+        present_params = inspect.signature(cls.__init__).parameters
+        init_params = {key: value for key, value in all_params.items() if key in present_params}
+
+        instance = super().__call__(lock_file, **init_params)
+
+        if is_singleton:
+            cls._instances[str(lock_file)] = instance
+
+        return cast("BaseFileLock", instance)
+
+
+class BaseFileLock(contextlib.ContextDecorator, metaclass=FileLockMeta):
+    """
+    Abstract base class for a file lock object.
+
+    Provides a reentrant, cross-process exclusive lock backed by OS-level primitives. Subclasses implement the actual
+    locking mechanism (:class:`UnixFileLock`, :class:`WindowsFileLock`, :class:`SoftFileLock`).
+ + """ + + _instances: WeakValueDictionary[str, BaseFileLock] + + def __init_subclass__(cls, **kwargs: dict[str, Any]) -> None: + """Setup unique state for lock subclasses.""" + super().__init_subclass__(**kwargs) + cls._instances = WeakValueDictionary() + + def __init__( # noqa: PLR0913 + self, + lock_file: str | os.PathLike[str], + timeout: float = -1, + mode: int = _UNSET_FILE_MODE, + thread_local: bool = True, # noqa: FBT001, FBT002 + *, + blocking: bool = True, + is_singleton: bool = False, + poll_interval: float = 0.05, + lifetime: float | None = None, + ) -> None: + """ + Create a new lock object. + + :param lock_file: path to the file + :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in the + acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it to a + negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock. + :param mode: file permissions for the lockfile. When not specified, the OS controls permissions via umask and + default ACLs, preserving POSIX default ACL inheritance in shared directories. + :param thread_local: Whether this object's internal context should be thread local or not. If this is set to + ``False`` then the lock will be reentrant across threads. + :param blocking: whether the lock should be blocking or not + :param is_singleton: If this is set to ``True`` then only one instance of this class will be created per lock + file. This is useful if you want to use the lock object for reentrant locking without needing to pass the + same object around. + :param poll_interval: default interval for polling the lock file, in seconds. It will be used as fallback value + in the acquire method, if no poll_interval value (``None``) is given. + :param lifetime: maximum time in seconds a lock can be held before it is considered expired. When set, a waiting + process will break a lock whose file modification time is older than ``lifetime`` seconds. ``None`` (the + default) means locks never expire. + + """ + self._is_thread_local = thread_local + self._is_singleton = is_singleton + + # Create the context. Note that external code should not work with the context directly and should instead use + # properties of this class. + kwargs: dict[str, Any] = { + "lock_file": os.fspath(lock_file), + "timeout": timeout, + "mode": mode, + "blocking": blocking, + "poll_interval": poll_interval, + "lifetime": lifetime, + } + self._context: FileLockContext = (ThreadLocalFileContext if thread_local else FileLockContext)(**kwargs) + + def is_thread_local(self) -> bool: + """:returns: a flag indicating if this lock is thread local or not""" + return self._is_thread_local + + @property + def is_singleton(self) -> bool: + """ + :returns: a flag indicating if this lock is singleton or not + + .. versionadded:: 3.13.0 + + """ + return self._is_singleton + + @property + def lock_file(self) -> str: + """:returns: path to the lock file""" + return self._context.lock_file + + @property + def timeout(self) -> float: + """ + :returns: the default timeout value, in seconds + + .. versionadded:: 2.0.0 + + """ + return self._context.timeout + + @timeout.setter + def timeout(self, value: float | str) -> None: + """ + Change the default timeout value. + + :param value: the new value, in seconds + + """ + self._context.timeout = float(value) + + @property + def blocking(self) -> bool: + """ + :returns: whether the locking is blocking or not + + .. 
versionadded:: 3.14.0 + + """ + return self._context.blocking + + @blocking.setter + def blocking(self, value: bool) -> None: + """ + Change the default blocking value. + + :param value: the new value as bool + + """ + self._context.blocking = value + + @property + def poll_interval(self) -> float: + """ + :returns: the default polling interval, in seconds + + .. versionadded:: 3.24.0 + + """ + return self._context.poll_interval + + @poll_interval.setter + def poll_interval(self, value: float) -> None: + """ + Change the default polling interval. + + :param value: the new value, in seconds + + """ + self._context.poll_interval = value + + @property + def lifetime(self) -> float | None: + """ + :returns: the lock lifetime in seconds, or ``None`` if the lock never expires + + .. versionadded:: 3.24.0 + + """ + return self._context.lifetime + + @lifetime.setter + def lifetime(self, value: float | None) -> None: + """ + Change the lock lifetime. + + :param value: the new value in seconds, or ``None`` to disable expiration + + """ + self._context.lifetime = value + + @property + def mode(self) -> int: + """:returns: the file permissions for the lockfile""" + return 0o644 if self._context.mode == _UNSET_FILE_MODE else self._context.mode + + @property + def has_explicit_mode(self) -> bool: + """:returns: whether the file permissions were explicitly set""" + return self._context.mode != _UNSET_FILE_MODE + + def _open_mode(self) -> int: + """:returns: the mode for os.open() — 0o666 when unset (let umask/ACLs decide), else the explicit mode""" + return 0o666 if self._context.mode == _UNSET_FILE_MODE else self._context.mode + + def _try_break_expired_lock(self) -> None: + """Remove the lock file if its modification time exceeds the configured :attr:`lifetime`.""" + if (lifetime := self._context.lifetime) is None: + return + with contextlib.suppress(OSError): + if time.time() - pathlib.Path(self.lock_file).stat().st_mtime < lifetime: + return + break_path = f"{self.lock_file}.break.{os.getpid()}" + pathlib.Path(self.lock_file).rename(break_path) + pathlib.Path(break_path).unlink() + + @abstractmethod + def _acquire(self) -> None: + """If the file lock could be acquired, self._context.lock_file_fd holds the file descriptor of the lock file.""" + raise NotImplementedError + + @abstractmethod + def _release(self) -> None: + """Releases the lock and sets self._context.lock_file_fd to None.""" + raise NotImplementedError + + @property + def is_locked(self) -> bool: + """ + :returns: A boolean indicating if the lock file is holding the lock currently. + + .. versionchanged:: 2.0.0 + + This was previously a method and is now a property. 
+ + """ + return self._context.lock_file_fd is not None + + @property + def lock_counter(self) -> int: + """:returns: The number of times this lock has been acquired (but not yet released).""" + return self._context.lock_counter + + @staticmethod + def _check_give_up( # noqa: PLR0913 + lock_id: int, + lock_filename: str, + *, + blocking: bool, + cancel_check: Callable[[], bool] | None, + timeout: float, + start_time: float, + ) -> bool: + if blocking is False: + _LOGGER.debug("Failed to immediately acquire lock %s on %s", lock_id, lock_filename) + return True + if cancel_check is not None and cancel_check(): + _LOGGER.debug("Cancellation requested for lock %s on %s", lock_id, lock_filename) + return True + if 0 <= timeout < time.perf_counter() - start_time: + _LOGGER.debug("Timeout on acquiring lock %s on %s", lock_id, lock_filename) + return True + return False + + def acquire( # noqa: C901 + self, + timeout: float | None = None, + poll_interval: float | None = None, + *, + poll_intervall: float | None = None, + blocking: bool | None = None, + cancel_check: Callable[[], bool] | None = None, + ) -> AcquireReturnProxy: + """ + Try to acquire the file lock. + + :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default :attr:`~timeout` is and + if ``timeout < 0``, there is no timeout and this method will block until the lock could be acquired + :param poll_interval: interval of trying to acquire the lock file, ``None`` means use the default + :attr:`~poll_interval` + :param poll_intervall: deprecated, kept for backwards compatibility, use ``poll_interval`` instead + :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the + first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. + :param cancel_check: a callable returning ``True`` when the acquisition should be canceled. Checked on each poll + iteration. When triggered, raises :class:`~Timeout` just like an expired timeout. + + :returns: a context object that will unlock the file when the context is exited + + :raises Timeout: if fails to acquire lock within the timeout period + + .. code-block:: python + + # You can use this method in the context manager (recommended) + with lock.acquire(): + pass + + # Or use an equivalent try-finally construct: + lock.acquire() + try: + pass + finally: + lock.release() + + .. versionchanged:: 2.0.0 + + This method returns now a *proxy* object instead of *self*, so that it can be used in a with statement + without side effects. + + """ + # Use the default timeout, if no timeout is provided. + if timeout is None: + timeout = self._context.timeout + + if blocking is None: + blocking = self._context.blocking + + if poll_intervall is not None: + msg = "use poll_interval instead of poll_intervall" + warnings.warn(msg, DeprecationWarning, stacklevel=2) + poll_interval = poll_intervall + + poll_interval = poll_interval if poll_interval is not None else self._context.poll_interval + + # Increment the number right at the beginning. We can still undo it, if something fails. 
+ self._context.lock_counter += 1 + + lock_id = id(self) + lock_filename = self.lock_file + canonical = _canonical(lock_filename) + + would_block = self._context.lock_counter == 1 and not self.is_locked and timeout < 0 and blocking + if would_block and (existing := _registry.held.get(canonical)) is not None and existing != lock_id: + self._context.lock_counter -= 1 + msg = ( + f"Deadlock: lock '{lock_filename}' is already held by a different " + f"FileLock instance in this thread. Use is_singleton=True to " + f"enable reentrant locking across instances." + ) + raise RuntimeError(msg) + + start_time = time.perf_counter() + try: + while True: + if not self.is_locked: + self._try_break_expired_lock() + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) + self._acquire() + if self.is_locked: + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) + break + if self._check_give_up( + lock_id, + lock_filename, + blocking=blocking, + cancel_check=cancel_check, + timeout=timeout, + start_time=start_time, + ): + raise Timeout(lock_filename) # noqa: TRY301 + msg = "Lock %s not acquired on %s, waiting %s seconds ..." + _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) + time.sleep(poll_interval) + except BaseException: + self._context.lock_counter = max(0, self._context.lock_counter - 1) + if self._context.lock_counter == 0: + _registry.held.pop(canonical, None) + raise + if self._context.lock_counter == 1: + _registry.held[canonical] = lock_id + return AcquireReturnProxy(lock=self) + + def release(self, force: bool = False) -> None: # noqa: FBT001, FBT002 + """ + Release the file lock. The lock is only completely released when the lock counter reaches 0. The lock file + itself is not automatically deleted. + + :param force: If true, the lock counter is ignored and the lock is released in every case. + + """ + if self.is_locked: + self._context.lock_counter -= 1 + + if self._context.lock_counter == 0 or force: + lock_id, lock_filename = id(self), self.lock_file + + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) + self._release() + self._context.lock_counter = 0 + _registry.held.pop(_canonical(lock_filename), None) + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) + + def __enter__(self) -> Self: + """ + Acquire the lock. + + :returns: the lock object + + """ + self.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + """ + Release the lock. 
+ + :param exc_type: the exception type if raised + :param exc_value: the exception value if raised + :param traceback: the exception traceback if raised + + """ + self.release() + + def __del__(self) -> None: + """Called when the lock object is deleted.""" + self.release(force=True) + + +__all__ = [ + "_UNSET_FILE_MODE", + "AcquireReturnProxy", + "BaseFileLock", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_error.py b/venv/lib/python3.10/site-packages/filelock/_error.py new file mode 100644 index 0000000000000000000000000000000000000000..7aaac6b005091938e4907fedb6889a00475c082e --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_error.py @@ -0,0 +1,30 @@ +from __future__ import annotations + +from typing import Any + + +class Timeout(TimeoutError): # noqa: N818 + """Raised when the lock could not be acquired in *timeout* seconds.""" + + def __init__(self, lock_file: str) -> None: + super().__init__() + self._lock_file = lock_file + + def __reduce__(self) -> str | tuple[Any, ...]: + return self.__class__, (self._lock_file,) # Properly pickle the exception + + def __str__(self) -> str: + return f"The file lock '{self._lock_file}' could not be acquired." + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.lock_file!r})" + + @property + def lock_file(self) -> str: + """:returns: The path of the file lock.""" + return self._lock_file + + +__all__ = [ + "Timeout", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_read_write.py b/venv/lib/python3.10/site-packages/filelock/_read_write.py new file mode 100644 index 0000000000000000000000000000000000000000..bf9943332c1ff90ce66a4180a2b271dba85d72e1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_read_write.py @@ -0,0 +1,363 @@ +from __future__ import annotations + +import atexit +import logging +import os +import pathlib +import sqlite3 +import threading +import time +from contextlib import contextmanager, suppress +from typing import TYPE_CHECKING, Literal +from weakref import WeakValueDictionary + +from ._api import AcquireReturnProxy +from ._error import Timeout + +if TYPE_CHECKING: + from collections.abc import Generator + +_LOGGER = logging.getLogger("filelock") + +_all_connections: set[sqlite3.Connection] = set() +_all_connections_lock = threading.Lock() + + +def _cleanup_connections() -> None: + with _all_connections_lock: + for con in list(_all_connections): + with suppress(Exception): + con.close() + _all_connections.clear() + + +atexit.register(_cleanup_connections) + +# sqlite3_busy_timeout() accepts a C int, max 2_147_483_647 on 32-bit. Use a lower value to be safe (~23 days). +_MAX_SQLITE_TIMEOUT_MS = 2_000_000_000 - 1 + + +def timeout_for_sqlite(timeout: float, *, blocking: bool, already_waited: float) -> int: + if blocking is False: + return 0 + + if timeout == -1: + return _MAX_SQLITE_TIMEOUT_MS + + if timeout < 0: + msg = "timeout must be a non-negative number or -1" + raise ValueError(msg) + + remaining = max(timeout - already_waited, 0) if timeout > 0 else timeout + timeout_ms = int(remaining * 1000) + if timeout_ms > _MAX_SQLITE_TIMEOUT_MS or timeout_ms < 0: + _LOGGER.warning("timeout %s is too large for SQLite, using %s ms instead", timeout, _MAX_SQLITE_TIMEOUT_MS) + return _MAX_SQLITE_TIMEOUT_MS + return timeout_ms + + +class _ReadWriteLockMeta(type): + """ + Metaclass that handles singleton resolution when is_singleton=True. 
+ + Singleton logic lives here rather than in ReadWriteLock.get_lock so that ``ReadWriteLock(path)`` transparently + returns cached instances without a 2-arg ``super()`` call that type checkers cannot verify. + + """ + + _instances: WeakValueDictionary[pathlib.Path, ReadWriteLock] + _instances_lock: threading.Lock + + def __call__( + cls, + lock_file: str | os.PathLike[str], + timeout: float = -1, + *, + blocking: bool = True, + is_singleton: bool = True, + ) -> ReadWriteLock: + if not is_singleton: + return super().__call__(lock_file, timeout, blocking=blocking, is_singleton=is_singleton) + + normalized = pathlib.Path(lock_file).resolve() + with cls._instances_lock: + if normalized not in cls._instances: + instance = super().__call__(lock_file, timeout, blocking=blocking, is_singleton=is_singleton) + cls._instances[normalized] = instance + else: + instance = cls._instances[normalized] + + if instance.timeout != timeout or instance.blocking != blocking: + msg = ( + f"Singleton lock created with timeout={instance.timeout}, blocking={instance.blocking}," + f" cannot be changed to timeout={timeout}, blocking={blocking}" + ) + raise ValueError(msg) + return instance + + +class ReadWriteLock(metaclass=_ReadWriteLockMeta): + """ + Cross-process read-write lock backed by SQLite. + + Allows concurrent shared readers or a single exclusive writer. The lock is reentrant within the same mode (multiple + ``acquire_read`` calls nest, as do multiple ``acquire_write`` calls from the same thread), but upgrading from read + to write or downgrading from write to read raises :class:`RuntimeError`. Write locks are pinned to the thread that + acquired them. + + By default, ``is_singleton=True``: calling ``ReadWriteLock(path)`` with the same resolved path returns the same + instance. The lock file must use a ``.db`` extension (SQLite database). + + :param lock_file: path to the SQLite database file used as the lock + :param timeout: maximum wait time in seconds; ``-1`` means block indefinitely + :param blocking: if ``False``, raise :class:`~filelock.Timeout` immediately when the lock is unavailable + :param is_singleton: if ``True``, reuse existing instances for the same resolved path + + .. versionadded:: 3.21.0 + + """ + + _instances: WeakValueDictionary[pathlib.Path, ReadWriteLock] = WeakValueDictionary() + _instances_lock = threading.Lock() + + @classmethod + def get_lock( + cls, lock_file: str | os.PathLike[str], timeout: float = -1, *, blocking: bool = True + ) -> ReadWriteLock: + """ + Return the singleton :class:`ReadWriteLock` for *lock_file*. 
+
+        :param lock_file: path to the SQLite database file used as the lock
+        :param timeout: maximum wait time in seconds; ``-1`` means block indefinitely
+        :param blocking: if ``False``, raise :class:`~filelock.Timeout` immediately when the lock is unavailable
+
+        :returns: the singleton lock instance
+
+        :raises ValueError: if an instance already exists for this path with different *timeout* or *blocking* values
+
+        """
+        return cls(lock_file, timeout, blocking=blocking)
+
+    def __init__(
+        self,
+        lock_file: str | os.PathLike[str],
+        timeout: float = -1,
+        *,
+        blocking: bool = True,
+        is_singleton: bool = True,  # noqa: ARG002  # consumed by _ReadWriteLockMeta.__call__
+    ) -> None:
+        self.lock_file = os.fspath(lock_file)
+        self.timeout = timeout
+        self.blocking = blocking
+        self._transaction_lock = threading.Lock()  # serializes the (possibly blocking) SQLite transaction work
+        self._internal_lock = threading.Lock()  # protects _lock_level / _current_mode updates and rollback
+        self._lock_level = 0
+        self._current_mode: Literal["read", "write"] | None = None
+        self._write_thread_id: int | None = None
+        self._con = sqlite3.connect(self.lock_file, check_same_thread=False)
+        with _all_connections_lock:
+            _all_connections.add(self._con)
+
+    def _acquire_transaction_lock(self, *, blocking: bool, timeout: float) -> None:
+        if not blocking:
+            # threading.Lock.acquire forbids specifying a timeout for non-blocking calls, so never pass one here.
+            acquired = self._transaction_lock.acquire(blocking=False)
+        elif timeout == -1:
+            # blocking=True with no timeout means wait indefinitely per threading.Lock.acquire semantics
+            acquired = self._transaction_lock.acquire()
+        else:
+            acquired = self._transaction_lock.acquire(timeout=timeout)
+        if not acquired:
+            raise Timeout(self.lock_file) from None
+
+    def _validate_reentrant(self, mode: Literal["read", "write"], opposite: str, direction: str) -> AcquireReturnProxy:
+        if self._current_mode != mode:
+            msg = (
+                f"Cannot acquire {mode} lock on {self.lock_file} (lock id: {id(self)}): "
+                f"already holding a {opposite} lock ({direction} not allowed)"
+            )
+            raise RuntimeError(msg)
+        if mode == "write" and (cur := threading.get_ident()) != self._write_thread_id:
+            msg = (
+                f"Cannot acquire write lock on {self.lock_file} (lock id: {id(self)}) "
+                f"from thread {cur} while it is held by thread {self._write_thread_id}"
+            )
+            raise RuntimeError(msg)
+        self._lock_level += 1
+        return AcquireReturnProxy(lock=self)
+
+    def _configure_and_begin(
+        self, mode: Literal["read", "write"], timeout: float, *, blocking: bool, start_time: float
+    ) -> None:
+        waited = time.perf_counter() - start_time
+        timeout_ms = timeout_for_sqlite(timeout, blocking=blocking, already_waited=waited)
+        self._con.execute(f"PRAGMA busy_timeout={timeout_ms};").close()
+        # Use legacy journal mode (not WAL) because WAL does not block readers when a concurrent EXCLUSIVE
+        # write transaction is active, making read-write locking impossible without modifying table data.
+        # MEMORY is safe here since no actual writes happen — crashes cannot corrupt the DB.
+        # See https://sqlite.org/lang_transaction.html#deferred_immediate_and_exclusive_transactions
+        #
+        # Set here (not in __init__) because this pragma itself may block on a locked database,
+        # so it must run after busy_timeout is configured above.
+        self._con.execute("PRAGMA journal_mode=MEMORY;").close()
+        # Recompute remaining timeout after the potentially blocking journal_mode pragma.
+ waited = time.perf_counter() - start_time + if (recomputed := timeout_for_sqlite(timeout, blocking=blocking, already_waited=waited)) != timeout_ms: + self._con.execute(f"PRAGMA busy_timeout={recomputed};").close() + stmt = "BEGIN EXCLUSIVE TRANSACTION;" if mode == "write" else "BEGIN TRANSACTION;" + self._con.execute(stmt).close() + if mode == "read": + # A SELECT is needed to force SQLite to actually acquire the SHARED lock on the database. + # https://www.sqlite.org/lockingv3.html#transaction_control + self._con.execute("SELECT name FROM sqlite_schema LIMIT 1;").close() + + def _acquire(self, mode: Literal["read", "write"], timeout: float, *, blocking: bool) -> AcquireReturnProxy: + opposite = "write" if mode == "read" else "read" + direction = "downgrade" if mode == "read" else "upgrade" + + with self._internal_lock: + if self._lock_level > 0: + return self._validate_reentrant(mode, opposite, direction) + + start_time = time.perf_counter() + self._acquire_transaction_lock(blocking=blocking, timeout=timeout) + try: + # Double-check: another thread may have acquired the lock while we waited on _transaction_lock. + with self._internal_lock: + if self._lock_level > 0: + return self._validate_reentrant(mode, opposite, direction) + + self._configure_and_begin(mode, timeout, blocking=blocking, start_time=start_time) + + with self._internal_lock: + self._current_mode = mode + self._lock_level = 1 + if mode == "write": + self._write_thread_id = threading.get_ident() + + return AcquireReturnProxy(lock=self) + + except sqlite3.OperationalError as exc: + if "database is locked" not in str(exc): + raise + raise Timeout(self.lock_file) from None + finally: + self._transaction_lock.release() + + def acquire_read(self, timeout: float = -1, *, blocking: bool = True) -> AcquireReturnProxy: + """ + Acquire a shared read lock. + + If this instance already holds a read lock, the lock level is incremented (reentrant). Attempting to acquire a + read lock while holding a write lock raises :class:`RuntimeError` (downgrade not allowed). + + :param timeout: maximum wait time in seconds; ``-1`` means block indefinitely + :param blocking: if ``False``, raise :class:`~filelock.Timeout` immediately when the lock is unavailable + + :returns: a proxy that can be used as a context manager to release the lock + + :raises RuntimeError: if a write lock is already held on this instance + :raises Timeout: if the lock cannot be acquired within *timeout* seconds + + """ + return self._acquire("read", timeout, blocking=blocking) + + def acquire_write(self, timeout: float = -1, *, blocking: bool = True) -> AcquireReturnProxy: + """ + Acquire an exclusive write lock. + + If this instance already holds a write lock from the same thread, the lock level is incremented (reentrant). + Attempting to acquire a write lock while holding a read lock raises :class:`RuntimeError` (upgrade not allowed). + Write locks are pinned to the acquiring thread: a different thread trying to re-enter also raises + :class:`RuntimeError`. 
+ + :param timeout: maximum wait time in seconds; ``-1`` means block indefinitely + :param blocking: if ``False``, raise :class:`~filelock.Timeout` immediately when the lock is unavailable + + :returns: a proxy that can be used as a context manager to release the lock + + :raises RuntimeError: if a read lock is already held, or a write lock is held by a different thread + :raises Timeout: if the lock cannot be acquired within *timeout* seconds + + """ + return self._acquire("write", timeout, blocking=blocking) + + def release(self, *, force: bool = False) -> None: + """ + Release one level of the current lock. + + When the lock level reaches zero the underlying SQLite transaction is rolled back, releasing the database lock. + + :param force: if ``True``, release the lock completely regardless of the current lock level + + :raises RuntimeError: if no lock is currently held and *force* is ``False`` + + """ + should_rollback = False + with self._internal_lock: + if self._lock_level == 0: + if force: + return + msg = f"Cannot release a lock on {self.lock_file} (lock id: {id(self)}) that is not held" + raise RuntimeError(msg) + if force: + self._lock_level = 0 + else: + self._lock_level -= 1 + if self._lock_level == 0: + self._current_mode = None + self._write_thread_id = None + should_rollback = True + if should_rollback: + self._con.rollback() + + @contextmanager + def read_lock(self, timeout: float | None = None, *, blocking: bool | None = None) -> Generator[None]: + """ + Context manager that acquires and releases a shared read lock. + + Falls back to instance defaults for *timeout* and *blocking* when ``None``. + + :param timeout: maximum wait time in seconds, or ``None`` to use the instance default + :param blocking: if ``False``, raise :class:`~filelock.Timeout` immediately; ``None`` uses the instance default + + """ + if timeout is None: + timeout = self.timeout + if blocking is None: + blocking = self.blocking + self.acquire_read(timeout, blocking=blocking) + try: + yield + finally: + self.release() + + @contextmanager + def write_lock(self, timeout: float | None = None, *, blocking: bool | None = None) -> Generator[None]: + """ + Context manager that acquires and releases an exclusive write lock. + + Falls back to instance defaults for *timeout* and *blocking* when ``None``. + + :param timeout: maximum wait time in seconds, or ``None`` to use the instance default + :param blocking: if ``False``, raise :class:`~filelock.Timeout` immediately; ``None`` uses the instance default + + """ + if timeout is None: + timeout = self.timeout + if blocking is None: + blocking = self.blocking + self.acquire_write(timeout, blocking=blocking) + try: + yield + finally: + self.release() + + def close(self) -> None: + """ + Release the lock (if held) and close the underlying SQLite connection. + + After calling this method, the lock instance is no longer usable. 
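+
+        A rough end-to-end sketch (``ReadWriteLock`` again stands in for whatever
+        name this class is exported under):
+
+        .. code-block:: python
+
+            lock = ReadWriteLock("example.lock")
+            with lock.write_lock(timeout=10):
+                ...  # exclusive access
+            with lock.read_lock():
+                ...  # shared access, other readers may run concurrently
+            lock.close()  # rolls back any held transaction, closes the connection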
+
+        """
+        self.release(force=True)
+        self._con.close()
+        with _all_connections_lock:
+            _all_connections.discard(self._con)
diff --git a/venv/lib/python3.10/site-packages/filelock/_soft.py b/venv/lib/python3.10/site-packages/filelock/_soft.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bec57aafd38a2d659ba46bab06fa8d045c35af2
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_soft.py
@@ -0,0 +1,127 @@
+from __future__ import annotations
+
+import os
+import socket
+import sys
+import time
+from contextlib import suppress
+from errno import EACCES, EEXIST, EPERM, ESRCH
+from pathlib import Path
+
+from ._api import BaseFileLock
+from ._util import ensure_directory_exists, raise_on_not_writable_file
+
+_WIN_SYNCHRONIZE = 0x100000
+_WIN_ERROR_INVALID_PARAMETER = 87
+
+
+class SoftFileLock(BaseFileLock):
+    """
+    Portable file lock based on file existence.
+
+    Unlike :class:`UnixFileLock <filelock._unix.UnixFileLock>` and
+    :class:`WindowsFileLock <filelock._windows.WindowsFileLock>`, this lock does not use OS-level locking
+    primitives. Instead, it creates the lock file with ``O_CREAT | O_EXCL`` and treats its existence as the
+    lock indicator. This makes it work on any filesystem but leaves stale lock files behind if the process
+    crashes without releasing the lock.
+
+    To mitigate stale locks, the lock file contains the PID and hostname of the holding process. On contention, if the
+    holder is on the same host and its PID no longer exists, the stale lock is broken automatically.
+
+    """
+
+    def _acquire(self) -> None:
+        raise_on_not_writable_file(self.lock_file)
+        ensure_directory_exists(self.lock_file)
+        flags = (
+            os.O_WRONLY  # open for writing only
+            | os.O_CREAT
+            | os.O_EXCL  # together with above raise EEXIST if the file specified by filename exists
+            | os.O_TRUNC  # truncate the file to zero byte
+        )
+        if (o_nofollow := getattr(os, "O_NOFOLLOW", None)) is not None:
+            flags |= o_nofollow
+        try:
+            file_handler = os.open(self.lock_file, flags, self._open_mode())
+        except OSError as exception:
+            if not (
+                exception.errno == EEXIST or (exception.errno == EACCES and sys.platform == "win32")
+            ):  # pragma: win32 no cover
+                raise
+            if exception.errno == EEXIST and sys.platform != "win32":  # pragma: win32 no cover
+                self._try_break_stale_lock()
+        else:
+            self._write_lock_info(file_handler)
+            self._context.lock_file_fd = file_handler
+
+    def _try_break_stale_lock(self) -> None:
+        with suppress(OSError):
+            content = Path(self.lock_file).read_text(encoding="utf-8")
+            lines = content.strip().splitlines()
+            if len(lines) != 2:  # noqa: PLR2004
+                return
+            pid_str, hostname = lines
+            if hostname != socket.gethostname():
+                return
+            pid = int(pid_str)
+            if self._is_process_alive(pid):
+                return
+            break_path = f"{self.lock_file}.break.{os.getpid()}"
+            Path(self.lock_file).rename(break_path)
+            Path(break_path).unlink()
+
+    @staticmethod
+    def _is_process_alive(pid: int) -> bool:
+        if sys.platform == "win32":  # pragma: win32 cover
+            import ctypes  # noqa: PLC0415
+
+            kernel32 = ctypes.windll.kernel32
+            handle = kernel32.OpenProcess(_WIN_SYNCHRONIZE, 0, pid)
+            if handle:
+                kernel32.CloseHandle(handle)
+                return True
+            return kernel32.GetLastError() != _WIN_ERROR_INVALID_PARAMETER
+        try:
+            os.kill(pid, 0)
+        except OSError as exc:
+            if exc.errno == ESRCH:
+                return False
+            if exc.errno == EPERM:
+                return True
+            raise
+        return True
+
+    @staticmethod
+    def _write_lock_info(fd: int) -> None:
+        with suppress(OSError):
+            os.write(fd, f"{os.getpid()}\n{socket.gethostname()}\n".encode())
+
+    def _release(self) -> None:
+        assert self._context.lock_file_fd is not None  # noqa: S101
+        os.close(self._context.lock_file_fd)
+        self._context.lock_file_fd = None
+        if sys.platform == "win32":
+            self._windows_unlink_with_retry()
+        else:
+            with suppress(OSError):
+                Path(self.lock_file).unlink()
+
+    def _windows_unlink_with_retry(self) -> None:
+        max_retries = 10
+        retry_delay = 0.001
+        for attempt in range(max_retries):
+            # Windows doesn't immediately release file handles after close, causing EACCES/EPERM on unlink
+            try:
+                Path(self.lock_file).unlink()
+            except OSError as exc:  # noqa: PERF203
+                if exc.errno not in {EACCES, EPERM}:
+                    return
+                if attempt < max_retries - 1:
+                    time.sleep(retry_delay)
+                    retry_delay *= 2
+                else:
+                    return
+            else:
+                return  # unlink succeeded; no need to keep retrying
+
+
+__all__ = [
+    "SoftFileLock",
+]
diff --git a/venv/lib/python3.10/site-packages/filelock/_unix.py b/venv/lib/python3.10/site-packages/filelock/_unix.py
new file mode 100644
index 0000000000000000000000000000000000000000..7f8e8b007fd8d3488ebfb3b7a7f7a20921837e9e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/_unix.py
@@ -0,0 +1,109 @@
+from __future__ import annotations
+
+import os
+import sys
+import warnings
+from contextlib import suppress
+from errno import EAGAIN, ENOSYS, EWOULDBLOCK
+from pathlib import Path
+from typing import cast
+
+from ._api import BaseFileLock
+from ._util import ensure_directory_exists
+
+#: a flag to indicate if the fcntl API is available
+has_fcntl = False
+if sys.platform == "win32":  # pragma: win32 cover
+
+    class UnixFileLock(BaseFileLock):
+        """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
+
+        def _acquire(self) -> None:
+            raise NotImplementedError
+
+        def _release(self) -> None:
+            raise NotImplementedError
+
+else:  # pragma: win32 no cover
+    try:
+        import fcntl
+
+        _ = (fcntl.flock, fcntl.LOCK_EX, fcntl.LOCK_NB, fcntl.LOCK_UN)
+    except (ImportError, AttributeError):
+        pass
+    else:
+        has_fcntl = True
+
+    class UnixFileLock(BaseFileLock):
+        """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""
+
+        def _acquire(self) -> None:  # noqa: C901, PLR0912
+            ensure_directory_exists(self.lock_file)
+            open_flags = os.O_RDWR | os.O_TRUNC
+            o_nofollow = getattr(os, "O_NOFOLLOW", None)
+            if o_nofollow is not None:
+                open_flags |= o_nofollow
+            open_flags |= os.O_CREAT
+            open_mode = self._open_mode()
+            try:
+                fd = os.open(self.lock_file, open_flags, open_mode)
+            except FileNotFoundError:
+                # On FUSE/NFS, os.open(O_CREAT) is not atomic: LOOKUP + CREATE can be split, allowing a concurrent
+                # unlink() to delete the file between them. For valid paths, treat ENOENT as transient contention.
+                # For invalid paths (e.g., empty string), re-raise to avoid infinite retry loops.
+                if self.lock_file and Path(self.lock_file).parent.exists():
+                    return
+                raise
+            except PermissionError:
+                # Sticky-bit dirs (e.g. /tmp): O_CREAT fails if the file is owned by another user (#317).
+                # Fall back to opening the existing file without O_CREAT.
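+                # (Only re-creation via O_CREAT is rejected there under sticky-bit /
+                # fs.protected_regular semantics; opening the existing file read-write
+                # still works when its permission bits allow it, which is what the
+                # fallback below relies on.)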
+ if not Path(self.lock_file).exists(): + raise + try: + fd = os.open(self.lock_file, open_flags & ~os.O_CREAT, open_mode) + except FileNotFoundError: + return + if self.has_explicit_mode: + with suppress(PermissionError): + os.fchmod(fd, self._context.mode) + try: + fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB) + except OSError as exception: + os.close(fd) + if exception.errno == ENOSYS: + with suppress(OSError): + Path(self.lock_file).unlink() + self._fallback_to_soft_lock() + self._acquire() + return + if exception.errno not in {EAGAIN, EWOULDBLOCK}: + raise + else: + # The file may have been unlinked by a concurrent _release() between our open() and flock(). + # A lock on an unlinked inode is useless — discard and let the retry loop start fresh. + if os.fstat(fd).st_nlink == 0: + os.close(fd) + else: + self._context.lock_file_fd = fd + + def _fallback_to_soft_lock(self) -> None: + from ._soft import SoftFileLock # noqa: PLC0415 + + warnings.warn("flock not supported on this filesystem, falling back to SoftFileLock", stacklevel=2) + from .asyncio import AsyncSoftFileLock, BaseAsyncFileLock # noqa: PLC0415 + + self.__class__ = AsyncSoftFileLock if isinstance(self, BaseAsyncFileLock) else SoftFileLock + + def _release(self) -> None: + fd = cast("int", self._context.lock_file_fd) + self._context.lock_file_fd = None + with suppress(OSError): + Path(self.lock_file).unlink() + fcntl.flock(fd, fcntl.LOCK_UN) + os.close(fd) + + +__all__ = [ + "UnixFileLock", + "has_fcntl", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_util.py b/venv/lib/python3.10/site-packages/filelock/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..670152393ed76d2cecf4cf5e774360af4380f319 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_util.py @@ -0,0 +1,53 @@ +from __future__ import annotations + +import os +import stat +import sys +from errno import EACCES, EISDIR +from pathlib import Path + + +def raise_on_not_writable_file(filename: str) -> None: + """ + Raise an exception if attempting to open the file for writing would fail. + + This is done so files that will never be writable can be separated from files that are writable but currently + locked. + + :param filename: file to check + + :raises OSError: as if the file was opened for writing. + + """ + try: # use stat to do exists + can write to check without race condition + file_stat = os.stat(filename) # noqa: PTH116 + except OSError: + return # swallow does not exist or other errors + + if file_stat.st_mtime != 0: # if os.stat returns but modification is zero that's an invalid os.stat - ignore it + if not (file_stat.st_mode & stat.S_IWUSR): + raise PermissionError(EACCES, "Permission denied", filename) + + if stat.S_ISDIR(file_stat.st_mode): + if sys.platform == "win32": # pragma: win32 cover + # On Windows, this is PermissionError + raise PermissionError(EACCES, "Permission denied", filename) + else: # pragma: win32 no cover # noqa: RET506 + # On linux / macOS, this is IsADirectoryError + raise IsADirectoryError(EISDIR, "Is a directory", filename) + + +def ensure_directory_exists(filename: Path | str) -> None: + """ + Ensure the directory containing the file exists (create it if necessary). + + :param filename: file. 
+ + """ + Path(filename).parent.mkdir(parents=True, exist_ok=True) + + +__all__ = [ + "ensure_directory_exists", + "raise_on_not_writable_file", +] diff --git a/venv/lib/python3.10/site-packages/filelock/_windows.py b/venv/lib/python3.10/site-packages/filelock/_windows.py new file mode 100644 index 0000000000000000000000000000000000000000..23bff364a6fa025458d7188861a6e1a7677606f1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/_windows.py @@ -0,0 +1,100 @@ +from __future__ import annotations + +import os +import sys +from errno import EACCES +from typing import cast + +from ._api import BaseFileLock +from ._util import ensure_directory_exists, raise_on_not_writable_file + +if sys.platform == "win32": # pragma: win32 cover + import ctypes + import msvcrt + from ctypes import wintypes + + # Windows API constants for reparse point detection + FILE_ATTRIBUTE_REPARSE_POINT = 0x00000400 + INVALID_FILE_ATTRIBUTES = 0xFFFFFFFF + + # Load kernel32.dll + _kernel32 = ctypes.WinDLL("kernel32", use_last_error=True) + _kernel32.GetFileAttributesW.argtypes = [wintypes.LPCWSTR] + _kernel32.GetFileAttributesW.restype = wintypes.DWORD + + def _is_reparse_point(path: str) -> bool: + """ + Check if a path is a reparse point (symlink, junction, etc.) on Windows. + + :param path: Path to check + + :returns: True if path is a reparse point, False otherwise + + :raises OSError: If GetFileAttributesW fails for reasons other than file-not-found + + """ + attrs = _kernel32.GetFileAttributesW(path) + if attrs == INVALID_FILE_ATTRIBUTES: + # File doesn't exist yet - that's fine, we'll create it + err = ctypes.get_last_error() + if err == 2: # noqa: PLR2004 # ERROR_FILE_NOT_FOUND + return False + if err == 3: # noqa: PLR2004 # ERROR_PATH_NOT_FOUND + return False + # Some other error - let caller handle it + return False + return bool(attrs & FILE_ATTRIBUTE_REPARSE_POINT) + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise_on_not_writable_file(self.lock_file) + ensure_directory_exists(self.lock_file) + + # Security check: Refuse to open reparse points (symlinks, junctions) + # This prevents TOCTOU symlink attacks (CVE-TBD) + if _is_reparse_point(self.lock_file): + msg = f"Lock file is a reparse point (symlink/junction): {self.lock_file}" + raise OSError(msg) + + flags = ( + os.O_RDWR # open for read and write + | os.O_CREAT # create file if not exists + ) + try: + fd = os.open(self.lock_file, flags, self._open_mode()) + except OSError as exception: + if exception.errno != EACCES: # has no access to this lock + raise + else: + try: + msvcrt.locking(fd, msvcrt.LK_NBLCK, 1) + except OSError as exception: + os.close(fd) # close file first + if exception.errno != EACCES: # file is already locked + raise + else: + self._context.lock_file_fd = fd + + def _release(self) -> None: + fd = cast("int", self._context.lock_file_fd) + self._context.lock_file_fd = None + msvcrt.locking(fd, msvcrt.LK_UNLCK, 1) + os.close(fd) + +else: # pragma: win32 no cover + + class WindowsFileLock(BaseFileLock): + """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems.""" + + def _acquire(self) -> None: + raise NotImplementedError + + def _release(self) -> None: + raise NotImplementedError + + +__all__ = [ + "WindowsFileLock", +] diff --git a/venv/lib/python3.10/site-packages/filelock/asyncio.py b/venv/lib/python3.10/site-packages/filelock/asyncio.py new file mode 100644 index 
0000000000000000000000000000000000000000..81743adff7e9684ff3d3ce6c23a0220bc95e7a68
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/filelock/asyncio.py
@@ -0,0 +1,376 @@
+"""An asyncio-based implementation of the file lock."""
+
+from __future__ import annotations
+
+import asyncio
+import contextlib
+import logging
+import os
+import time
+from dataclasses import dataclass
+from inspect import iscoroutinefunction
+from threading import local
+from typing import TYPE_CHECKING, Any, NoReturn, cast
+
+from ._api import _UNSET_FILE_MODE, BaseFileLock, FileLockContext, FileLockMeta
+from ._error import Timeout
+from ._soft import SoftFileLock
+from ._unix import UnixFileLock
+from ._windows import WindowsFileLock
+
+if TYPE_CHECKING:
+    import sys
+    from collections.abc import Callable
+    from concurrent import futures
+    from types import TracebackType
+
+    if sys.version_info >= (3, 11):  # pragma: no cover (py311+)
+        from typing import Self
+    else:  # pragma: no cover (<py311)
+        from typing_extensions import Self
+
+
+_LOGGER = logging.getLogger("filelock")
+
+
+@dataclass
+class AsyncFileLockContext(FileLockContext):
+    """A dataclass which holds the context for a ``BaseAsyncFileLock`` object."""
+
+    #: Whether to run lock operations in an executor
+    run_in_executor: bool = True
+
+    #: The executor
+    executor: futures.Executor | None = None
+
+    #: The event loop
+    loop: asyncio.AbstractEventLoop | None = None
+
+
+class AsyncThreadLocalFileContext(AsyncFileLockContext, local):
+    """A thread local version of the ``AsyncFileLockContext`` class."""
+
+
+class AsyncAcquireReturnProxy:
+    """A context-aware object that will release the lock file when exiting."""
+
+    def __init__(self, lock: BaseAsyncFileLock) -> None:  # noqa: D107
+        self.lock = lock
+
+    async def __aenter__(self) -> BaseAsyncFileLock:  # noqa: D105
+        return self.lock
+
+    async def __aexit__(  # noqa: D105
+        self,
+        exc_type: type[BaseException] | None,
+        exc_value: BaseException | None,
+        traceback: TracebackType | None,
+    ) -> None:
+        await self.lock.release()
+
+
+class AsyncFileLockMeta(FileLockMeta):
+    def __call__(  # ty: ignore[invalid-method-override] # noqa: PLR0913
+        cls,  # noqa: N805
+        lock_file: str | os.PathLike[str],
+        timeout: float = -1,
+        mode: int = _UNSET_FILE_MODE,
+        thread_local: bool = False,  # noqa: FBT001, FBT002
+        *,
+        blocking: bool = True,
+        is_singleton: bool = False,
+        poll_interval: float = 0.05,
+        lifetime: float | None = None,
+        loop: asyncio.AbstractEventLoop | None = None,
+        run_in_executor: bool = True,
+        executor: futures.Executor | None = None,
+    ) -> BaseAsyncFileLock:
+        if thread_local and run_in_executor:
+            msg = "run_in_executor is not supported when thread_local is True"
+            raise ValueError(msg)
+        instance = super().__call__(
+            lock_file=lock_file,
+            timeout=timeout,
+            mode=mode,
+            thread_local=thread_local,
+            blocking=blocking,
+            is_singleton=is_singleton,
+            poll_interval=poll_interval,
+            lifetime=lifetime,
+            loop=loop,
+            run_in_executor=run_in_executor,
+            executor=executor,
+        )
+        return cast("BaseAsyncFileLock", instance)
+
+
+class BaseAsyncFileLock(BaseFileLock, metaclass=AsyncFileLockMeta):
+    """
+    Base class for asynchronous file locks.
+
+    .. versionadded:: 3.15.0
+
+    """
+
+    def __init__(  # noqa: PLR0913
+        self,
+        lock_file: str | os.PathLike[str],
+        timeout: float = -1,
+        mode: int = _UNSET_FILE_MODE,
+        thread_local: bool = False,  # noqa: FBT001, FBT002
+        *,
+        blocking: bool = True,
+        is_singleton: bool = False,
+        poll_interval: float = 0.05,
+        lifetime: float | None = None,
+        loop: asyncio.AbstractEventLoop | None = None,
+        run_in_executor: bool = True,
+        executor: futures.Executor | None = None,
+    ) -> None:
+        """
+        Create a new lock object.
+
+        :param lock_file: path to the file
+        :param timeout: default timeout when acquiring the lock, in seconds. It will be used as fallback value in the
+            acquire method, if no timeout value (``None``) is given. If you want to disable the timeout, set it to a
+            negative value. A timeout of 0 means that there is exactly one attempt to acquire the file lock.
+        :param mode: file permissions for the lockfile. When not specified, the OS controls permissions via umask and
+            default ACLs, preserving POSIX default ACL inheritance in shared directories.
+ :param thread_local: Whether this object's internal context should be thread local or not. If this is set to + ``False`` then the lock will be reentrant across threads. + :param blocking: whether the lock should be blocking or not + :param is_singleton: If this is set to ``True`` then only one instance of this class will be created per lock + file. This is useful if you want to use the lock object for reentrant locking without needing to pass the + same object around. + :param poll_interval: default interval for polling the lock file, in seconds. It will be used as fallback value + in the acquire method, if no poll_interval value (``None``) is given. + :param lifetime: maximum time in seconds a lock can be held before it is considered expired. When set, a waiting + process will break a lock whose file modification time is older than ``lifetime`` seconds. ``None`` (the + default) means locks never expire. + :param loop: The event loop to use. If not specified, the running event loop will be used. + :param run_in_executor: If this is set to ``True`` then the lock will be acquired in an executor. + :param executor: The executor to use. If not specified, the default executor will be used. + + """ + self._is_thread_local = thread_local + self._is_singleton = is_singleton + + # Create the context. Note that external code should not work with the context directly and should instead use + # properties of this class. + kwargs: dict[str, Any] = { + "lock_file": os.fspath(lock_file), + "timeout": timeout, + "mode": mode, + "blocking": blocking, + "poll_interval": poll_interval, + "lifetime": lifetime, + "loop": loop, + "run_in_executor": run_in_executor, + "executor": executor, + } + self._context: AsyncFileLockContext = (AsyncThreadLocalFileContext if thread_local else AsyncFileLockContext)( + **kwargs + ) + + @property + def run_in_executor(self) -> bool: + """:returns: whether run in executor.""" + return self._context.run_in_executor + + @property + def executor(self) -> futures.Executor | None: + """:returns: the executor.""" + return self._context.executor + + @executor.setter + def executor(self, value: futures.Executor | None) -> None: # pragma: no cover + """ + Change the executor. + + :param futures.Executor | None value: the new executor or ``None`` + + """ + self._context.executor = value + + @property + def loop(self) -> asyncio.AbstractEventLoop | None: + """:returns: the event loop.""" + return self._context.loop + + async def acquire( # ty: ignore[invalid-method-override] + self, + timeout: float | None = None, + poll_interval: float | None = None, + *, + blocking: bool | None = None, + cancel_check: Callable[[], bool] | None = None, + ) -> AsyncAcquireReturnProxy: + """ + Try to acquire the file lock. + + :param timeout: maximum wait time for acquiring the lock, ``None`` means use the default + :attr:`~BaseFileLock.timeout` is and if ``timeout < 0``, there is no timeout and this method will block + until the lock could be acquired + :param poll_interval: interval of trying to acquire the lock file, ``None`` means use the default + :attr:`~BaseFileLock.poll_interval` + :param blocking: defaults to True. If False, function will return immediately if it cannot obtain a lock on the + first attempt. Otherwise, this method will block until the timeout expires or the lock is acquired. + :param cancel_check: a callable returning ``True`` when the acquisition should be canceled. Checked on each poll + iteration. When triggered, raises :class:`~Timeout` just like an expired timeout. 
+ + :returns: a context object that will unlock the file when the context is exited + + :raises Timeout: if fails to acquire lock within the timeout period + + .. code-block:: python + + # You can use this method in the context manager (recommended) + with lock.acquire(): + pass + + # Or use an equivalent try-finally construct: + lock.acquire() + try: + pass + finally: + lock.release() + + """ + # Use the default timeout, if no timeout is provided. + if timeout is None: + timeout = self._context.timeout + + if blocking is None: + blocking = self._context.blocking + + if poll_interval is None: + poll_interval = self._context.poll_interval + + # Increment the number right at the beginning. We can still undo it, if something fails. + self._context.lock_counter += 1 + + lock_id = id(self) + lock_filename = self.lock_file + start_time = time.perf_counter() + try: + while True: + if not self.is_locked: + self._try_break_expired_lock() + _LOGGER.debug("Attempting to acquire lock %s on %s", lock_id, lock_filename) + await self._run_internal_method(self._acquire) + if self.is_locked: + _LOGGER.debug("Lock %s acquired on %s", lock_id, lock_filename) + break + if self._check_give_up( + lock_id, + lock_filename, + blocking=blocking, + cancel_check=cancel_check, + timeout=timeout, + start_time=start_time, + ): + raise Timeout(lock_filename) # noqa: TRY301 + msg = "Lock %s not acquired on %s, waiting %s seconds ..." + _LOGGER.debug(msg, lock_id, lock_filename, poll_interval) + await asyncio.sleep(poll_interval) + except BaseException: # Something did go wrong, so decrement the counter. + self._context.lock_counter = max(0, self._context.lock_counter - 1) + raise + return AsyncAcquireReturnProxy(lock=self) + + async def release(self, force: bool = False) -> None: # ty: ignore[invalid-method-override] # noqa: FBT001, FBT002 + """ + Release the file lock. The lock is only completely released when the lock counter reaches 0. The lock file + itself is not automatically deleted. + + :param force: If true, the lock counter is ignored and the lock is released in every case. + + """ + if self.is_locked: + self._context.lock_counter -= 1 + + if self._context.lock_counter == 0 or force: + lock_id, lock_filename = id(self), self.lock_file + + _LOGGER.debug("Attempting to release lock %s on %s", lock_id, lock_filename) + await self._run_internal_method(self._release) + self._context.lock_counter = 0 + _LOGGER.debug("Lock %s released on %s", lock_id, lock_filename) + + async def _run_internal_method(self, method: Callable[[], Any]) -> None: + if iscoroutinefunction(method): + await method() + elif self.run_in_executor: + loop = self.loop or asyncio.get_running_loop() + await loop.run_in_executor(self.executor, method) + else: + method() + + def __enter__(self) -> NoReturn: + """ + Replace old __enter__ method to avoid using it. + + NOTE: DO NOT USE `with` FOR ASYNCIO LOCKS, USE `async with` INSTEAD. + + :returns: none + :rtype: NoReturn + + """ + msg = "Do not use `with` for asyncio locks, use `async with` instead." + raise NotImplementedError(msg) + + async def __aenter__(self) -> Self: + """ + Acquire the lock. + + :returns: the lock object + + """ + await self.acquire() + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + """ + Release the lock. 
+ + :param exc_type: the exception type if raised + :param exc_value: the exception value if raised + :param traceback: the exception traceback if raised + + """ + await self.release() + + def __del__(self) -> None: + """Called when the lock object is deleted.""" + with contextlib.suppress(RuntimeError): + loop = self.loop or asyncio.get_running_loop() + if not loop.is_running(): # pragma: no cover + loop.run_until_complete(self.release(force=True)) + else: + loop.create_task(self.release(force=True)) + + +class AsyncSoftFileLock(SoftFileLock, BaseAsyncFileLock): + """Simply watches the existence of the lock file.""" + + +class AsyncUnixFileLock(UnixFileLock, BaseAsyncFileLock): + """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems.""" + + +class AsyncWindowsFileLock(WindowsFileLock, BaseAsyncFileLock): + """Uses the :func:`msvcrt.locking` to hard lock the lock file on windows systems.""" + + +__all__ = [ + "AsyncAcquireReturnProxy", + "AsyncSoftFileLock", + "AsyncUnixFileLock", + "AsyncWindowsFileLock", + "BaseAsyncFileLock", +] diff --git a/venv/lib/python3.10/site-packages/filelock/py.typed b/venv/lib/python3.10/site-packages/filelock/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/venv/lib/python3.10/site-packages/filelock/version.py b/venv/lib/python3.10/site-packages/filelock/version.py new file mode 100644 index 0000000000000000000000000000000000000000..9702f54520ecd2906c28c83c9a17bb3bb87d7ecd --- /dev/null +++ b/venv/lib/python3.10/site-packages/filelock/version.py @@ -0,0 +1,34 @@ +# file generated by setuptools-scm +# don't change, don't track in version control + +__all__ = [ + "__version__", + "__version_tuple__", + "version", + "version_tuple", + "__commit_id__", + "commit_id", +] + +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple + from typing import Union + + VERSION_TUPLE = Tuple[Union[int, str], ...] 
+ COMMIT_ID = Union[str, None] +else: + VERSION_TUPLE = object + COMMIT_ID = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE +commit_id: COMMIT_ID +__commit_id__: COMMIT_ID + +__version__ = version = '3.24.3' +__version_tuple__ = version_tuple = (3, 24, 3) + +__commit_id__ = commit_id = None diff --git a/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..bb96eb3cf23a281f3ea8c7e1fe1c8a2a1fcaa2f5 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/METADATA @@ -0,0 +1,257 @@ +Metadata-Version: 2.4 +Name: fsspec +Version: 2026.2.0 +Summary: File-system specification +Project-URL: Changelog, https://filesystem-spec.readthedocs.io/en/latest/changelog.html +Project-URL: Documentation, https://filesystem-spec.readthedocs.io/en/latest/ +Project-URL: Homepage, https://github.com/fsspec/filesystem_spec +Maintainer-email: Martin Durant +License-Expression: BSD-3-Clause +License-File: LICENSE +Keywords: file +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Requires-Python: >=3.10 +Provides-Extra: abfs +Requires-Dist: adlfs; extra == 'abfs' +Provides-Extra: adl +Requires-Dist: adlfs; extra == 'adl' +Provides-Extra: arrow +Requires-Dist: pyarrow>=1; extra == 'arrow' +Provides-Extra: dask +Requires-Dist: dask; extra == 'dask' +Requires-Dist: distributed; extra == 'dask' +Provides-Extra: dev +Requires-Dist: pre-commit; extra == 'dev' +Requires-Dist: ruff>=0.5; extra == 'dev' +Provides-Extra: doc +Requires-Dist: numpydoc; extra == 'doc' +Requires-Dist: sphinx; extra == 'doc' +Requires-Dist: sphinx-design; extra == 'doc' +Requires-Dist: sphinx-rtd-theme; extra == 'doc' +Requires-Dist: yarl; extra == 'doc' +Provides-Extra: dropbox +Requires-Dist: dropbox; extra == 'dropbox' +Requires-Dist: dropboxdrivefs; extra == 'dropbox' +Requires-Dist: requests; extra == 'dropbox' +Provides-Extra: entrypoints +Provides-Extra: full +Requires-Dist: adlfs; extra == 'full' +Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'full' +Requires-Dist: dask; extra == 'full' +Requires-Dist: distributed; extra == 'full' +Requires-Dist: dropbox; extra == 'full' +Requires-Dist: dropboxdrivefs; extra == 'full' +Requires-Dist: fusepy; extra == 'full' +Requires-Dist: gcsfs>2024.2.0; extra == 'full' +Requires-Dist: libarchive-c; extra == 'full' +Requires-Dist: ocifs; extra == 'full' +Requires-Dist: panel; extra == 'full' +Requires-Dist: paramiko; extra == 'full' +Requires-Dist: pyarrow>=1; extra == 'full' +Requires-Dist: pygit2; extra == 'full' +Requires-Dist: requests; extra == 'full' +Requires-Dist: s3fs>2024.2.0; extra == 'full' +Requires-Dist: smbprotocol; extra == 'full' +Requires-Dist: 
tqdm; extra == 'full' +Provides-Extra: fuse +Requires-Dist: fusepy; extra == 'fuse' +Provides-Extra: gcs +Requires-Dist: gcsfs>2024.2.0; extra == 'gcs' +Provides-Extra: git +Requires-Dist: pygit2; extra == 'git' +Provides-Extra: github +Requires-Dist: requests; extra == 'github' +Provides-Extra: gs +Requires-Dist: gcsfs; extra == 'gs' +Provides-Extra: gui +Requires-Dist: panel; extra == 'gui' +Provides-Extra: hdfs +Requires-Dist: pyarrow>=1; extra == 'hdfs' +Provides-Extra: http +Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'http' +Provides-Extra: libarchive +Requires-Dist: libarchive-c; extra == 'libarchive' +Provides-Extra: oci +Requires-Dist: ocifs; extra == 'oci' +Provides-Extra: s3 +Requires-Dist: s3fs>2024.2.0; extra == 's3' +Provides-Extra: sftp +Requires-Dist: paramiko; extra == 'sftp' +Provides-Extra: smb +Requires-Dist: smbprotocol; extra == 'smb' +Provides-Extra: ssh +Requires-Dist: paramiko; extra == 'ssh' +Provides-Extra: test +Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'test' +Requires-Dist: numpy; extra == 'test' +Requires-Dist: pytest; extra == 'test' +Requires-Dist: pytest-asyncio!=0.22.0; extra == 'test' +Requires-Dist: pytest-benchmark; extra == 'test' +Requires-Dist: pytest-cov; extra == 'test' +Requires-Dist: pytest-mock; extra == 'test' +Requires-Dist: pytest-recording; extra == 'test' +Requires-Dist: pytest-rerunfailures; extra == 'test' +Requires-Dist: requests; extra == 'test' +Provides-Extra: test-downstream +Requires-Dist: aiobotocore<3.0.0,>=2.5.4; extra == 'test-downstream' +Requires-Dist: dask[dataframe,test]; extra == 'test-downstream' +Requires-Dist: moto[server]<5,>4; extra == 'test-downstream' +Requires-Dist: pytest-timeout; extra == 'test-downstream' +Requires-Dist: xarray; extra == 'test-downstream' +Provides-Extra: test-full +Requires-Dist: adlfs; extra == 'test-full' +Requires-Dist: aiohttp!=4.0.0a0,!=4.0.0a1; extra == 'test-full' +Requires-Dist: backports-zstd; (python_version < '3.14') and extra == 'test-full' +Requires-Dist: cloudpickle; extra == 'test-full' +Requires-Dist: dask; extra == 'test-full' +Requires-Dist: distributed; extra == 'test-full' +Requires-Dist: dropbox; extra == 'test-full' +Requires-Dist: dropboxdrivefs; extra == 'test-full' +Requires-Dist: fastparquet; extra == 'test-full' +Requires-Dist: fusepy; extra == 'test-full' +Requires-Dist: gcsfs; extra == 'test-full' +Requires-Dist: jinja2; extra == 'test-full' +Requires-Dist: kerchunk; extra == 'test-full' +Requires-Dist: libarchive-c; extra == 'test-full' +Requires-Dist: lz4; extra == 'test-full' +Requires-Dist: notebook; extra == 'test-full' +Requires-Dist: numpy; extra == 'test-full' +Requires-Dist: ocifs; extra == 'test-full' +Requires-Dist: pandas<3.0.0; extra == 'test-full' +Requires-Dist: panel; extra == 'test-full' +Requires-Dist: paramiko; extra == 'test-full' +Requires-Dist: pyarrow; extra == 'test-full' +Requires-Dist: pyarrow>=1; extra == 'test-full' +Requires-Dist: pyftpdlib; extra == 'test-full' +Requires-Dist: pygit2; extra == 'test-full' +Requires-Dist: pytest; extra == 'test-full' +Requires-Dist: pytest-asyncio!=0.22.0; extra == 'test-full' +Requires-Dist: pytest-benchmark; extra == 'test-full' +Requires-Dist: pytest-cov; extra == 'test-full' +Requires-Dist: pytest-mock; extra == 'test-full' +Requires-Dist: pytest-recording; extra == 'test-full' +Requires-Dist: pytest-rerunfailures; extra == 'test-full' +Requires-Dist: python-snappy; extra == 'test-full' +Requires-Dist: requests; extra == 'test-full' +Requires-Dist: smbprotocol; extra == 'test-full' 
+Requires-Dist: tqdm; extra == 'test-full'
+Requires-Dist: urllib3; extra == 'test-full'
+Requires-Dist: zarr; extra == 'test-full'
+Requires-Dist: zstandard; (python_version < '3.14') and extra == 'test-full'
+Provides-Extra: tqdm
+Requires-Dist: tqdm; extra == 'tqdm'
+Description-Content-Type: text/markdown
+
+# filesystem_spec
+
+[![PyPI version](https://badge.fury.io/py/fsspec.svg)](https://pypi.python.org/pypi/fsspec/)
+[![Anaconda-Server Badge](https://anaconda.org/conda-forge/fsspec/badges/version.svg)](https://anaconda.org/conda-forge/fsspec)
+![Build](https://github.com/fsspec/filesystem_spec/workflows/CI/badge.svg)
+[![Docs](https://readthedocs.org/projects/filesystem-spec/badge/?version=latest)](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest)
+
+A specification for pythonic filesystems.
+
+## Install
+
+```bash
+pip install fsspec
+```
+
+This installs the base fsspec package. Optional features may require extras,
+e.g. `pip install fsspec[ssh]` installs the dependencies for `ssh` backend support.
+Use `pip install fsspec[full]` to install all known extra dependencies.
+
+An up-to-date package is also provided through the conda-forge distribution:
+
+```bash
+conda install -c conda-forge fsspec
+```
+
+
+## Purpose
+
+To produce a template or specification for a file-system interface that specific implementations should follow,
+so that applications making use of them can rely on a common behaviour and not have to worry about the
+internal implementation decisions of any given backend. Many such implementations are included in this package,
+or in sister projects such as `s3fs` and `gcsfs`.
+
+In addition, if this is well-designed, then additional functionality, such as a key-value store or FUSE
+mounting of the file-system implementation, may be available for all implementations "for free".
+
+## Documentation
+
+Please refer to the documentation on [RTD](https://filesystem-spec.readthedocs.io/en/latest/?badge=latest).
+
+## Develop
+
+fsspec uses GitHub Actions for CI. Environment files can be found
+in the "ci/" directory. Note that the main environment is called "py38",
+but the version of Python installed is expected to be adjustable at
+CI runtime. For local use, pick a version suitable for you.
+
+```bash
+# For a new environment (mamba / conda).
+mamba create -n fsspec -c conda-forge python=3.10 -y
+conda activate fsspec
+
+# Standard dev install with docs and tests.
+pip install -e ".[dev,doc,test]"
+
+# Full tests except for downstream
+pip install s3fs
+pip uninstall s3fs
+pip install -e .[dev,doc,test_full]
+pip install s3fs --no-deps
+pytest -v
+
+# Downstream tests.
+sh install_s3fs.sh
+# Windows powershell.
+install_s3fs.sh
+```
+
+### Testing
+
+Tests can be run in the dev environment, if activated, via ``pytest fsspec``.
+
+The full fsspec suite requires a system-level docker, docker-compose, and fuse
+installation. If you are only changing one backend implementation, it is
+not generally necessary to run the whole suite locally; see the example at the
+end of this section.
+
+Contributors are expected to ensure that any change to fsspec does not
+cause issues or regressions either for other fsspec-related packages such
+as gcsfs and s3fs, or for downstream users of fsspec. The "downstream" CI
+run and corresponding environment file run a set of tests from the dask
+test suite, and very minimal tests against pandas and zarr from the
+test_downstream.py module in this repo.
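+
+For example, to exercise only the backend you are changing (the path below is
+illustrative):
+
+```bash
+# run one implementation's tests instead of the whole suite
+pytest -v fsspec/implementations/tests/test_memory.py
+```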
+ +### Code Formatting + +fsspec uses [Black](https://black.readthedocs.io/en/stable) to ensure +a consistent code format throughout the project. +Run ``black fsspec`` from the root of the filesystem_spec repository to +auto-format your code. Additionally, many editors have plugins that will apply +``black`` as you edit files. ``black`` is included in the ``tox`` environments. + +Optionally, you may wish to setup [pre-commit hooks](https://pre-commit.com) to +automatically run ``black`` when you make a git commit. +Run ``pre-commit install --install-hooks`` from the root of the +filesystem_spec repository to setup pre-commit hooks. ``black`` will now be run +before you commit, reformatting any changed files. You can format without +committing via ``pre-commit run`` or skip these checks with ``git commit +--no-verify``. + +## Support + +Work on this repository is supported in part by: + +"Anaconda, Inc. - Advancing AI through open source." + +anaconda logo diff --git a/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..b3a1b4939fa41a1630cd6b3215ec59bc98be245e --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/RECORD @@ -0,0 +1,119 @@ +fsspec-2026.2.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +fsspec-2026.2.0.dist-info/METADATA,sha256=Pw6QhbyXeg-elb0hpWPiQRpAeMD_ApJ9vdNFoYCnrPs,10524 +fsspec-2026.2.0.dist-info/RECORD,, +fsspec-2026.2.0.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87 +fsspec-2026.2.0.dist-info/licenses/LICENSE,sha256=LcNUls5TpzB5FcAIqESq1T53K0mzTN0ARFBnaRQH7JQ,1513 +fsspec/__init__.py,sha256=L7qwNBU1iMNQd8Of87HYSNFT9gWlNMSESaJC8fY0AaQ,2053 +fsspec/__pycache__/__init__.cpython-310.pyc,, +fsspec/__pycache__/_version.cpython-310.pyc,, +fsspec/__pycache__/archive.cpython-310.pyc,, +fsspec/__pycache__/asyn.cpython-310.pyc,, +fsspec/__pycache__/caching.cpython-310.pyc,, +fsspec/__pycache__/callbacks.cpython-310.pyc,, +fsspec/__pycache__/compression.cpython-310.pyc,, +fsspec/__pycache__/config.cpython-310.pyc,, +fsspec/__pycache__/conftest.cpython-310.pyc,, +fsspec/__pycache__/core.cpython-310.pyc,, +fsspec/__pycache__/dircache.cpython-310.pyc,, +fsspec/__pycache__/exceptions.cpython-310.pyc,, +fsspec/__pycache__/fuse.cpython-310.pyc,, +fsspec/__pycache__/generic.cpython-310.pyc,, +fsspec/__pycache__/gui.cpython-310.pyc,, +fsspec/__pycache__/json.cpython-310.pyc,, +fsspec/__pycache__/mapping.cpython-310.pyc,, +fsspec/__pycache__/parquet.cpython-310.pyc,, +fsspec/__pycache__/registry.cpython-310.pyc,, +fsspec/__pycache__/spec.cpython-310.pyc,, +fsspec/__pycache__/transaction.cpython-310.pyc,, +fsspec/__pycache__/utils.cpython-310.pyc,, +fsspec/_version.py,sha256=AEamMn8IHx_wGXrogRYVBycv6M5u2_UCmFedn7R8hYI,710 +fsspec/archive.py,sha256=vM6t_lgV6lBWbBYwpm3S4ofBQFQxUPr5KkDQrrQcQro,2411 +fsspec/asyn.py,sha256=LP_OicTWXmKHe31wBoYs2MrrNf8rmlhjVeGg5AqvVy8,36630 +fsspec/caching.py,sha256=8IJ4rgcWnvq_b_DqlcMGJ-K59d4Db5O9Gz8PkATAgHo,34023 +fsspec/callbacks.py,sha256=BDIwLzK6rr_0V5ch557fSzsivCElpdqhXr5dZ9Te-EE,9210 +fsspec/compression.py,sha256=3v_Fe39gzRRWfaeXpzNjAGPqgTzmETYRCo3qHVqD3po,5132 +fsspec/config.py,sha256=LF4Zmu1vhJW7Je9Q-cwkRc3xP7Rhyy7Xnwj26Z6sv2g,4279 +fsspec/conftest.py,sha256=uWfm_Qs5alPRxOhRpDfQ0-1jqSJ54pni4y96IxOREXM,3446 +fsspec/core.py,sha256=lc7XSnZU6_C6xljp7Z_xEGN3V7704hbeQLkxvPP0wds,24173 
+fsspec/dircache.py,sha256=YzogWJrhEastHU7vWz-cJiJ7sdtLXFXhEpInGKd4EcM,2717 +fsspec/exceptions.py,sha256=pauSLDMxzTJMOjvX1WEUK0cMyFkrFxpWJsyFywav7A8,331 +fsspec/fuse.py,sha256=Q-3NOOyLqBfYa4Db5E19z_ZY36zzYHtIs1mOUasItBQ,10177 +fsspec/generic.py,sha256=9QHQYMNb-8w8-eYuIqShcTjO_LeHXFoQTyt8J5oEq5Q,13482 +fsspec/gui.py,sha256=CQ7QsrTpaDlWSLNOpwNoJc7khOcYXIZxmrAJN9bHWQU,14002 +fsspec/implementations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +fsspec/implementations/__pycache__/__init__.cpython-310.pyc,, +fsspec/implementations/__pycache__/arrow.cpython-310.pyc,, +fsspec/implementations/__pycache__/asyn_wrapper.cpython-310.pyc,, +fsspec/implementations/__pycache__/cache_mapper.cpython-310.pyc,, +fsspec/implementations/__pycache__/cache_metadata.cpython-310.pyc,, +fsspec/implementations/__pycache__/cached.cpython-310.pyc,, +fsspec/implementations/__pycache__/chained.cpython-310.pyc,, +fsspec/implementations/__pycache__/dask.cpython-310.pyc,, +fsspec/implementations/__pycache__/data.cpython-310.pyc,, +fsspec/implementations/__pycache__/dbfs.cpython-310.pyc,, +fsspec/implementations/__pycache__/dirfs.cpython-310.pyc,, +fsspec/implementations/__pycache__/ftp.cpython-310.pyc,, +fsspec/implementations/__pycache__/gist.cpython-310.pyc,, +fsspec/implementations/__pycache__/git.cpython-310.pyc,, +fsspec/implementations/__pycache__/github.cpython-310.pyc,, +fsspec/implementations/__pycache__/http.cpython-310.pyc,, +fsspec/implementations/__pycache__/http_sync.cpython-310.pyc,, +fsspec/implementations/__pycache__/jupyter.cpython-310.pyc,, +fsspec/implementations/__pycache__/libarchive.cpython-310.pyc,, +fsspec/implementations/__pycache__/local.cpython-310.pyc,, +fsspec/implementations/__pycache__/memory.cpython-310.pyc,, +fsspec/implementations/__pycache__/reference.cpython-310.pyc,, +fsspec/implementations/__pycache__/sftp.cpython-310.pyc,, +fsspec/implementations/__pycache__/smb.cpython-310.pyc,, +fsspec/implementations/__pycache__/tar.cpython-310.pyc,, +fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc,, +fsspec/implementations/__pycache__/zip.cpython-310.pyc,, +fsspec/implementations/arrow.py,sha256=8FhvcvOYLZNMMegCYFFCEHgEqig8AkOU7Ehb8XfcgnA,8890 +fsspec/implementations/asyn_wrapper.py,sha256=3lfJkGs6D_AwRBdxTSYlL-RCVdaXBZ9Itys2P5o5Si0,3738 +fsspec/implementations/cache_mapper.py,sha256=W4wlxyPxZbSp9ItJ0pYRVBMh6bw9eFypgP6kUYuuiI4,2421 +fsspec/implementations/cache_metadata.py,sha256=ipIe4S8nlU_M9oRJkvTqr-b0tcbXVZsxH3GxaelaNOY,8502 +fsspec/implementations/cached.py,sha256=67ipbj-3o8O1zMGR11rZ_IWCi_7h-VRYpEAowFXqrvA,36175 +fsspec/implementations/chained.py,sha256=iGivpNaHUFjB_ea0-HAPhcmm6CL8qnDf270PSj7JwuE,680 +fsspec/implementations/dask.py,sha256=CXZbJzIVOhKV8ILcxuy3bTvcacCueAbyQxmvAkbPkrk,4466 +fsspec/implementations/data.py,sha256=IhOGDkacYp5gkl9jhEu4msQfZPb0gS5Q_ml7Mbr6dgQ,1627 +fsspec/implementations/dbfs.py,sha256=1cvvC6KBWOb8pBVpc01xavVbEPXO1xsgZvPD7H73M9k,16217 +fsspec/implementations/dirfs.py,sha256=VNj6gPMfmmLPK4wxbtxt7mUqW7xkh2XDgMmEmSK_E1c,12166 +fsspec/implementations/ftp.py,sha256=fJhaMIKq2RvzYlLwG3bewy2jq4iRqjVt1aIpwtUIRwI,13235 +fsspec/implementations/gist.py,sha256=Y6jTDrE-wuTwvpPyAQDuuOMBGxlajafKWoB1_yX6jdY,8528 +fsspec/implementations/git.py,sha256=qBDWMz5LNllPqVjr5jf_1FuNha4P5lyQI3IlhYg-wUE,3731 +fsspec/implementations/github.py,sha256=aCsZL8UvXZgdkcB1RUs3DdLeNrjLKcFsFYeQFDWbBFo,11653 +fsspec/implementations/http.py,sha256=-AV5qeNpBWqnsmgnIO9Ily9B6--SR4sQJ7G4cBHarGE,30675 
+fsspec/implementations/http_sync.py,sha256=UmBqd938ebwVjYgVtzg-ysG3ZoGhIJw0wFtQAfxV3Aw,30332 +fsspec/implementations/jupyter.py,sha256=q1PlQ66AAswGFyr8MFKWyobaV2YekMWRtqENBDQtD28,4002 +fsspec/implementations/libarchive.py,sha256=SpIA1F-zf7kb2-VYUVuhMrXTBOhBxUXKgEW1RaAdDoA,7098 +fsspec/implementations/local.py,sha256=ERDUdXdRI8AvRX06icXaDKwO-hcQgivc7EorqnayFFM,17028 +fsspec/implementations/memory.py,sha256=TDdLtSPWXxZKrrVGwmc3uS3oK_2mlcVTk2BiqR8IeII,10507 +fsspec/implementations/reference.py,sha256=xSUpB8o_QFAZiVJE2dt78QZMCUMLo5TaJ27e5DwDAfg,48814 +fsspec/implementations/sftp.py,sha256=L9pZOa6eLUWfJNtxkxeG2YI96SQwrM5Hj6ocyUZXUbg,5923 +fsspec/implementations/smb.py,sha256=5fhu8h06nOLBPh2c48aT7WBRqh9cEcbIwtyu06wTjec,15236 +fsspec/implementations/tar.py,sha256=dam78Tp_CozybNqCY2JYgGBS3Uc9FuJUAT9oB0lolOs,4111 +fsspec/implementations/webhdfs.py,sha256=osF2m0nhDil6sbMzYW_4DZzhxF4ygtb59XDiybd9Fyg,17589 +fsspec/implementations/zip.py,sha256=6f3z0s12tDbz1RMx7iDc3JDx730IAaKDdx7tf_XYDp0,6151 +fsspec/json.py,sha256=4EBZ-xOmRiyxmIqPIwxmDImosRQ7io7qBM2xjJPsEE4,3768 +fsspec/mapping.py,sha256=m2ndB_gtRBXYmNJg0Ie1-BVR75TFleHmIQBzC-yWhjU,8343 +fsspec/parquet.py,sha256=xGW3xfd9js7hrre7qN85XpSM0A1FObqkTcAv_H2xSwY,20505 +fsspec/registry.py,sha256=o7EGl8TEaLkcwN53X_103arzuzJeeOoVaNUWnPiXgf0,12148 +fsspec/spec.py,sha256=Ym-Ust6LRjHgbhrmvNqwOBZxoVnaw3g3xHXMZGHx_xg,77692 +fsspec/tests/abstract/__init__.py,sha256=4xUJrv7gDgc85xAOz1p-V_K1hrsdMWTSa0rviALlJk8,10181 +fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/common.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/get.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/mv.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/open.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/pipe.cpython-310.pyc,, +fsspec/tests/abstract/__pycache__/put.cpython-310.pyc,, +fsspec/tests/abstract/common.py,sha256=1GQwNo5AONzAnzZj0fWgn8NJPLXALehbsuGxS3FzWVU,4973 +fsspec/tests/abstract/copy.py,sha256=gU5-d97U3RSde35Vp4RxPY4rWwL744HiSrJ8IBOp9-8,19967 +fsspec/tests/abstract/get.py,sha256=vNR4HztvTR7Cj56AMo7_tx7TeYz1Jgr_2Wb8Lv-UiBY,20755 +fsspec/tests/abstract/mv.py,sha256=k8eUEBIrRrGMsBY5OOaDXdGnQUKGwDIfQyduB6YD3Ns,1982 +fsspec/tests/abstract/open.py,sha256=Fi2PBPYLbRqysF8cFm0rwnB41kMdQVYjq8cGyDXp3BU,329 +fsspec/tests/abstract/pipe.py,sha256=LFzIrLCB5GLXf9rzFKJmE8AdG7LQ_h4bJo70r8FLPqM,402 +fsspec/tests/abstract/put.py,sha256=7aih17OKB_IZZh1Mkq1eBDIjobhtMQmI8x-Pw-S_aZk,21201 +fsspec/transaction.py,sha256=xliRG6U2Zf3khG4xcw9WiB-yAoqJSHEGK_VjHOdtgo0,2398 +fsspec/utils.py,sha256=E24ji0XLWC6n3bw2sHA28OYxrGU9Wy_al2XydsRgrRk,23623 diff --git a/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..ae8ec1bdaa94d726ceb907542d76cbd5d38cafcd --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.28.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..67590a5e5be5a5a2dde3fe53a7512e404a896c22 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec-2026.2.0.dist-info/licenses/LICENSE @@ -0,0 +1,29 @@ +BSD 3-Clause License + 
+Copyright (c) 2018, Martin Durant +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/venv/lib/python3.10/site-packages/fsspec/__init__.py b/venv/lib/python3.10/site-packages/fsspec/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..452c78a055e72a6d04f1013d1a98fda33fdc449e --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/__init__.py @@ -0,0 +1,71 @@ +from . 
import caching +from ._version import __version__ # noqa: F401 +from .callbacks import Callback +from .compression import available_compressions +from .core import get_fs_token_paths, open, open_files, open_local, url_to_fs +from .exceptions import FSTimeoutError +from .mapping import FSMap, get_mapper +from .registry import ( + available_protocols, + filesystem, + get_filesystem_class, + register_implementation, + registry, +) +from .spec import AbstractFileSystem + +__all__ = [ + "AbstractFileSystem", + "FSTimeoutError", + "FSMap", + "filesystem", + "register_implementation", + "get_filesystem_class", + "get_fs_token_paths", + "get_mapper", + "open", + "open_files", + "open_local", + "registry", + "caching", + "Callback", + "available_protocols", + "available_compressions", + "url_to_fs", +] + + +def process_entries(): + try: + from importlib.metadata import entry_points + except ImportError: + return + if entry_points is not None: + try: + eps = entry_points() + except TypeError: + pass # importlib-metadata < 0.8 + else: + if hasattr(eps, "select"): # Python 3.10+ / importlib_metadata >= 3.9.0 + specs = eps.select(group="fsspec.specs") + else: + specs = eps.get("fsspec.specs", []) + registered_names = {} + for spec in specs: + err_msg = f"Unable to load filesystem from {spec}" + name = spec.name + if name in registered_names: + continue + registered_names[name] = True + register_implementation( + name, + spec.value.replace(":", "."), + errtxt=err_msg, + # We take our implementations as the ones to overload with if + # for some reason we encounter some, may be the same, already + # registered + clobber=True, + ) + + +process_entries() diff --git a/venv/lib/python3.10/site-packages/fsspec/_version.py b/venv/lib/python3.10/site-packages/fsspec/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..43aa383e6690a59bd71080d1dcffa276ea25ad29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/_version.py @@ -0,0 +1,34 @@ +# file generated by setuptools-scm +# don't change, don't track in version control + +__all__ = [ + "__version__", + "__version_tuple__", + "version", + "version_tuple", + "__commit_id__", + "commit_id", +] + +TYPE_CHECKING = False +if TYPE_CHECKING: + from typing import Tuple + from typing import Union + + VERSION_TUPLE = Tuple[Union[int, str], ...] + COMMIT_ID = Union[str, None] +else: + VERSION_TUPLE = object + COMMIT_ID = object + +version: str +__version__: str +__version_tuple__: VERSION_TUPLE +version_tuple: VERSION_TUPLE +commit_id: COMMIT_ID +__commit_id__: COMMIT_ID + +__version__ = version = '2026.2.0' +__version_tuple__ = version_tuple = (2026, 2, 0) + +__commit_id__ = commit_id = None diff --git a/venv/lib/python3.10/site-packages/fsspec/archive.py b/venv/lib/python3.10/site-packages/fsspec/archive.py new file mode 100644 index 0000000000000000000000000000000000000000..13a4da8df7c9405297cdd7d37476be2f725b2f57 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/archive.py @@ -0,0 +1,75 @@ +import operator + +from fsspec import AbstractFileSystem +from fsspec.utils import tokenize + + +class AbstractArchiveFileSystem(AbstractFileSystem): + """ + A generic superclass for implementing Archive-based filesystems. + + Currently, it is shared amongst + :class:`~fsspec.implementations.zip.ZipFileSystem`, + :class:`~fsspec.implementations.libarchive.LibArchiveFileSystem` and + :class:`~fsspec.implementations.tar.TarFileSystem`. 
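+
+    Examples
+    --------
+    A rough sketch; ``archive.zip`` is a placeholder path:
+
+    >>> import fsspec
+    >>> fs = fsspec.filesystem("zip", fo="archive.zip")  # doctest: +SKIP
+    >>> fs.ls("/")  # doctest: +SKIP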
+    """
+
+    def __str__(self):
+        return f"<Archive-like object {type(self).__name__} at {id(self)}>"
+
+    __repr__ = __str__
+
+    def ukey(self, path):
+        return tokenize(path, self.fo, self.protocol)
+
+    def _all_dirnames(self, paths):
+        """Returns *all* directory names for each path in paths, including intermediate
+        ones.
+
+        Parameters
+        ----------
+        paths: Iterable of path strings
+        """
+        if len(paths) == 0:
+            return set()
+
+        dirnames = {self._parent(path) for path in paths} - {self.root_marker}
+        return dirnames | self._all_dirnames(dirnames)
+
+    def info(self, path, **kwargs):
+        self._get_dirs()
+        path = self._strip_protocol(path)
+        if path in {"", "/"} and self.dir_cache:
+            return {"name": "", "type": "directory", "size": 0}
+        if path in self.dir_cache:
+            return self.dir_cache[path]
+        elif path + "/" in self.dir_cache:
+            return self.dir_cache[path + "/"]
+        else:
+            raise FileNotFoundError(path)
+
+    def ls(self, path, detail=True, **kwargs):
+        self._get_dirs()
+        paths = {}
+        for p, f in self.dir_cache.items():
+            p = p.rstrip("/")
+            if "/" in p:
+                root = p.rsplit("/", 1)[0]
+            else:
+                root = ""
+            if root == path.rstrip("/"):
+                paths[p] = f
+            elif all(
+                (a == b)
+                for a, b in zip(path.split("/"), [""] + p.strip("/").split("/"))
+            ):
+                # root directory entry
+                ppath = p.rstrip("/").split("/", 1)[0]
+                if ppath not in paths:
+                    out = {"name": ppath, "size": 0, "type": "directory"}
+                    paths[ppath] = out
+        if detail:
+            out = sorted(paths.values(), key=operator.itemgetter("name"))
+            return out
+        else:
+            return sorted(paths)
diff --git a/venv/lib/python3.10/site-packages/fsspec/asyn.py b/venv/lib/python3.10/site-packages/fsspec/asyn.py
new file mode 100644
index 0000000000000000000000000000000000000000..360758ac64608331d0976b8ee17c2c02d1f3e6d7
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec/asyn.py
@@ -0,0 +1,1103 @@
+import asyncio
+import asyncio.events
+import functools
+import inspect
+import io
+import numbers
+import os
+import re
+import threading
+from collections.abc import Iterable
+from glob import has_magic
+from typing import TYPE_CHECKING
+
+from .callbacks import DEFAULT_CALLBACK
+from .exceptions import FSTimeoutError
+from .implementations.local import LocalFileSystem, make_path_posix, trailing_sep
+from .spec import AbstractBufferedFile, AbstractFileSystem
+from .utils import glob_translate, is_exception, other_paths
+
+private = re.compile("_[^_]")
+iothread = [None]  # dedicated fsspec IO thread
+loop = [None]  # global event loop for any non-async instance
+_lock = None  # global lock placeholder
+get_running_loop = asyncio.get_running_loop
+
+
+def get_lock():
+    """Allocate or return a threading lock.
+
+    The lock is allocated on first use to allow setting one lock per forked process.
+    """
+    global _lock
+    if not _lock:
+        _lock = threading.Lock()
+    return _lock
+
+
+def reset_lock():
+    """Reset the global lock.
+
+    This should be called only on the init of a forked process to reset the lock to
+    None, enabling the new forked process to get a new lock.
+    """
+    global _lock
+
+    iothread[0] = None
+    loop[0] = None
+    _lock = None
+
+
+async def _runner(event, coro, result, timeout=None):
+    timeout = timeout if timeout else None  # convert 0 or 0.0 to None
+    if timeout is not None:
+        coro = asyncio.wait_for(coro, timeout=timeout)
+    try:
+        result[0] = await coro
+    except Exception as ex:
+        result[0] = ex
+    finally:
+        event.set()
+
+
+def sync(loop, func, *args, timeout=None, **kwargs):
+    """
+    Make loop run coroutine until it returns. Runs in other thread
Runs in other thread + + Examples + -------- + >>> fsspec.asyn.sync(fsspec.asyn.get_loop(), func, *args, + timeout=timeout, **kwargs) + """ + timeout = timeout if timeout else None # convert 0 or 0.0 to None + # NB: if the loop is not running *yet*, it is OK to submit work + # and we will wait for it + if loop is None or loop.is_closed(): + raise RuntimeError("Loop is not running") + try: + loop0 = asyncio.events.get_running_loop() + if loop0 is loop: + raise NotImplementedError("Calling sync() from within a running loop") + except NotImplementedError: + raise + except RuntimeError: + pass + coro = func(*args, **kwargs) + result = [None] + event = threading.Event() + asyncio.run_coroutine_threadsafe(_runner(event, coro, result, timeout), loop) + while True: + # this loops allows thread to get interrupted + if event.wait(1): + break + if timeout is not None: + timeout -= 1 + if timeout < 0: + raise FSTimeoutError + + return_result = result[0] + if isinstance(return_result, asyncio.TimeoutError): + # suppress asyncio.TimeoutError, raise FSTimeoutError + raise FSTimeoutError from return_result + elif isinstance(return_result, BaseException): + raise return_result + else: + return return_result + + +def sync_wrapper(func, obj=None): + """Given a function, make so can be called in blocking contexts + + Leave obj=None if defining within a class. Pass the instance if attaching + as an attribute of the instance. + """ + + @functools.wraps(func) + def wrapper(*args, **kwargs): + self = obj or args[0] + return sync(self.loop, func, *args, **kwargs) + + return wrapper + + +def get_loop(): + """Create or return the default fsspec IO loop + + The loop will be running on a separate thread. + """ + if loop[0] is None: + with get_lock(): + # repeat the check just in case the loop got filled between the + # previous two calls from another thread + if loop[0] is None: + loop[0] = asyncio.new_event_loop() + th = threading.Thread(target=loop[0].run_forever, name="fsspecIO") + th.daemon = True + th.start() + iothread[0] = th + return loop[0] + + +def reset_after_fork(): + global lock + loop[0] = None + iothread[0] = None + lock = None + + +if hasattr(os, "register_at_fork"): + # should be posix; this will do nothing for spawn or forkserver subprocesses + os.register_at_fork(after_in_child=reset_after_fork) + + +if TYPE_CHECKING: + import resource + + ResourceError = resource.error +else: + try: + import resource + except ImportError: + resource = None + ResourceError = OSError + else: + ResourceError = getattr(resource, "error", OSError) + +_DEFAULT_BATCH_SIZE = 128 +_NOFILES_DEFAULT_BATCH_SIZE = 1280 + + +def _get_batch_size(nofiles=False): + from fsspec.config import conf + + if nofiles: + if "nofiles_gather_batch_size" in conf: + return conf["nofiles_gather_batch_size"] + else: + if "gather_batch_size" in conf: + return conf["gather_batch_size"] + if nofiles: + return _NOFILES_DEFAULT_BATCH_SIZE + if resource is None: + return _DEFAULT_BATCH_SIZE + + try: + soft_limit, _ = resource.getrlimit(resource.RLIMIT_NOFILE) + except (ImportError, ValueError, ResourceError): + return _DEFAULT_BATCH_SIZE + + if soft_limit == resource.RLIM_INFINITY: + return -1 + else: + return soft_limit // 8 + + +def running_async() -> bool: + """Being executed by an event loop?""" + try: + asyncio.get_running_loop() + return True + except RuntimeError: + return False + + +async def _run_coros_in_chunks( + coros, + batch_size=None, + callback=DEFAULT_CALLBACK, + timeout=None, + return_exceptions=False, + nofiles=False, +): + """Run 
the given coroutines in chunks. + + Parameters + ---------- + coros: list of coroutines to run + batch_size: int or None + Number of coroutines to submit/wait on simultaneously. + If -1, then it will not be any throttling. If + None, it will be inferred from _get_batch_size() + callback: fsspec.callbacks.Callback instance + Gets a relative_update when each coroutine completes + timeout: number or None + If given, each coroutine times out after this time. Note that, since + there are multiple batches, the total run time of this function will in + general be longer + return_exceptions: bool + Same meaning as in asyncio.gather + nofiles: bool + If inferring the batch_size, does this operation involve local files? + If yes, you normally expect smaller batches. + """ + + if batch_size is None: + batch_size = _get_batch_size(nofiles=nofiles) + + if batch_size == -1: + batch_size = len(coros) + + assert batch_size > 0 + + async def _run_coro(coro, i): + try: + return await asyncio.wait_for(coro, timeout=timeout), i + except Exception as e: + if not return_exceptions: + raise + return e, i + finally: + callback.relative_update(1) + + i = 0 + n = len(coros) + results = [None] * n + pending = set() + + while pending or i < n: + while len(pending) < batch_size and i < n: + pending.add(asyncio.ensure_future(_run_coro(coros[i], i))) + i += 1 + + if not pending: + break + + done, pending = await asyncio.wait(pending, return_when=asyncio.FIRST_COMPLETED) + while done: + result, k = await done.pop() + results[k] = result + + return results + + +# these methods should be implemented as async by any async-able backend +async_methods = [ + "_ls", + "_cat_file", + "_get_file", + "_put_file", + "_rm_file", + "_cp_file", + "_pipe_file", + "_expand_path", + "_info", + "_isfile", + "_isdir", + "_exists", + "_walk", + "_glob", + "_find", + "_du", + "_size", + "_mkdir", + "_makedirs", +] + + +class AsyncFileSystem(AbstractFileSystem): + """Async file operations, default implementations + + Passes bulk operations to asyncio.gather for concurrent operation. + + Implementations that have concurrent batch operations and/or async methods + should inherit from this class instead of AbstractFileSystem. Docstrings are + copied from the un-underscored method in AbstractFileSystem, if not given. + """ + + # note that methods do not have docstring here; they will be copied + # for _* methods and inferred for overridden methods. 
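+    #
+    # For illustration (hypothetical subclass, not part of this module): a
+    # backend that implements only coroutines, e.g.
+    #
+    #     class MyFS(AsyncFileSystem):
+    #         async def _cat_file(self, path, **kwargs):
+    #             ...
+    #
+    # automatically gains a blocking ``cat_file`` method, generated by
+    # ``sync_wrapper`` and executed on the dedicated fsspec IO thread's loop.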
+ + async_impl = True + mirror_sync_methods = True + disable_throttling = False + + def __init__(self, *args, asynchronous=False, loop=None, batch_size=None, **kwargs): + self.asynchronous = asynchronous + self._pid = os.getpid() + if not asynchronous: + self._loop = loop or get_loop() + else: + self._loop = None + self.batch_size = batch_size + super().__init__(*args, **kwargs) + + @property + def loop(self): + if self._pid != os.getpid(): + raise RuntimeError("This class is not fork-safe") + return self._loop + + async def _rm_file(self, path, **kwargs): + if ( + inspect.iscoroutinefunction(self._rm) + and type(self)._rm is not AsyncFileSystem._rm + ): + return await self._rm(path, recursive=False, batch_size=1, **kwargs) + raise NotImplementedError + + async def _rm(self, path, recursive=False, batch_size=None, **kwargs): + # TODO: implement on_error + batch_size = batch_size or self.batch_size + path = await self._expand_path(path, recursive=recursive) + return await _run_coros_in_chunks( + [self._rm_file(p, **kwargs) for p in reversed(path)], + batch_size=batch_size, + nofiles=True, + ) + + async def _cp_file(self, path1, path2, **kwargs): + raise NotImplementedError + + async def _mv_file(self, path1, path2): + await self._cp_file(path1, path2) + await self._rm_file(path1) + + async def _copy( + self, + path1, + path2, + recursive=False, + on_error=None, + maxdepth=None, + batch_size=None, + **kwargs, + ): + if on_error is None and recursive: + on_error = "ignore" + elif on_error is None: + on_error = "raise" + + if isinstance(path1, list) and isinstance(path2, list): + # No need to expand paths when both source and destination + # are provided as lists + paths1 = path1 + paths2 = path2 + else: + source_is_str = isinstance(path1, str) + paths1 = await self._expand_path( + path1, maxdepth=maxdepth, recursive=recursive + ) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + paths1 = [ + p for p in paths1 if not (trailing_sep(p) or await self._isdir(p)) + ] + if not paths1: + return + + source_is_file = len(paths1) == 1 + dest_is_dir = isinstance(path2, str) and ( + trailing_sep(path2) or await self._isdir(path2) + ) + + exists = source_is_str and ( + (has_magic(path1) and source_is_file) + or (not has_magic(path1) and dest_is_dir and not trailing_sep(path1)) + ) + paths2 = other_paths( + paths1, + path2, + exists=exists, + flatten=not source_is_str, + ) + + batch_size = batch_size or self.batch_size + coros = [self._cp_file(p1, p2, **kwargs) for p1, p2 in zip(paths1, paths2)] + result = await _run_coros_in_chunks( + coros, batch_size=batch_size, return_exceptions=True, nofiles=True + ) + + for ex in filter(is_exception, result): + if on_error == "ignore" and isinstance(ex, FileNotFoundError): + continue + raise ex + + async def _pipe_file(self, path, value, mode="overwrite", **kwargs): + raise NotImplementedError + + async def _pipe(self, path, value=None, batch_size=None, **kwargs): + if isinstance(path, str): + path = {path: value} + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks( + [self._pipe_file(k, v, **kwargs) for k, v in path.items()], + batch_size=batch_size, + nofiles=True, + ) + + async def _process_limits(self, url, start, end): + """Helper for "Range"-based _cat_file""" + size = None + suff = False + if start is not None and start < 0: + # if start is negative and end None, end is the "suffix length" + if end is None: + end = -start + start = "" + suff = True + else: + size = 
size or (await self._info(url))["size"] + start = size + start + elif start is None: + start = 0 + if not suff: + if end is not None and end < 0: + if start is not None: + size = size or (await self._info(url))["size"] + end = size + end + elif end is None: + end = "" + if isinstance(end, numbers.Integral): + end -= 1 # bytes range is inclusive + return f"bytes={start}-{end}" + + async def _cat_file(self, path, start=None, end=None, **kwargs): + raise NotImplementedError + + async def _cat( + self, path, recursive=False, on_error="raise", batch_size=None, **kwargs + ): + paths = await self._expand_path(path, recursive=recursive) + coros = [self._cat_file(path, **kwargs) for path in paths] + batch_size = batch_size or self.batch_size + out = await _run_coros_in_chunks( + coros, batch_size=batch_size, nofiles=True, return_exceptions=True + ) + if on_error == "raise": + ex = next(filter(is_exception, out), False) + if ex: + raise ex + if ( + len(paths) > 1 + or isinstance(path, list) + or paths[0] != self._strip_protocol(path) + ): + return { + k: v + for k, v in zip(paths, out) + if on_error != "omit" or not is_exception(v) + } + else: + return out[0] + + async def _cat_ranges( + self, + paths, + starts, + ends, + max_gap=None, + batch_size=None, + on_error="return", + **kwargs, + ): + """Get the contents of byte ranges from one or more files + + Parameters + ---------- + paths: list + A list of of filepaths on this filesystems + starts, ends: int or list + Bytes limits of the read. If using a single int, the same value will be + used to read all the specified files. + """ + # TODO: on_error + if max_gap is not None: + # use utils.merge_offset_ranges + raise NotImplementedError + if not isinstance(paths, list): + raise TypeError + if not isinstance(starts, Iterable): + starts = [starts] * len(paths) + if not isinstance(ends, Iterable): + ends = [ends] * len(paths) + if len(starts) != len(paths) or len(ends) != len(paths): + raise ValueError + coros = [ + self._cat_file(p, start=s, end=e, **kwargs) + for p, s, e in zip(paths, starts, ends) + ] + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks( + coros, batch_size=batch_size, nofiles=True, return_exceptions=True + ) + + async def _put_file(self, lpath, rpath, mode="overwrite", **kwargs): + raise NotImplementedError + + async def _put( + self, + lpath, + rpath, + recursive=False, + callback=DEFAULT_CALLBACK, + batch_size=None, + maxdepth=None, + **kwargs, + ): + """Copy file(s) from local. + + Copies a specific file or tree of files (if recursive=True). If rpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. + + The put_file method will be called concurrently on a batch of files. The + batch_size option can configure the amount of futures that can be executed + at the same time. If it is -1, then all the files will be uploaded concurrently. + The default can be set for this instance by passing "batch_size" in the + constructor, or for all instances by setting the "gather_batch_size" key + in ``fsspec.config.conf``, falling back to 1/8th of the system limit . 
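+
+        Example (an illustrative call; the paths are hypothetical)::
+
+            await fs._put("local/data/", "bucket/data/", recursive=True,
+                          batch_size=64)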
+ """ + if isinstance(lpath, list) and isinstance(rpath, list): + # No need to expand paths when both source and destination + # are provided as lists + rpaths = rpath + lpaths = lpath + else: + source_is_str = isinstance(lpath, str) + if source_is_str: + lpath = make_path_posix(lpath) + fs = LocalFileSystem() + lpaths = fs.expand_path(lpath, recursive=recursive, maxdepth=maxdepth) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + lpaths = [p for p in lpaths if not (trailing_sep(p) or fs.isdir(p))] + if not lpaths: + return + + source_is_file = len(lpaths) == 1 + dest_is_dir = isinstance(rpath, str) and ( + trailing_sep(rpath) or await self._isdir(rpath) + ) + + rpath = self._strip_protocol(rpath) + exists = source_is_str and ( + (has_magic(lpath) and source_is_file) + or (not has_magic(lpath) and dest_is_dir and not trailing_sep(lpath)) + ) + rpaths = other_paths( + lpaths, + rpath, + exists=exists, + flatten=not source_is_str, + ) + + is_dir = {l: os.path.isdir(l) for l in lpaths} + rdirs = [r for l, r in zip(lpaths, rpaths) if is_dir[l]] + file_pairs = [(l, r) for l, r in zip(lpaths, rpaths) if not is_dir[l]] + + await asyncio.gather(*[self._makedirs(d, exist_ok=True) for d in rdirs]) + batch_size = batch_size or self.batch_size + + coros = [] + callback.set_size(len(file_pairs)) + for lfile, rfile in file_pairs: + put_file = callback.branch_coro(self._put_file) + coros.append(put_file(lfile, rfile, **kwargs)) + + return await _run_coros_in_chunks( + coros, batch_size=batch_size, callback=callback + ) + + async def _get_file(self, rpath, lpath, **kwargs): + raise NotImplementedError + + async def _get( + self, + rpath, + lpath, + recursive=False, + callback=DEFAULT_CALLBACK, + maxdepth=None, + **kwargs, + ): + """Copy file(s) to local. + + Copies a specific file or tree of files (if recursive=True). If lpath + ends with a "/", it will be assumed to be a directory, and target files + will go within. Can submit a list of paths, which may be glob-patterns + and will be expanded. + + The get_file method will be called concurrently on a batch of files. The + batch_size option can configure the amount of futures that can be executed + at the same time. If it is -1, then all the files will be uploaded concurrently. + The default can be set for this instance by passing "batch_size" in the + constructor, or for all instances by setting the "gather_batch_size" key + in ``fsspec.config.conf``, falling back to 1/8th of the system limit . + """ + if isinstance(lpath, list) and isinstance(rpath, list): + # No need to expand paths when both source and destination + # are provided as lists + rpaths = rpath + lpaths = lpath + else: + source_is_str = isinstance(rpath, str) + # First check for rpath trailing slash as _strip_protocol removes it. 
+ source_not_trailing_sep = source_is_str and not trailing_sep(rpath) + rpath = self._strip_protocol(rpath) + rpaths = await self._expand_path( + rpath, recursive=recursive, maxdepth=maxdepth + ) + if source_is_str and (not recursive or maxdepth is not None): + # Non-recursive glob does not copy directories + rpaths = [ + p for p in rpaths if not (trailing_sep(p) or await self._isdir(p)) + ] + if not rpaths: + return + + lpath = make_path_posix(lpath) + source_is_file = len(rpaths) == 1 + dest_is_dir = isinstance(lpath, str) and ( + trailing_sep(lpath) or LocalFileSystem().isdir(lpath) + ) + + exists = source_is_str and ( + (has_magic(rpath) and source_is_file) + or (not has_magic(rpath) and dest_is_dir and source_not_trailing_sep) + ) + lpaths = other_paths( + rpaths, + lpath, + exists=exists, + flatten=not source_is_str, + ) + + [os.makedirs(os.path.dirname(lp), exist_ok=True) for lp in lpaths] + batch_size = kwargs.pop("batch_size", self.batch_size) + + coros = [] + callback.set_size(len(lpaths)) + for lpath, rpath in zip(lpaths, rpaths): + get_file = callback.branch_coro(self._get_file) + coros.append(get_file(rpath, lpath, **kwargs)) + return await _run_coros_in_chunks( + coros, batch_size=batch_size, callback=callback + ) + + async def _isfile(self, path): + try: + return (await self._info(path))["type"] == "file" + except: # noqa: E722 + return False + + async def _isdir(self, path): + try: + return (await self._info(path))["type"] == "directory" + except OSError: + return False + + async def _size(self, path): + return (await self._info(path)).get("size", None) + + async def _sizes(self, paths, batch_size=None): + batch_size = batch_size or self.batch_size + return await _run_coros_in_chunks( + [self._size(p) for p in paths], batch_size=batch_size + ) + + async def _exists(self, path, **kwargs): + try: + await self._info(path, **kwargs) + return True + except FileNotFoundError: + return False + + async def _info(self, path, **kwargs): + raise NotImplementedError + + async def _ls(self, path, detail=True, **kwargs): + raise NotImplementedError + + async def _walk(self, path, maxdepth=None, on_error="omit", **kwargs): + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + path = self._strip_protocol(path) + full_dirs = {} + dirs = {} + files = {} + + detail = kwargs.pop("detail", False) + try: + listing = await self._ls(path, detail=True, **kwargs) + except (FileNotFoundError, OSError) as e: + if on_error == "raise": + raise + elif callable(on_error): + on_error(e) + if detail: + yield path, {}, {} + else: + yield path, [], [] + return + + for info in listing: + # each info name must be at least [path]/part , but here + # we check also for names like [path]/part/ + pathname = info["name"].rstrip("/") + name = pathname.rsplit("/", 1)[-1] + if info["type"] == "directory" and pathname != path: + # do not include "self" path + full_dirs[name] = pathname + dirs[name] = info + elif pathname == path: + # file-like with same name as give path + files[""] = info + else: + files[name] = info + + if detail: + yield path, dirs, files + else: + yield path, list(dirs), list(files) + + if maxdepth is not None: + maxdepth -= 1 + if maxdepth < 1: + return + + for d in dirs: + async for _ in self._walk( + full_dirs[d], maxdepth=maxdepth, detail=detail, **kwargs + ): + yield _ + + async def _glob(self, path, maxdepth=None, **kwargs): + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + import re + + seps = 
(os.path.sep, os.path.altsep) if os.path.altsep else (os.path.sep,) + ends_with_sep = path.endswith(seps) # _strip_protocol strips trailing slash + path = self._strip_protocol(path) + append_slash_to_dirname = ends_with_sep or path.endswith( + tuple(sep + "**" for sep in seps) + ) + idx_star = path.find("*") if path.find("*") >= 0 else len(path) + idx_qmark = path.find("?") if path.find("?") >= 0 else len(path) + idx_brace = path.find("[") if path.find("[") >= 0 else len(path) + + min_idx = min(idx_star, idx_qmark, idx_brace) + + detail = kwargs.pop("detail", False) + withdirs = kwargs.pop("withdirs", True) + + if not has_magic(path): + if await self._exists(path, **kwargs): + if not detail: + return [path] + else: + return {path: await self._info(path, **kwargs)} + else: + if not detail: + return [] # glob of non-existent returns empty + else: + return {} + elif "/" in path[:min_idx]: + min_idx = path[:min_idx].rindex("/") + root = path[: min_idx + 1] + depth = path[min_idx + 1 :].count("/") + 1 + else: + root = "" + depth = path[min_idx + 1 :].count("/") + 1 + + if "**" in path: + if maxdepth is not None: + idx_double_stars = path.find("**") + depth_double_stars = path[idx_double_stars:].count("/") + 1 + depth = depth - depth_double_stars + maxdepth + else: + depth = None + + allpaths = await self._find( + root, maxdepth=depth, withdirs=withdirs, detail=True, **kwargs + ) + + pattern = glob_translate(path + ("/" if ends_with_sep else "")) + pattern = re.compile(pattern) + + out = { + p: info + for p, info in sorted(allpaths.items()) + if pattern.match( + p + "/" + if append_slash_to_dirname and info["type"] == "directory" + else p + ) + } + + if detail: + return out + else: + return list(out) + + async def _du(self, path, total=True, maxdepth=None, **kwargs): + sizes = {} + # async for? + for f in await self._find(path, maxdepth=maxdepth, **kwargs): + info = await self._info(f) + sizes[info["name"]] = info["size"] + if total: + return sum(sizes.values()) + else: + return sizes + + async def _find(self, path, maxdepth=None, withdirs=False, **kwargs): + path = self._strip_protocol(path) + out = {} + detail = kwargs.pop("detail", False) + + # Add the root directory if withdirs is requested + # This is needed for posix glob compliance + if withdirs and path != "" and await self._isdir(path): + out[path] = await self._info(path) + + # async for? + async for _, dirs, files in self._walk(path, maxdepth, detail=True, **kwargs): + if withdirs: + files.update(dirs) + out.update({info["name"]: info for name, info in files.items()}) + if not out and (await self._isfile(path)): + # walk works on directories, but find should also return [path] + # when path happens to be a file + out[path] = {} + names = sorted(out) + if not detail: + return names + else: + return {name: out[name] for name in names} + + async def _expand_path(self, path, recursive=False, maxdepth=None): + if maxdepth is not None and maxdepth < 1: + raise ValueError("maxdepth must be at least 1") + + if isinstance(path, str): + out = await self._expand_path([path], recursive, maxdepth) + else: + out = set() + path = [self._strip_protocol(p) for p in path] + for p in path: # can gather here + if has_magic(p): + bit = set(await self._glob(p, maxdepth=maxdepth)) + out |= bit + if recursive: + # glob call above expanded one depth so if maxdepth is defined + # then decrement it in expand_path call below. If it is zero + # after decrementing then avoid expand_path call. 
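+                    # (For example, with maxdepth=2 the glob above consumed
+                    # one level, so the recursive call below gets maxdepth=1;
+                    # with maxdepth=1 nothing is left to expand and we skip.)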
+ if maxdepth is not None and maxdepth <= 1: + continue + out |= set( + await self._expand_path( + list(bit), + recursive=recursive, + maxdepth=maxdepth - 1 if maxdepth is not None else None, + ) + ) + continue + elif recursive: + rec = set(await self._find(p, maxdepth=maxdepth, withdirs=True)) + out |= rec + if p not in out and (recursive is False or (await self._exists(p))): + # should only check once, for the root + out.add(p) + if not out: + raise FileNotFoundError(path) + return sorted(out) + + async def _mkdir(self, path, create_parents=True, **kwargs): + pass # not necessary to implement, may not have directories + + async def _makedirs(self, path, exist_ok=False): + pass # not necessary to implement, may not have directories + + async def open_async(self, path, mode="rb", **kwargs): + if "b" not in mode or kwargs.get("compression"): + raise ValueError + raise NotImplementedError + + +def mirror_sync_methods(obj): + """Populate sync and async methods for obj + + For each method will create a sync version if the name refers to an async method + (coroutine) and there is no override in the child class; will create an async + method for the corresponding sync method if there is no implementation. + + Uses the methods specified in + - async_methods: the set that an implementation is expected to provide + - default_async_methods: that can be derived from their sync version in + AbstractFileSystem + - AsyncFileSystem: async-specific default coroutines + """ + from fsspec import AbstractFileSystem + + for method in async_methods + dir(AsyncFileSystem): + if not method.startswith("_"): + continue + smethod = method[1:] + if private.match(method): + isco = inspect.iscoroutinefunction(getattr(obj, method, None)) + unsync = getattr(getattr(obj, smethod, False), "__func__", None) + is_default = unsync is getattr(AbstractFileSystem, smethod, "") + if isco and is_default: + mth = sync_wrapper(getattr(obj, method), obj=obj) + setattr(obj, smethod, mth) + if not mth.__doc__: + mth.__doc__ = getattr( + getattr(AbstractFileSystem, smethod, None), "__doc__", "" + ) + + +class FSSpecCoroutineCancel(Exception): + pass + + +def _dump_running_tasks( + printout=True, cancel=True, exc=FSSpecCoroutineCancel, with_task=False +): + import traceback + + tasks = [t for t in asyncio.tasks.all_tasks(loop[0]) if not t.done()] + if printout: + [task.print_stack() for task in tasks] + out = [ + { + "locals": task._coro.cr_frame.f_locals, + "file": task._coro.cr_frame.f_code.co_filename, + "firstline": task._coro.cr_frame.f_code.co_firstlineno, + "linelo": task._coro.cr_frame.f_lineno, + "stack": traceback.format_stack(task._coro.cr_frame), + "task": task if with_task else None, + } + for task in tasks + ] + if cancel: + for t in tasks: + cbs = t._callbacks + t.cancel() + asyncio.futures.Future.set_exception(t, exc) + asyncio.futures.Future.cancel(t) + [cb[0](t) for cb in cbs] # cancels any dependent concurrent.futures + try: + t._coro.throw(exc) # exits coro, unless explicitly handled + except exc: + pass + return out + + +class AbstractAsyncStreamedFile(AbstractBufferedFile): + # no read buffering, and always auto-commit + # TODO: readahead might still be useful here, but needs async version + + async def read(self, length=-1): + """ + Return data from cache, or fetch pieces as necessary + + Parameters + ---------- + length: int (-1) + Number of bytes to read; if <0, all remaining bytes. 
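+
+        An illustrative call (assumes a backend that implements
+        ``open_async``)::
+
+            f = await fs.open_async("remote/file.bin", "rb")
+            header = await f.read(16)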
+ """ + length = -1 if length is None else int(length) + if self.mode != "rb": + raise ValueError("File not in read mode") + if length < 0: + length = self.size - self.loc + if self.closed: + raise ValueError("I/O operation on closed file.") + if length == 0: + # don't even bother calling fetch + return b"" + out = await self._fetch_range(self.loc, self.loc + length) + self.loc += len(out) + return out + + async def write(self, data): + """ + Write data to buffer. + + Buffer only sent on flush() or if buffer is greater than + or equal to blocksize. + + Parameters + ---------- + data: bytes + Set of bytes to be written. + """ + if self.mode not in {"wb", "ab"}: + raise ValueError("File not in write mode") + if self.closed: + raise ValueError("I/O operation on closed file.") + if self.forced: + raise ValueError("This file has been force-flushed, can only close") + out = self.buffer.write(data) + self.loc += out + if self.buffer.tell() >= self.blocksize: + await self.flush() + return out + + async def close(self): + """Close file + + Finalizes writes, discards cache + """ + if getattr(self, "_unclosable", False): + return + if self.closed: + return + if self.mode == "rb": + self.cache = None + else: + if not self.forced: + await self.flush(force=True) + + if self.fs is not None: + self.fs.invalidate_cache(self.path) + self.fs.invalidate_cache(self.fs._parent(self.path)) + + self.closed = True + + async def flush(self, force=False): + if self.closed: + raise ValueError("Flush on closed file") + if force and self.forced: + raise ValueError("Force flush cannot be called more than once") + if force: + self.forced = True + + if self.mode not in {"wb", "ab"}: + # no-op to flush on read-mode + return + + if not force and self.buffer.tell() < self.blocksize: + # Defer write on small block + return + + if self.offset is None: + # Initialize a multipart upload + self.offset = 0 + try: + await self._initiate_upload() + except: + self.closed = True + raise + + if await self._upload_chunk(final=force) is not False: + self.offset += self.buffer.seek(0, 2) + self.buffer = io.BytesIO() + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_val, exc_tb): + await self.close() + + async def _fetch_range(self, start, end): + raise NotImplementedError + + async def _initiate_upload(self): + pass + + async def _upload_chunk(self, final=False): + raise NotImplementedError diff --git a/venv/lib/python3.10/site-packages/fsspec/caching.py b/venv/lib/python3.10/site-packages/fsspec/caching.py new file mode 100644 index 0000000000000000000000000000000000000000..9b49d7f5e236c11f6ff625721d02e8ab42086a7f --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/caching.py @@ -0,0 +1,1004 @@ +from __future__ import annotations + +import collections +import functools +import logging +import math +import os +import threading +from collections import OrderedDict +from collections.abc import Callable +from concurrent.futures import Future, ThreadPoolExecutor +from itertools import groupby +from operator import itemgetter +from typing import TYPE_CHECKING, Any, ClassVar, Generic, NamedTuple, TypeVar + +if TYPE_CHECKING: + import mmap + + from typing_extensions import ParamSpec + + P = ParamSpec("P") +else: + P = TypeVar("P") + +T = TypeVar("T") + + +logger = logging.getLogger("fsspec.caching") + +Fetcher = Callable[[int, int], bytes] # Maps (start, end) to bytes +MultiFetcher = Callable[[list[int, int]], bytes] # Maps [(start, end)] to bytes + + +class BaseCache: + """Pass-though cache: 
doesn't keep anything, calls every time + + Acts as base class for other cachers + + Parameters + ---------- + blocksize: int + How far to read ahead in numbers of bytes + fetcher: func + Function of the form f(start, end) which gets bytes from remote as + specified + size: int + How big this file is + """ + + name: ClassVar[str] = "none" + + def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None: + self.blocksize = blocksize + self.nblocks = 0 + self.fetcher = fetcher + self.size = size + self.hit_count = 0 + self.miss_count = 0 + # the bytes that we actually requested + self.total_requested_bytes = 0 + + def _fetch(self, start: int | None, stop: int | None) -> bytes: + if start is None: + start = 0 + if stop is None: + stop = self.size + if start >= self.size or start >= stop: + return b"" + return self.fetcher(start, stop) + + def _reset_stats(self) -> None: + """Reset hit and miss counts for a more ganular report e.g. by file.""" + self.hit_count = 0 + self.miss_count = 0 + self.total_requested_bytes = 0 + + def _log_stats(self) -> str: + """Return a formatted string of the cache statistics.""" + if self.hit_count == 0 and self.miss_count == 0: + # a cache that does nothing, this is for logs only + return "" + return f" , {self.name}: {self.hit_count} hits, {self.miss_count} misses, {self.total_requested_bytes} total requested bytes" + + def __repr__(self) -> str: + # TODO: use rich for better formatting + return f""" + <{self.__class__.__name__}: + block size : {self.blocksize} + block count : {self.nblocks} + file size : {self.size} + cache hits : {self.hit_count} + cache misses: {self.miss_count} + total requested bytes: {self.total_requested_bytes}> + """ + + +class MMapCache(BaseCache): + """memory-mapped sparse file cache + + Opens temporary file, which is filled blocks-wise when data is requested. + Ensure there is enough disc space in the temporary location. + + This cache method might only work on posix + + Parameters + ---------- + blocksize: int + How far to read ahead in numbers of bytes + fetcher: Fetcher + Function of the form f(start, end) which gets bytes from remote as + specified + size: int + How big this file is + location: str + Where to create the temporary file. If None, a temporary file is + created using tempfile.TemporaryFile(). + blocks: set[int] + Set of block numbers that have already been fetched. If None, an empty + set is created. + multi_fetcher: MultiFetcher + Function of the form f([(start, end)]) which gets bytes from remote + as specified. This function is used to fetch multiple blocks at once. + If not specified, the fetcher function is used instead. 
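+
+    A small sketch (illustrative; any ``f(start, end) -> bytes`` callable can
+    serve as the fetcher)::
+
+        data = b"0123456789" * 1000
+        cache = MMapCache(2048, lambda s, e: data[s:e], len(data))
+        assert cache._fetch(10, 20) == data[10:20]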
+ """ + + name = "mmap" + + def __init__( + self, + blocksize: int, + fetcher: Fetcher, + size: int, + location: str | None = None, + blocks: set[int] | None = None, + multi_fetcher: MultiFetcher | None = None, + ) -> None: + super().__init__(blocksize, fetcher, size) + self.blocks = set() if blocks is None else blocks + self.location = location + self.multi_fetcher = multi_fetcher + self.cache = self._makefile() + + def _makefile(self) -> mmap.mmap | bytearray: + import mmap + import tempfile + + if self.size == 0: + return bytearray() + + # posix version + if self.location is None or not os.path.exists(self.location): + if self.location is None: + fd = tempfile.TemporaryFile() + self.blocks = set() + else: + fd = open(self.location, "wb+") + fd.seek(self.size - 1) + fd.write(b"1") + fd.flush() + else: + fd = open(self.location, "r+b") + + return mmap.mmap(fd.fileno(), self.size) + + def _fetch(self, start: int | None, end: int | None) -> bytes: + logger.debug(f"MMap cache fetching {start}-{end}") + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + start_block = start // self.blocksize + end_block = end // self.blocksize + block_range = range(start_block, end_block + 1) + # Determine which blocks need to be fetched. This sequence is sorted by construction. + need = (i for i in block_range if i not in self.blocks) + # Count the number of blocks already cached + self.hit_count += sum(1 for i in block_range if i in self.blocks) + + ranges = [] + + # Consolidate needed blocks. + # Algorithm adapted from Python 2.x itertools documentation. + # We are grouping an enumerated sequence of blocks. By comparing when the difference + # between an ascending range (provided by enumerate) and the needed block numbers + # we can detect when the block number skips values. The key computes this difference. + # Whenever the difference changes, we know that we have previously cached block(s), + # and a new group is started. In other words, this algorithm neatly groups + # runs of consecutive block numbers so they can be fetched together. + for _, _blocks in groupby(enumerate(need), key=lambda x: x[0] - x[1]): + # Extract the blocks from the enumerated sequence + _blocks = tuple(map(itemgetter(1), _blocks)) + # Compute start of first block + sstart = _blocks[0] * self.blocksize + # Compute the end of the last block. Last block may not be full size. + send = min(_blocks[-1] * self.blocksize + self.blocksize, self.size) + + # Fetch bytes (could be multiple consecutive blocks) + self.total_requested_bytes += send - sstart + logger.debug( + f"MMap get blocks {_blocks[0]}-{_blocks[-1]} ({sstart}-{send})" + ) + ranges.append((sstart, send)) + + # Update set of cached blocks + self.blocks.update(_blocks) + # Update cache statistics with number of blocks we had to cache + self.miss_count += len(_blocks) + + if not ranges: + return self.cache[start:end] + + if self.multi_fetcher: + logger.debug(f"MMap get blocks {ranges}") + for idx, r in enumerate(self.multi_fetcher(ranges)): + sstart, send = ranges[idx] + logger.debug(f"MMap copy block ({sstart}-{send}") + self.cache[sstart:send] = r + else: + for sstart, send in ranges: + logger.debug(f"MMap get block ({sstart}-{send}") + self.cache[sstart:send] = self.fetcher(sstart, send) + + return self.cache[start:end] + + def __getstate__(self) -> dict[str, Any]: + state = self.__dict__.copy() + # Remove the unpicklable entries. 
+ del state["cache"] + return state + + def __setstate__(self, state: dict[str, Any]) -> None: + # Restore instance attributes + self.__dict__.update(state) + self.cache = self._makefile() + + +class ReadAheadCache(BaseCache): + """Cache which reads only when we get beyond a block of data + + This is a much simpler version of BytesCache, and does not attempt to + fill holes in the cache or keep fragments alive. It is best suited to + many small reads in a sequential order (e.g., reading lines from a file). + """ + + name = "readahead" + + def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None: + super().__init__(blocksize, fetcher, size) + self.cache = b"" + self.start = 0 + self.end = 0 + + def _fetch(self, start: int | None, end: int | None) -> bytes: + if start is None: + start = 0 + if end is None or end > self.size: + end = self.size + if start >= self.size or start >= end: + return b"" + l = end - start + if start >= self.start and end <= self.end: + # cache hit + self.hit_count += 1 + return self.cache[start - self.start : end - self.start] + elif self.start <= start < self.end: + # partial hit + self.miss_count += 1 + part = self.cache[start - self.start :] + l -= len(part) + start = self.end + else: + # miss + self.miss_count += 1 + part = b"" + end = min(self.size, end + self.blocksize) + self.total_requested_bytes += end - start + self.cache = self.fetcher(start, end) # new block replaces old + self.start = start + self.end = self.start + len(self.cache) + return part + self.cache[:l] + + +class FirstChunkCache(BaseCache): + """Caches the first block of a file only + + This may be useful for file types where the metadata is stored in the header, + but is randomly accessed. + """ + + name = "first" + + def __init__(self, blocksize: int, fetcher: Fetcher, size: int) -> None: + if blocksize > size: + # this will buffer the whole thing + blocksize = size + super().__init__(blocksize, fetcher, size) + self.cache: bytes | None = None + + def _fetch(self, start: int | None, end: int | None) -> bytes: + start = start or 0 + if start > self.size: + logger.debug("FirstChunkCache: requested start > file size") + return b"" + + end = min(end, self.size) + + if start < self.blocksize: + if self.cache is None: + self.miss_count += 1 + if end > self.blocksize: + self.total_requested_bytes += end + data = self.fetcher(0, end) + self.cache = data[: self.blocksize] + return data[start:] + self.cache = self.fetcher(0, self.blocksize) + self.total_requested_bytes += self.blocksize + part = self.cache[start:end] + if end > self.blocksize: + self.total_requested_bytes += end - self.blocksize + part += self.fetcher(self.blocksize, end) + self.hit_count += 1 + return part + else: + self.miss_count += 1 + self.total_requested_bytes += end - start + return self.fetcher(start, end) + + +class BlockCache(BaseCache): + """ + Cache holding memory as a set of blocks. + + Requests are only ever made ``blocksize`` at a time, and are + stored in an LRU cache. The least recently accessed block is + discarded when more than ``maxblocks`` are stored. + + Parameters + ---------- + blocksize : int + The number of bytes to store in each block. + Requests are only ever made for ``blocksize``, so this + should balance the overhead of making a request against + the granularity of the blocks. + fetcher : Callable + size : int + The total size of the file being cached. + maxblocks : int + The maximum number of blocks to cache for. The maximum memory + use for this cache is then ``blocksize * maxblocks``. 
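+
+    A short sketch (illustrative fetcher and sizes)::
+
+        blob = bytes(16384)
+        cache = BlockCache(4096, lambda s, e: blob[s:e], len(blob), maxblocks=2)
+        cache._fetch(0, 10)   # loads block 0 into the LRU
+        cache.cache_info()    # stats straight from functools.lru_cache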
+ """ + + name = "blockcache" + + def __init__( + self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32 + ) -> None: + super().__init__(blocksize, fetcher, size) + self.nblocks = math.ceil(size / blocksize) + self.maxblocks = maxblocks + self._fetch_block_cached = functools.lru_cache(maxblocks)(self._fetch_block) + + def cache_info(self): + """ + The statistics on the block cache. + + Returns + ------- + NamedTuple + Returned directly from the LRU Cache used internally. + """ + return self._fetch_block_cached.cache_info() + + def __getstate__(self) -> dict[str, Any]: + state = self.__dict__ + del state["_fetch_block_cached"] + return state + + def __setstate__(self, state: dict[str, Any]) -> None: + self.__dict__.update(state) + self._fetch_block_cached = functools.lru_cache(state["maxblocks"])( + self._fetch_block + ) + + def _fetch(self, start: int | None, end: int | None) -> bytes: + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + + return self._read_cache( + start, end, start // self.blocksize, (end - 1) // self.blocksize + ) + + def _fetch_block(self, block_number: int) -> bytes: + """ + Fetch the block of data for `block_number`. + """ + if block_number > self.nblocks: + raise ValueError( + f"'block_number={block_number}' is greater than " + f"the number of blocks ({self.nblocks})" + ) + + start = block_number * self.blocksize + end = start + self.blocksize + self.total_requested_bytes += end - start + self.miss_count += 1 + logger.info("BlockCache fetching block %d", block_number) + block_contents = super()._fetch(start, end) + return block_contents + + def _read_cache( + self, start: int, end: int, start_block_number: int, end_block_number: int + ) -> bytes: + """ + Read from our block cache. + + Parameters + ---------- + start, end : int + The start and end byte positions. + start_block_number, end_block_number : int + The start and end block numbers. + """ + start_pos = start % self.blocksize + end_pos = end % self.blocksize + if end_pos == 0: + end_pos = self.blocksize + + self.hit_count += 1 + if start_block_number == end_block_number: + block: bytes = self._fetch_block_cached(start_block_number) + return block[start_pos:end_pos] + + else: + # read from the initial + out = [self._fetch_block_cached(start_block_number)[start_pos:]] + + # intermediate blocks + # Note: it'd be nice to combine these into one big request. However + # that doesn't play nicely with our LRU cache. + out.extend( + map( + self._fetch_block_cached, + range(start_block_number + 1, end_block_number), + ) + ) + + # final block + out.append(self._fetch_block_cached(end_block_number)[:end_pos]) + + return b"".join(out) + + +class BytesCache(BaseCache): + """Cache which holds data in a in-memory bytes object + + Implements read-ahead by the block size, for semi-random reads progressing + through the file. + + Parameters + ---------- + trim: bool + As we read more data, whether to discard the start of the buffer when + we are more than a blocksize ahead of it. + """ + + name: ClassVar[str] = "bytes" + + def __init__( + self, blocksize: int, fetcher: Fetcher, size: int, trim: bool = True + ) -> None: + super().__init__(blocksize, fetcher, size) + self.cache = b"" + self.start: int | None = None + self.end: int | None = None + self.trim = trim + + def _fetch(self, start: int | None, end: int | None) -> bytes: + # TODO: only set start/end after fetch, in case it fails? + # is this where retry logic might go? 
+ if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + if ( + self.start is not None + and start >= self.start + and self.end is not None + and end < self.end + ): + # cache hit: we have all the required data + offset = start - self.start + self.hit_count += 1 + return self.cache[offset : offset + end - start] + + if self.blocksize: + bend = min(self.size, end + self.blocksize) + else: + bend = end + + if bend == start or start > self.size: + return b"" + + if (self.start is None or start < self.start) and ( + self.end is None or end > self.end + ): + # First read, or extending both before and after + self.total_requested_bytes += bend - start + self.miss_count += 1 + self.cache = self.fetcher(start, bend) + self.start = start + else: + assert self.start is not None + assert self.end is not None + self.miss_count += 1 + + if start < self.start: + if self.end is None or self.end - end > self.blocksize: + self.total_requested_bytes += bend - start + self.cache = self.fetcher(start, bend) + self.start = start + else: + self.total_requested_bytes += self.start - start + new = self.fetcher(start, self.start) + self.start = start + self.cache = new + self.cache + elif self.end is not None and bend > self.end: + if self.end > self.size: + pass + elif end - self.end > self.blocksize: + self.total_requested_bytes += bend - start + self.cache = self.fetcher(start, bend) + self.start = start + else: + self.total_requested_bytes += bend - self.end + new = self.fetcher(self.end, bend) + self.cache = self.cache + new + + self.end = self.start + len(self.cache) + offset = start - self.start + out = self.cache[offset : offset + end - start] + if self.trim: + num = (self.end - self.start) // (self.blocksize + 1) + if num > 1: + self.start += self.blocksize * num + self.cache = self.cache[self.blocksize * num :] + return out + + def __len__(self) -> int: + return len(self.cache) + + +class AllBytes(BaseCache): + """Cache entire contents of the file""" + + name: ClassVar[str] = "all" + + def __init__( + self, + blocksize: int | None = None, + fetcher: Fetcher | None = None, + size: int | None = None, + data: bytes | None = None, + ) -> None: + super().__init__(blocksize, fetcher, size) # type: ignore[arg-type] + if data is None: + self.miss_count += 1 + self.total_requested_bytes += self.size + data = self.fetcher(0, self.size) + self.data = data + + def _fetch(self, start: int | None, stop: int | None) -> bytes: + self.hit_count += 1 + return self.data[start:stop] + + +class KnownPartsOfAFile(BaseCache): + """ + Cache holding known file parts. + + Parameters + ---------- + blocksize: int + How far to read ahead in numbers of bytes + fetcher: func + Function of the form f(start, end) which gets bytes from remote as + specified + size: int + How big this file is + data: dict + A dictionary mapping explicit `(start, stop)` file-offset tuples + with known bytes. + strict: bool, default True + Whether to fetch reads that go beyond a known byte-range boundary. + If `False`, any read that ends outside a known part will be zero + padded. Note that zero padding will not be used for reads that + begin outside a known byte-range. 
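+
+    A minimal sketch (illustrative data; contiguous parts are consolidated at
+    construction time)::
+
+        parts = {(0, 4): b"abcd", (4, 8): b"efgh"}  # becomes one (0, 8) block
+        cache = KnownPartsOfAFile(0, None, 8, data=parts, strict=True)
+        assert cache._fetch(2, 6) == b"cdef"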
+ """ + + name: ClassVar[str] = "parts" + + def __init__( + self, + blocksize: int, + fetcher: Fetcher, + size: int, + data: dict[tuple[int, int], bytes] | None = None, + strict: bool = False, + **_: Any, + ): + super().__init__(blocksize, fetcher, size) + self.strict = strict + + # simple consolidation of contiguous blocks + if data: + old_offsets = sorted(data.keys()) + offsets = [old_offsets[0]] + blocks = [data.pop(old_offsets[0])] + for start, stop in old_offsets[1:]: + start0, stop0 = offsets[-1] + if start == stop0: + offsets[-1] = (start0, stop) + blocks[-1] += data.pop((start, stop)) + else: + offsets.append((start, stop)) + blocks.append(data.pop((start, stop))) + + self.data = dict(zip(offsets, blocks)) + else: + self.data = {} + + @property + def size(self): + return sum(_[1] - _[0] for _ in self.data) + + @size.setter + def size(self, value): + pass + + @property + def nblocks(self): + return len(self.data) + + @nblocks.setter + def nblocks(self, value): + pass + + def _fetch(self, start: int | None, stop: int | None) -> bytes: + logger.debug("Known parts request %s %s", start, stop) + if start is None: + start = 0 + if stop is None: + stop = self.size + self.total_requested_bytes += stop - start + out = b"" + started = False + loc_old = 0 + for loc0, loc1 in sorted(self.data): + if (loc0 <= start < loc1) and (loc0 <= stop <= loc1): + # entirely within the block + off = start - loc0 + self.hit_count += 1 + return self.data[(loc0, loc1)][off : off + stop - start] + if stop <= loc0: + break + if started and loc0 > loc_old: + # a gap where we need data + self.miss_count += 1 + if self.strict: + raise ValueError + out += b"\x00" * (loc0 - loc_old) + if loc0 <= start < loc1: + # found the start + self.hit_count += 1 + off = start - loc0 + out = self.data[(loc0, loc1)][off : off + stop - start] + started = True + elif start < loc0 and stop > loc1: + # the whole block + self.hit_count += 1 + out += self.data[(loc0, loc1)] + elif loc0 <= stop <= loc1: + # end block + self.hit_count += 1 + out = out + self.data[(loc0, loc1)][: stop - loc0] + return out + loc_old = loc1 + self.miss_count += 1 + if started and not self.strict: + out = out + b"\x00" * (stop - loc_old) + return out + raise ValueError + + +class UpdatableLRU(Generic[P, T]): + """ + Custom implementation of LRU cache that allows updating keys + + Used by BackgroudBlockCache + """ + + class CacheInfo(NamedTuple): + hits: int + misses: int + maxsize: int + currsize: int + + def __init__(self, func: Callable[P, T], max_size: int = 128) -> None: + self._cache: OrderedDict[Any, T] = collections.OrderedDict() + self._func = func + self._max_size = max_size + self._hits = 0 + self._misses = 0 + self._lock = threading.Lock() + + def __call__(self, *args: P.args, **kwargs: P.kwargs) -> T: + if kwargs: + raise TypeError(f"Got unexpected keyword argument {kwargs.keys()}") + with self._lock: + if args in self._cache: + self._cache.move_to_end(args) + self._hits += 1 + return self._cache[args] + + result = self._func(*args, **kwargs) + + with self._lock: + self._cache[args] = result + self._misses += 1 + if len(self._cache) > self._max_size: + self._cache.popitem(last=False) + + return result + + def is_key_cached(self, *args: Any) -> bool: + with self._lock: + return args in self._cache + + def add_key(self, result: T, *args: Any) -> None: + with self._lock: + self._cache[args] = result + if len(self._cache) > self._max_size: + self._cache.popitem(last=False) + + def cache_info(self) -> UpdatableLRU.CacheInfo: + with self._lock: + 
return self.CacheInfo( + maxsize=self._max_size, + currsize=len(self._cache), + hits=self._hits, + misses=self._misses, + ) + + +class BackgroundBlockCache(BaseCache): + """ + Cache holding memory as a set of blocks with pre-loading of + the next block in the background. + + Requests are only ever made ``blocksize`` at a time, and are + stored in an LRU cache. The least recently accessed block is + discarded when more than ``maxblocks`` are stored. If the + next block is not in cache, it is loaded in a separate thread + in non-blocking way. + + Parameters + ---------- + blocksize : int + The number of bytes to store in each block. + Requests are only ever made for ``blocksize``, so this + should balance the overhead of making a request against + the granularity of the blocks. + fetcher : Callable + size : int + The total size of the file being cached. + maxblocks : int + The maximum number of blocks to cache for. The maximum memory + use for this cache is then ``blocksize * maxblocks``. + """ + + name: ClassVar[str] = "background" + + def __init__( + self, blocksize: int, fetcher: Fetcher, size: int, maxblocks: int = 32 + ) -> None: + super().__init__(blocksize, fetcher, size) + self.nblocks = math.ceil(size / blocksize) + self.maxblocks = maxblocks + self._fetch_block_cached = UpdatableLRU(self._fetch_block, maxblocks) + + self._thread_executor = ThreadPoolExecutor(max_workers=1) + self._fetch_future_block_number: int | None = None + self._fetch_future: Future[bytes] | None = None + self._fetch_future_lock = threading.Lock() + + def cache_info(self) -> UpdatableLRU.CacheInfo: + """ + The statistics on the block cache. + + Returns + ------- + NamedTuple + Returned directly from the LRU Cache used internally. + """ + return self._fetch_block_cached.cache_info() + + def __getstate__(self) -> dict[str, Any]: + state = self.__dict__ + del state["_fetch_block_cached"] + del state["_thread_executor"] + del state["_fetch_future_block_number"] + del state["_fetch_future"] + del state["_fetch_future_lock"] + return state + + def __setstate__(self, state) -> None: + self.__dict__.update(state) + self._fetch_block_cached = UpdatableLRU(self._fetch_block, state["maxblocks"]) + self._thread_executor = ThreadPoolExecutor(max_workers=1) + self._fetch_future_block_number = None + self._fetch_future = None + self._fetch_future_lock = threading.Lock() + + def _fetch(self, start: int | None, end: int | None) -> bytes: + if start is None: + start = 0 + if end is None: + end = self.size + if start >= self.size or start >= end: + return b"" + + # byte position -> block numbers + start_block_number = start // self.blocksize + end_block_number = end // self.blocksize + + fetch_future_block_number = None + fetch_future = None + with self._fetch_future_lock: + # Background thread is running. Check we we can or must join it. + if self._fetch_future is not None: + assert self._fetch_future_block_number is not None + if self._fetch_future.done(): + logger.info("BlockCache joined background fetch without waiting.") + self._fetch_block_cached.add_key( + self._fetch_future.result(), self._fetch_future_block_number + ) + # Cleanup the fetch variables. Done with fetching the block. 
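+                    # (The future is already done, so ``result()`` above
+                    # returned without blocking; clearing these slots lets a
+                    # fresh read-ahead be scheduled at the end of this call.)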
+ self._fetch_future_block_number = None + self._fetch_future = None + else: + # Must join if we need the block for the current fetch + must_join = bool( + start_block_number + <= self._fetch_future_block_number + <= end_block_number + ) + if must_join: + # Copy to the local variables to release lock + # before waiting for result + fetch_future_block_number = self._fetch_future_block_number + fetch_future = self._fetch_future + + # Cleanup the fetch variables. Have a local copy. + self._fetch_future_block_number = None + self._fetch_future = None + + # Need to wait for the future for the current read + if fetch_future is not None: + logger.info("BlockCache waiting for background fetch.") + # Wait until result and put it in cache + self._fetch_block_cached.add_key( + fetch_future.result(), fetch_future_block_number + ) + + # these are cached, so safe to do multiple calls for the same start and end. + for block_number in range(start_block_number, end_block_number + 1): + self._fetch_block_cached(block_number) + + # fetch next block in the background if nothing is running in the background, + # the block is within file and it is not already cached + end_block_plus_1 = end_block_number + 1 + with self._fetch_future_lock: + if ( + self._fetch_future is None + and end_block_plus_1 <= self.nblocks + and not self._fetch_block_cached.is_key_cached(end_block_plus_1) + ): + self._fetch_future_block_number = end_block_plus_1 + self._fetch_future = self._thread_executor.submit( + self._fetch_block, end_block_plus_1, "async" + ) + + return self._read_cache( + start, + end, + start_block_number=start_block_number, + end_block_number=end_block_number, + ) + + def _fetch_block(self, block_number: int, log_info: str = "sync") -> bytes: + """ + Fetch the block of data for `block_number`. + """ + if block_number > self.nblocks: + raise ValueError( + f"'block_number={block_number}' is greater than " + f"the number of blocks ({self.nblocks})" + ) + + start = block_number * self.blocksize + end = start + self.blocksize + logger.info("BlockCache fetching block (%s) %d", log_info, block_number) + self.total_requested_bytes += end - start + self.miss_count += 1 + block_contents = super()._fetch(start, end) + return block_contents + + def _read_cache( + self, start: int, end: int, start_block_number: int, end_block_number: int + ) -> bytes: + """ + Read from our block cache. + + Parameters + ---------- + start, end : int + The start and end byte positions. + start_block_number, end_block_number : int + The start and end block numbers. + """ + start_pos = start % self.blocksize + end_pos = end % self.blocksize + + # kind of pointless to count this as a hit, but it is + self.hit_count += 1 + + if start_block_number == end_block_number: + block = self._fetch_block_cached(start_block_number) + return block[start_pos:end_pos] + + else: + # read from the initial + out = [self._fetch_block_cached(start_block_number)[start_pos:]] + + # intermediate blocks + # Note: it'd be nice to combine these into one big request. However + # that doesn't play nicely with our LRU cache. + out.extend( + map( + self._fetch_block_cached, + range(start_block_number + 1, end_block_number), + ) + ) + + # final block + out.append(self._fetch_block_cached(end_block_number)[:end_pos]) + + return b"".join(out) + + +caches: dict[str | None, type[BaseCache]] = { + # one custom case + None: BaseCache, +} + + +def register_cache(cls: type[BaseCache], clobber: bool = False) -> None: + """'Register' cache implementation. 
+ + Parameters + ---------- + clobber: bool, optional + If set to True (default is False) - allow to overwrite existing + entry. + + Raises + ------ + ValueError + """ + name = cls.name + if not clobber and name in caches: + raise ValueError(f"Cache with name {name!r} is already known: {caches[name]}") + caches[name] = cls + + +for c in ( + BaseCache, + MMapCache, + BytesCache, + ReadAheadCache, + BlockCache, + FirstChunkCache, + AllBytes, + KnownPartsOfAFile, + BackgroundBlockCache, +): + register_cache(c) diff --git a/venv/lib/python3.10/site-packages/fsspec/callbacks.py b/venv/lib/python3.10/site-packages/fsspec/callbacks.py new file mode 100644 index 0000000000000000000000000000000000000000..7ca99ca6ac3cd69b28bcd1550f6550e8e648c5fe --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/callbacks.py @@ -0,0 +1,324 @@ +from functools import wraps + + +class Callback: + """ + Base class and interface for callback mechanism + + This class can be used directly for monitoring file transfers by + providing ``callback=Callback(hooks=...)`` (see the ``hooks`` argument, + below), or subclassed for more specialised behaviour. + + Parameters + ---------- + size: int (optional) + Nominal quantity for the value that corresponds to a complete + transfer, e.g., total number of tiles or total number of + bytes + value: int (0) + Starting internal counter value + hooks: dict or None + A dict of named functions to be called on each update. The signature + of these must be ``f(size, value, **kwargs)`` + """ + + def __init__(self, size=None, value=0, hooks=None, **kwargs): + self.size = size + self.value = value + self.hooks = hooks or {} + self.kw = kwargs + + def __enter__(self): + return self + + def __exit__(self, *exc_args): + self.close() + + def close(self): + """Close callback.""" + + def branched(self, path_1, path_2, **kwargs): + """ + Return callback for child transfers + + If this callback is operating at a higher level, e.g., put, which may + trigger transfers that can also be monitored. The function returns a callback + that has to be passed to the child method, e.g., put_file, + as `callback=` argument. + + The implementation uses `callback.branch` for compatibility. + When implementing callbacks, it is recommended to override this function instead + of `branch` and avoid calling `super().branched(...)`. + + Prefer using this function over `branch`. + + Parameters + ---------- + path_1: str + Child's source path + path_2: str + Child's destination path + **kwargs: + Arbitrary keyword arguments + + Returns + ------- + callback: Callback + A callback instance to be passed to the child method + """ + self.branch(path_1, path_2, kwargs) + # mutate kwargs so that we can force the caller to pass "callback=" explicitly + return kwargs.pop("callback", DEFAULT_CALLBACK) + + def branch_coro(self, fn): + """ + Wraps a coroutine, and pass a new child callback to it. + """ + + @wraps(fn) + async def func(path1, path2: str, **kwargs): + with self.branched(path1, path2, **kwargs) as child: + return await fn(path1, path2, callback=child, **kwargs) + + return func + + def set_size(self, size): + """ + Set the internal maximum size attribute + + Usually called if not initially set at instantiation. Note that this + triggers a ``call()``. 
+ + Parameters + ---------- + size: int + """ + self.size = size + self.call() + + def absolute_update(self, value): + """ + Set the internal value state + + Triggers ``call()`` + + Parameters + ---------- + value: int + """ + self.value = value + self.call() + + def relative_update(self, inc=1): + """ + Delta increment the internal counter + + Triggers ``call()`` + + Parameters + ---------- + inc: int + """ + self.value += inc + self.call() + + def call(self, hook_name=None, **kwargs): + """ + Execute hook(s) with current state + + Each function is passed the internal size and current value + + Parameters + ---------- + hook_name: str or None + If given, execute on this hook + kwargs: passed on to (all) hook(s) + """ + if not self.hooks: + return + kw = self.kw.copy() + kw.update(kwargs) + if hook_name: + if hook_name not in self.hooks: + return + return self.hooks[hook_name](self.size, self.value, **kw) + for hook in self.hooks.values() or []: + hook(self.size, self.value, **kw) + + def wrap(self, iterable): + """ + Wrap an iterable to call ``relative_update`` on each iterations + + Parameters + ---------- + iterable: Iterable + The iterable that is being wrapped + """ + for item in iterable: + self.relative_update() + yield item + + def branch(self, path_1, path_2, kwargs): + """ + Set callbacks for child transfers + + If this callback is operating at a higher level, e.g., put, which may + trigger transfers that can also be monitored. The passed kwargs are + to be *mutated* to add ``callback=``, if this class supports branching + to children. + + Parameters + ---------- + path_1: str + Child's source path + path_2: str + Child's destination path + kwargs: dict + arguments passed to child method, e.g., put_file. + + Returns + ------- + + """ + return None + + def no_op(self, *_, **__): + pass + + def __getattr__(self, item): + """ + If undefined methods are called on this class, nothing happens + """ + return self.no_op + + @classmethod + def as_callback(cls, maybe_callback=None): + """Transform callback=... into Callback instance + + For the special value of ``None``, return the global instance of + ``NoOpCallback``. This is an alternative to including + ``callback=DEFAULT_CALLBACK`` directly in a method signature. + """ + if maybe_callback is None: + return DEFAULT_CALLBACK + return maybe_callback + + +class NoOpCallback(Callback): + """ + This implementation of Callback does exactly nothing + """ + + def call(self, *args, **kwargs): + return None + + +class DotPrinterCallback(Callback): + """ + Simple example Callback implementation + + Almost identical to Callback with a hook that prints a char; here we + demonstrate how the outer layer may print "#" and the inner layer "." + """ + + def __init__(self, chr_to_print="#", **kwargs): + self.chr = chr_to_print + super().__init__(**kwargs) + + def branch(self, path_1, path_2, kwargs): + """Mutate kwargs to add new instance with different print char""" + kwargs["callback"] = DotPrinterCallback(".") + + def call(self, **kwargs): + """Just outputs a character""" + print(self.chr, end="") + + +class TqdmCallback(Callback): + """ + A callback to display a progress bar using tqdm + + Parameters + ---------- + tqdm_kwargs : dict, (optional) + Any argument accepted by the tqdm constructor. + See the `tqdm doc `_. + Will be forwarded to `tqdm_cls`. + tqdm_cls: (optional) + subclass of `tqdm.tqdm`. If not passed, it will default to `tqdm.tqdm`. 
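+
+    Notes
+    -----
+    The underlying progress bar is created lazily on the first ``call()``
+    and advanced by the difference between the internal counter and the
+    bar's current position; ``close()`` (or use as a context manager)
+    disposes of it.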
+ + Examples + -------- + >>> import fsspec + >>> from fsspec.callbacks import TqdmCallback + >>> fs = fsspec.filesystem("memory") + >>> path2distant_data = "/your-path" + >>> fs.upload( + ".", + path2distant_data, + recursive=True, + callback=TqdmCallback(), + ) + + You can forward args to tqdm using the ``tqdm_kwargs`` parameter. + + >>> fs.upload( + ".", + path2distant_data, + recursive=True, + callback=TqdmCallback(tqdm_kwargs={"desc": "Your tqdm description"}), + ) + + You can also customize the progress bar by passing a subclass of `tqdm`. + + .. code-block:: python + + class TqdmFormat(tqdm): + '''Provides a `total_time` format parameter''' + @property + def format_dict(self): + d = super().format_dict + total_time = d["elapsed"] * (d["total"] or 0) / max(d["n"], 1) + d.update(total_time=self.format_interval(total_time) + " in total") + return d + + >>> with TqdmCallback( + tqdm_kwargs={ + "desc": "desc", + "bar_format": "{total_time}: {percentage:.0f}%|{bar}{r_bar}", + }, + tqdm_cls=TqdmFormat, + ) as callback: + fs.upload(".", path2distant_data, recursive=True, callback=callback) + """ + + def __init__(self, tqdm_kwargs=None, *args, **kwargs): + try: + from tqdm import tqdm + + except ImportError as exce: + raise ImportError( + "Using TqdmCallback requires tqdm to be installed" + ) from exce + + self._tqdm_cls = kwargs.pop("tqdm_cls", tqdm) + self._tqdm_kwargs = tqdm_kwargs or {} + self.tqdm = None + super().__init__(*args, **kwargs) + + def call(self, *args, **kwargs): + if self.tqdm is None: + self.tqdm = self._tqdm_cls(total=self.size, **self._tqdm_kwargs) + self.tqdm.total = self.size + self.tqdm.update(self.value - self.tqdm.n) + + def close(self): + if self.tqdm is not None: + self.tqdm.close() + self.tqdm = None + + def __del__(self): + return self.close() + + +DEFAULT_CALLBACK = _DEFAULT_CALLBACK = NoOpCallback() diff --git a/venv/lib/python3.10/site-packages/fsspec/compression.py b/venv/lib/python3.10/site-packages/fsspec/compression.py new file mode 100644 index 0000000000000000000000000000000000000000..11c2e3d3f142d95186663fa5a747911e66832266 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/compression.py @@ -0,0 +1,185 @@ +"""Helper functions for a standard streaming compression API""" + +import sys +from zipfile import ZipFile + +import fsspec.utils +from fsspec.spec import AbstractBufferedFile + + +def noop_file(file, mode, **kwargs): + return file + + +# TODO: files should also be available as contexts +# should be functions of the form func(infile, mode=, **kwargs) -> file-like +compr = {None: noop_file} + + +def register_compression(name, callback, extensions, force=False): + """Register an "inferable" file compression type. + + Registers transparent file compression type for use with fsspec.open. + Compression can be specified by name in open, or "infer"-ed for any files + ending with the given extensions. + + Args: + name: (str) The compression type name. Eg. "gzip". + callback: A callable of form (infile, mode, **kwargs) -> file-like. + Accepts an input file-like object, the target mode and kwargs. + Returns a wrapped file-like object. + extensions: (str, Iterable[str]) A file extension, or list of file + extensions for which to infer this compression scheme. Eg. "gz". + force: (bool) Force re-registration of compression type or extensions. + + Raises: + ValueError: If name or extensions already registered, and not force. 
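+
+    Example:
+        Registering a pass-through codec under a hypothetical name and
+        extension, to illustrate the callback signature:
+
+        >>> register_compression("raw", lambda f, mode="rb", **kw: f, "raw")  # doctest: +SKIP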
+ + """ + if isinstance(extensions, str): + extensions = [extensions] + + # Validate registration + if name in compr and not force: + raise ValueError(f"Duplicate compression registration: {name}") + + for ext in extensions: + if ext in fsspec.utils.compressions and not force: + raise ValueError(f"Duplicate compression file extension: {ext} ({name})") + + compr[name] = callback + + for ext in extensions: + fsspec.utils.compressions[ext] = name + + +def unzip(infile, mode="rb", filename=None, **kwargs): + if "r" not in mode: + filename = filename or "file" + z = ZipFile(infile, mode="w", **kwargs) + fo = z.open(filename, mode="w") + fo.close = lambda closer=fo.close: closer() or z.close() + return fo + z = ZipFile(infile) + if filename is None: + filename = z.namelist()[0] + return z.open(filename, mode="r", **kwargs) + + +register_compression("zip", unzip, "zip") + +try: + from bz2 import BZ2File +except ImportError: + pass +else: + register_compression("bz2", BZ2File, "bz2") + +try: # pragma: no cover + from isal import igzip + + def isal(infile, mode="rb", **kwargs): + return igzip.IGzipFile(fileobj=infile, mode=mode, **kwargs) + + register_compression("gzip", isal, "gz") +except ImportError: + from gzip import GzipFile + + register_compression( + "gzip", lambda f, **kwargs: GzipFile(fileobj=f, **kwargs), "gz" + ) + +try: + from lzma import LZMAFile + + register_compression("lzma", LZMAFile, "lzma") + register_compression("xz", LZMAFile, "xz") +except ImportError: + pass + +try: + import lzmaffi + + register_compression("lzma", lzmaffi.LZMAFile, "lzma", force=True) + register_compression("xz", lzmaffi.LZMAFile, "xz", force=True) +except ImportError: + pass + + +class SnappyFile(AbstractBufferedFile): + def __init__(self, infile, mode, **kwargs): + import snappy + + super().__init__( + fs=None, path="snappy", mode=mode.strip("b") + "b", size=999999999, **kwargs + ) + self.infile = infile + if "r" in mode: + self.codec = snappy.StreamDecompressor() + else: + self.codec = snappy.StreamCompressor() + + def _upload_chunk(self, final=False): + self.buffer.seek(0) + out = self.codec.add_chunk(self.buffer.read()) + self.infile.write(out) + return True + + def seek(self, loc, whence=0): + raise NotImplementedError("SnappyFile is not seekable") + + def seekable(self): + return False + + def _fetch_range(self, start, end): + """Get the specified set of bytes from remote""" + data = self.infile.read(end - start) + return self.codec.decompress(data) + + +try: + import snappy + + snappy.compress(b"") + # Snappy may use the .sz file extension, but this is not part of the + # standard implementation. 
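+    # The compress() probe above also guards against unrelated modules
+    # importable as "snappy" that lack the python-snappy API; such failures
+    # are caught by the AttributeError in the except clause below.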
+ register_compression("snappy", SnappyFile, []) + +except (ImportError, NameError, AttributeError): + pass + +try: + import lz4.frame + + register_compression("lz4", lz4.frame.open, "lz4") +except ImportError: + pass + +try: + if sys.version_info >= (3, 14): + from compression import zstd + else: + from backports import zstd + + register_compression("zstd", zstd.ZstdFile, "zst") +except ImportError: + try: + import zstandard as zstd + + def zstandard_file(infile, mode="rb"): + if "r" in mode: + cctx = zstd.ZstdDecompressor() + return cctx.stream_reader(infile) + else: + cctx = zstd.ZstdCompressor(level=10) + return cctx.stream_writer(infile) + + register_compression("zstd", zstandard_file, "zst") + except ImportError: + pass + pass + + +def available_compressions(): + """Return a list of the implemented compressions.""" + return list(compr) diff --git a/venv/lib/python3.10/site-packages/fsspec/config.py b/venv/lib/python3.10/site-packages/fsspec/config.py new file mode 100644 index 0000000000000000000000000000000000000000..76d9af14aaf7df47c4551c169f27b05abf9c269e --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/config.py @@ -0,0 +1,131 @@ +from __future__ import annotations + +import configparser +import json +import os +import warnings +from typing import Any + +conf: dict[str, dict[str, Any]] = {} +default_conf_dir = os.path.join(os.path.expanduser("~"), ".config/fsspec") +conf_dir = os.environ.get("FSSPEC_CONFIG_DIR", default_conf_dir) + + +def set_conf_env(conf_dict, envdict=os.environ): + """Set config values from environment variables + + Looks for variables of the form ``FSSPEC_`` and + ``FSSPEC__``. For ``FSSPEC_`` the value is parsed + as a json dictionary and used to ``update`` the config of the + corresponding protocol. For ``FSSPEC__`` there is no + attempt to convert the string value, but the kwarg keys will be lower-cased. + + The ``FSSPEC__`` variables are applied after the + ``FSSPEC_`` ones. + + Parameters + ---------- + conf_dict : dict(str, dict) + This dict will be mutated + envdict : dict-like(str, str) + Source for the values - usually the real environment + """ + kwarg_keys = [] + for key in envdict: + if key.startswith("FSSPEC_") and len(key) > 7 and key[7] != "_": + if key.count("_") > 1: + kwarg_keys.append(key) + continue + try: + value = json.loads(envdict[key]) + except json.decoder.JSONDecodeError as ex: + warnings.warn( + f"Ignoring environment variable {key} due to a parse failure: {ex}" + ) + else: + if isinstance(value, dict): + _, proto = key.split("_", 1) + conf_dict.setdefault(proto.lower(), {}).update(value) + else: + warnings.warn( + f"Ignoring environment variable {key} due to not being a dict:" + f" {type(value)}" + ) + elif key.startswith("FSSPEC"): + warnings.warn( + f"Ignoring environment variable {key} due to having an unexpected name" + ) + + for key in kwarg_keys: + _, proto, kwarg = key.split("_", 2) + conf_dict.setdefault(proto.lower(), {})[kwarg.lower()] = envdict[key] + + +def set_conf_files(cdir, conf_dict): + """Set config values from files + + Scans for INI and JSON files in the given dictionary, and uses their + contents to set the config. In case of repeated values, later values + win. + + In the case of INI files, all values are strings, and these will not + be converted. 
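+
+    For example, a file ``conf.json`` containing ``{"s3": {"anon": true}}``
+    would make ``anon=True`` a default keyword for the "s3" protocol, while
+    the INI equivalent (``[s3]`` with ``anon = true``) would keep the value
+    as the string ``"true"``, per the note above.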
+ + Parameters + ---------- + cdir : str + Directory to search + conf_dict : dict(str, dict) + This dict will be mutated + """ + if not os.path.isdir(cdir): + return + allfiles = sorted(os.listdir(cdir)) + for fn in allfiles: + if fn.endswith(".ini"): + ini = configparser.ConfigParser() + ini.read(os.path.join(cdir, fn)) + for key in ini: + if key == "DEFAULT": + continue + conf_dict.setdefault(key, {}).update(dict(ini[key])) + if fn.endswith(".json"): + with open(os.path.join(cdir, fn)) as f: + js = json.load(f) + for key in js: + conf_dict.setdefault(key, {}).update(dict(js[key])) + + +def apply_config(cls, kwargs, conf_dict=None): + """Supply default values for kwargs when instantiating class + + Augments the passed kwargs, by finding entries in the config dict + which match the classes ``.protocol`` attribute (one or more str) + + Parameters + ---------- + cls : file system implementation + kwargs : dict + conf_dict : dict of dict + Typically this is the global configuration + + Returns + ------- + dict : the modified set of kwargs + """ + if conf_dict is None: + conf_dict = conf + protos = cls.protocol if isinstance(cls.protocol, (tuple, list)) else [cls.protocol] + kw = {} + for proto in protos: + # default kwargs from the current state of the config + if proto in conf_dict: + kw.update(conf_dict[proto]) + # explicit kwargs always win + kw.update(**kwargs) + kwargs = kw + return kwargs + + +set_conf_files(conf_dir, conf) +set_conf_env(conf) diff --git a/venv/lib/python3.10/site-packages/fsspec/conftest.py b/venv/lib/python3.10/site-packages/fsspec/conftest.py new file mode 100644 index 0000000000000000000000000000000000000000..f05eb5c30d42b0c1c5cc432f9c217d8f0e01f412 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/conftest.py @@ -0,0 +1,125 @@ +import os +import shutil +import subprocess +import sys +import time +from collections import deque +from collections.abc import Generator, Sequence + +import pytest + +import fsspec + + +@pytest.fixture() +def m(): + """ + Fixture providing a memory filesystem. + """ + m = fsspec.filesystem("memory") + m.store.clear() + m.pseudo_dirs.clear() + m.pseudo_dirs.append("") + try: + yield m + finally: + m.store.clear() + m.pseudo_dirs.clear() + m.pseudo_dirs.append("") + + +class InstanceCacheInspector: + """ + Helper class to inspect instance caches of filesystem classes in tests. + """ + + def clear(self) -> None: + """ + Clear instance caches of all currently imported filesystem classes. + """ + classes = deque([fsspec.spec.AbstractFileSystem]) + while classes: + cls = classes.popleft() + cls.clear_instance_cache() + classes.extend(cls.__subclasses__()) + + def gather_counts(self, *, omit_zero: bool = True) -> dict[str, int]: + """ + Gather counts of filesystem instances in the instance caches + of all currently imported filesystem classes. + + Parameters + ---------- + omit_zero: + Whether to omit instance types with no cached instances. + """ + out: dict[str, int] = {} + classes = deque([fsspec.spec.AbstractFileSystem]) + while classes: + cls = classes.popleft() + count = len(cls._cache) # there is no public interface for the cache + # note: skip intermediate AbstractFileSystem subclasses + # if they proxy the protocol attribute via a property. 
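+            # (a property object is neither a str nor a Sequence, so such
+            # classes fail the isinstance check and are skipped)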
+ if isinstance(cls.protocol, (Sequence, str)): + key = cls.protocol if isinstance(cls.protocol, str) else cls.protocol[0] + if count or not omit_zero: + out[key] = count + classes.extend(cls.__subclasses__()) + return out + + +@pytest.fixture(scope="function", autouse=True) +def instance_caches() -> Generator[InstanceCacheInspector, None, None]: + """ + Fixture to ensure empty filesystem instance caches before and after a test. + + Used by default for all tests. + Clears caches of all imported filesystem classes. + Can be used to write test assertions about instance caches. + + Usage: + + def test_something(instance_caches): + # Test code here + fsspec.open("file://abc") + fsspec.open("memory://foo/bar") + + # Test assertion + assert instance_caches.gather_counts() == {"file": 1, "memory": 1} + + Returns + ------- + instance_caches: An instance cache inspector for clearing and inspecting caches. + """ + ic = InstanceCacheInspector() + + ic.clear() + try: + yield ic + finally: + ic.clear() + + +@pytest.fixture(scope="function") +def ftp_writable(tmpdir): + """ + Fixture providing a writable FTP filesystem. + """ + pytest.importorskip("pyftpdlib") + + d = str(tmpdir) + with open(os.path.join(d, "out"), "wb") as f: + f.write(b"hello" * 10000) + P = subprocess.Popen( + [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"] + ) + try: + time.sleep(1) + yield "localhost", 2121, "user", "pass" + finally: + P.terminate() + P.wait() + try: + shutil.rmtree(tmpdir) + except Exception: + pass diff --git a/venv/lib/python3.10/site-packages/fsspec/core.py b/venv/lib/python3.10/site-packages/fsspec/core.py new file mode 100644 index 0000000000000000000000000000000000000000..5876bfcefc176b3b3aed0e16b54fa3809a5a0eee --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/core.py @@ -0,0 +1,760 @@ +from __future__ import annotations + +import io +import logging +import os +import re +from glob import has_magic +from pathlib import Path + +# for backwards compat, we export cache things from here too +from fsspec.caching import ( # noqa: F401 + BaseCache, + BlockCache, + BytesCache, + MMapCache, + ReadAheadCache, + caches, +) +from fsspec.compression import compr +from fsspec.config import conf +from fsspec.registry import available_protocols, filesystem, get_filesystem_class +from fsspec.utils import ( + _unstrip_protocol, + build_name_function, + infer_compression, + stringify_path, +) + +logger = logging.getLogger("fsspec") + + +class OpenFile: + """ + File-like object to be used in a context + + Can layer (buffered) text-mode and compression over any file-system, which + are typically binary-only. + + These instances are safe to serialize, as the low-level file object + is not created until invoked using ``with``. + + Parameters + ---------- + fs: FileSystem + The file system to use for opening the file. Should be a subclass or duck-type + with ``fsspec.spec.AbstractFileSystem`` + path: str + Location to open + mode: str like 'rb', optional + Mode of the opened file + compression: str or None, optional + Compression to apply + encoding: str or None, optional + The encoding to use if opened in text mode. + errors: str or None, optional + How to handle encoding errors if opened in text mode. + newline: None or str + Passed to TextIOWrapper in text mode, how to handle line endings. + autoopen: bool + If True, calls open() immediately. 
Mostly used by pickle + pos: int + If given and autoopen is True, seek to this location immediately + """ + + def __init__( + self, + fs, + path, + mode="rb", + compression=None, + encoding=None, + errors=None, + newline=None, + ): + self.fs = fs + self.path = path + self.mode = mode + self.compression = get_compression(path, compression) + self.encoding = encoding + self.errors = errors + self.newline = newline + self.fobjects = [] + + def __reduce__(self): + return ( + OpenFile, + ( + self.fs, + self.path, + self.mode, + self.compression, + self.encoding, + self.errors, + self.newline, + ), + ) + + def __repr__(self): + return f"" + + def __enter__(self): + mode = self.mode.replace("t", "").replace("b", "") + "b" + + try: + f = self.fs.open(self.path, mode=mode) + except FileNotFoundError as e: + if has_magic(self.path): + raise FileNotFoundError( + "%s not found. The URL contains glob characters: you maybe needed\n" + "to pass expand=True in fsspec.open() or the storage_options of \n" + "your library. You can also set the config value 'open_expand'\n" + "before import, or fsspec.core.DEFAULT_EXPAND at runtime, to True.", + self.path, + ) from e + raise + + self.fobjects = [f] + + if self.compression is not None: + compress = compr[self.compression] + f = compress(f, mode=mode[0]) + self.fobjects.append(f) + + if "b" not in self.mode: + # assume, for example, that 'r' is equivalent to 'rt' as in builtin + f = PickleableTextIOWrapper( + f, encoding=self.encoding, errors=self.errors, newline=self.newline + ) + self.fobjects.append(f) + + return self.fobjects[-1] + + def __exit__(self, *args): + self.close() + + @property + def full_name(self): + return _unstrip_protocol(self.path, self.fs) + + def open(self): + """Materialise this as a real open file without context + + The OpenFile object should be explicitly closed to avoid enclosed file + instances persisting. You must, therefore, keep a reference to the OpenFile + during the life of the file-like it generates. + """ + return self.__enter__() + + def close(self): + """Close all encapsulated file objects""" + for f in reversed(self.fobjects): + if "r" not in self.mode and not f.closed: + f.flush() + f.close() + self.fobjects.clear() + + +class OpenFiles(list): + """List of OpenFile instances + + Can be used in a single context, which opens and closes all of the + contained files. Normal list access to get the elements works as + normal. + + A special case is made for caching filesystems - the files will + be down/uploaded together at the start or end of the context, and + this may happen concurrently, if the target filesystem supports it. 
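+
+    Example (hypothetical local paths, via the ``open_files`` helper below):
+
+    >>> with open_files("./data/file*.csv") as files:  # doctest: +SKIP
+    ...     data = [f.read() for f in files]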
+ """ + + def __init__(self, *args, mode="rb", fs=None): + self.mode = mode + self.fs = fs + self.files = [] + super().__init__(*args) + + def __enter__(self): + if self.fs is None: + raise ValueError("Context has already been used") + + fs = self.fs + while True: + if hasattr(fs, "open_many"): + # check for concurrent cache download; or set up for upload + self.files = fs.open_many(self) + return self.files + if hasattr(fs, "fs") and fs.fs is not None: + fs = fs.fs + else: + break + return [s.__enter__() for s in self] + + def __exit__(self, *args): + fs = self.fs + [s.__exit__(*args) for s in self] + if "r" not in self.mode: + while True: + if hasattr(fs, "open_many"): + # check for concurrent cache upload + fs.commit_many(self.files) + return + if hasattr(fs, "fs") and fs.fs is not None: + fs = fs.fs + else: + break + + def __getitem__(self, item): + out = super().__getitem__(item) + if isinstance(item, slice): + return OpenFiles(out, mode=self.mode, fs=self.fs) + return out + + def __repr__(self): + return f"" + + +def open_files( + urlpath, + mode="rb", + compression=None, + encoding="utf8", + errors=None, + name_function=None, + num=1, + protocol=None, + newline=None, + auto_mkdir=True, + expand=True, + **kwargs, +): + """Given a path or paths, return a list of ``OpenFile`` objects. + + For writing, a str path must contain the "*" character, which will be filled + in by increasing numbers, e.g., "part*" -> "part1", "part2" if num=2. + + For either reading or writing, can instead provide explicit list of paths. + + Parameters + ---------- + urlpath: string or list + Absolute or relative filepath(s). Prefix with a protocol like ``s3://`` + to read from alternative filesystems. To read from multiple files you + can pass a globstring or a list of paths, with the caveat that they + must all have the same protocol. + mode: 'rb', 'wt', etc. + compression: string or None + If given, open file using compression codec. Can either be a compression + name (a key in ``fsspec.compression.compr``) or "infer" to guess the + compression from the filename suffix. + encoding: str + For text mode only + errors: None or str + Passed to TextIOWrapper in text mode + name_function: function or None + if opening a set of files for writing, those files do not yet exist, + so we need to generate their names by formatting the urlpath for + each sequence number + num: int [1] + if writing mode, number of files we expect to create (passed to + name+function) + protocol: str or None + If given, overrides the protocol found in the URL. + newline: bytes or None + Used for line terminator in text mode. If None, uses system default; + if blank, uses no translation. + auto_mkdir: bool (True) + If in write mode, this will ensure the target directory exists before + writing, by calling ``fs.mkdirs(exist_ok=True)``. + expand: bool + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. + + Examples + -------- + >>> files = open_files('2015-*-*.csv') # doctest: +SKIP + >>> files = open_files( + ... 's3://bucket/2015-*-*.csv.gz', compression='gzip' + ... 
) # doctest: +SKIP + + Returns + ------- + An ``OpenFiles`` instance, which is a list of ``OpenFile`` objects that can + be used as a single context + + Notes + ----- + For a full list of the available protocols and the implementations that + they map across to see the latest online documentation: + + - For implementations built into ``fsspec`` see + https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations + - For implementations in separate packages see + https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations + """ + fs, fs_token, paths = get_fs_token_paths( + urlpath, + mode, + num=num, + name_function=name_function, + storage_options=kwargs, + protocol=protocol, + expand=expand, + ) + if fs.protocol == "file": + fs.auto_mkdir = auto_mkdir + elif "r" not in mode and auto_mkdir: + parents = {fs._parent(path) for path in paths} + for parent in parents: + try: + fs.makedirs(parent, exist_ok=True) + except PermissionError: + pass + return OpenFiles( + [ + OpenFile( + fs, + path, + mode=mode, + compression=compression, + encoding=encoding, + errors=errors, + newline=newline, + ) + for path in paths + ], + mode=mode, + fs=fs, + ) + + +def _un_chain(path, kwargs): + # Avoid a circular import + from fsspec.implementations.chained import ChainedFileSystem + + if "::" in path: + x = re.compile(".*[^a-z]+.*") # test for non protocol-like single word + known_protocols = set(available_protocols()) + bits = [] + + # split on '::', then ensure each bit has a protocol + for p in path.split("::"): + if p in known_protocols: + bits.append(p + "://") + elif "://" in p or x.match(p): + bits.append(p) + else: + bits.append(p + "://") + else: + bits = [path] + + # [[url, protocol, kwargs], ...] + out = [] + previous_bit = None + kwargs = kwargs.copy() + + for bit in reversed(bits): + protocol = kwargs.pop("protocol", None) or split_protocol(bit)[0] or "file" + cls = get_filesystem_class(protocol) + extra_kwargs = cls._get_kwargs_from_urls(bit) + kws = kwargs.pop(protocol, {}) + + if bit is bits[0]: + kws.update(kwargs) + + kw = dict( + **{k: v for k, v in extra_kwargs.items() if k not in kws or v != kws[k]}, + **kws, + ) + bit = cls._strip_protocol(bit) + + if ( + "target_protocol" not in kw + and issubclass(cls, ChainedFileSystem) + and not bit + ): + # replace bit if we are chaining and no path given + bit = previous_bit + + out.append((bit, protocol, kw)) + previous_bit = bit + + out.reverse() + return out + + +def url_to_fs(url, **kwargs): + """ + Turn fully-qualified and potentially chained URL into filesystem instance + + Parameters + ---------- + url : str + The fsspec-compatible URL + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. + + Returns + ------- + filesystem : FileSystem + The new filesystem discovered from ``url`` and created with + ``**kwargs``. + urlpath : str + The file-systems-specific URL for ``url``. 
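+
+    Examples
+    --------
+    Both plain and chained URLs are accepted; for chained URLs, per-protocol
+    options may be passed as dicts keyed by the protocol name (the bucket and
+    paths here are illustrative):
+
+    >>> fs, path = url_to_fs("memory://bucket/data.csv")  # doctest: +SKIP
+    >>> fs, path = url_to_fs(
+    ...     "simplecache::s3://bucket/data.csv", s3={"anon": True}
+    ... )  # doctest: +SKIP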
+ """ + url = stringify_path(url) + # non-FS arguments that appear in fsspec.open() + # inspect could keep this in sync with open()'s signature + known_kwargs = { + "compression", + "encoding", + "errors", + "expand", + "mode", + "name_function", + "newline", + "num", + } + kwargs = {k: v for k, v in kwargs.items() if k not in known_kwargs} + chain = _un_chain(url, kwargs) + inkwargs = {} + # Reverse iterate the chain, creating a nested target_* structure + for i, ch in enumerate(reversed(chain)): + urls, protocol, kw = ch + if i == len(chain) - 1: + inkwargs = dict(**kw, **inkwargs) + continue + inkwargs["target_options"] = dict(**kw, **inkwargs) + inkwargs["target_protocol"] = protocol + inkwargs["fo"] = urls + urlpath, protocol, _ = chain[0] + fs = filesystem(protocol, **inkwargs) + return fs, urlpath + + +DEFAULT_EXPAND = conf.get("open_expand", False) + + +def open( + urlpath, + mode="rb", + compression=None, + encoding="utf8", + errors=None, + protocol=None, + newline=None, + expand=None, + **kwargs, +): + """Given a path or paths, return one ``OpenFile`` object. + + Parameters + ---------- + urlpath: string or list + Absolute or relative filepath. Prefix with a protocol like ``s3://`` + to read from alternative filesystems. Should not include glob + character(s). + mode: 'rb', 'wt', etc. + compression: string or None + If given, open file using compression codec. Can either be a compression + name (a key in ``fsspec.compression.compr``) or "infer" to guess the + compression from the filename suffix. + encoding: str + For text mode only + errors: None or str + Passed to TextIOWrapper in text mode + protocol: str or None + If given, overrides the protocol found in the URL. + newline: bytes or None + Used for line terminator in text mode. If None, uses system default; + if blank, uses no translation. + expand: bool or None + Whether to regard file paths containing special glob characters as needing + expansion (finding the first match) or absolute. Setting False allows using + paths which do embed such characters. If None (default), this argument + takes its value from the DEFAULT_EXPAND module variable, which takes + its initial value from the "open_expand" config value at startup, which will + be False if not set. + **kwargs: dict + Extra options that make sense to a particular storage connection, e.g. + host, port, username, password, etc. + + Examples + -------- + >>> openfile = open('2015-01-01.csv') # doctest: +SKIP + >>> openfile = open( + ... 's3://bucket/2015-01-01.csv.gz', compression='gzip' + ... ) # doctest: +SKIP + >>> with openfile as f: + ... df = pd.read_csv(f) # doctest: +SKIP + ... + + Returns + ------- + ``OpenFile`` object. 
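+    The object is serializable, and only opens the target file-like when
+    entered as a context manager (see ``OpenFile`` above).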
+
+    Notes
+    -----
+    For a full list of the available protocols and the implementations that
+    they map across to see the latest online documentation:
+
+    - For implementations built into ``fsspec`` see
+      https://filesystem-spec.readthedocs.io/en/latest/api.html#built-in-implementations
+    - For implementations in separate packages see
+      https://filesystem-spec.readthedocs.io/en/latest/api.html#other-known-implementations
+    """
+    expand = DEFAULT_EXPAND if expand is None else expand
+    out = open_files(
+        urlpath=[urlpath],
+        mode=mode,
+        compression=compression,
+        encoding=encoding,
+        errors=errors,
+        protocol=protocol,
+        newline=newline,
+        expand=expand,
+        **kwargs,
+    )
+    if not out:
+        raise FileNotFoundError(urlpath)
+    return out[0]
+
+
+def open_local(
+    url: str | list[str] | Path | list[Path],
+    mode: str = "rb",
+    **storage_options: dict,
+) -> str | list[str]:
+    """Open file(s) which can be resolved to local
+
+    For files which either are local, or get downloaded upon open
+    (e.g., by file caching)
+
+    Parameters
+    ----------
+    url: str or list(str)
+    mode: str
+        Must be read mode
+    storage_options:
+        passed on to the FS constructor, or used by ``open_files``
+        (e.g., compression)
+    """
+    if "r" not in mode:
+        raise ValueError("Can only ensure local files when reading")
+    of = open_files(url, mode=mode, **storage_options)
+    if not getattr(of[0].fs, "local_file", False):
+        raise ValueError(
+            "open_local can only be used on a filesystem which"
+            " has attribute local_file=True"
+        )
+    with of as files:
+        paths = [f.name for f in files]
+    if (isinstance(url, str) and not has_magic(url)) or isinstance(url, Path):
+        return paths[0]
+    return paths
+
+
+def get_compression(urlpath, compression):
+    if compression == "infer":
+        compression = infer_compression(urlpath)
+    if compression is not None and compression not in compr:
+        raise ValueError(f"Compression type {compression} not supported")
+    return compression
+
+
+def split_protocol(urlpath):
+    """Return protocol, path pair"""
+    urlpath = stringify_path(urlpath)
+    if "://" in urlpath:
+        protocol, path = urlpath.split("://", 1)
+        if len(protocol) > 1:
+            # excludes Windows paths
+            return protocol, path
+    if urlpath.startswith("data:"):
+        return urlpath.split(":", 1)
+    return None, urlpath
+
+
+def strip_protocol(urlpath):
+    """Return only path part of full URL, according to appropriate backend"""
+    protocol, _ = split_protocol(urlpath)
+    cls = get_filesystem_class(protocol)
+    return cls._strip_protocol(urlpath)
+
+
+def expand_paths_if_needed(paths, mode, num, fs, name_function):
+    """Expand paths if they have a ``*`` in them (write mode) or any of ``*?[]``
+    in them (read mode).
+
+    Parameters
+    ----------
+    paths: list of paths
+    mode: str
+        Mode in which to open files.
+    num: int
+        If opening in writing mode, number of files we expect to create.
+    fs: filesystem object
+    name_function: callable
+        If opening in writing mode, this callable is used to generate path
+        names. Names are generated for each partition by
+        ``urlpath.replace('*', name_function(partition_index))``.
+
+    Returns
+    -------
+    list of paths
+    """
+    expanded_paths = []
+    paths = list(paths)
+
+    if "w" in mode:  # write mode
+        if sum(1 for p in paths if "*" in p) > 1:
+            raise ValueError(
+                "When writing data, only one filename mask can be specified."
+ ) + num = max(num, len(paths)) + + for curr_path in paths: + if "*" in curr_path: + # expand using name_function + expanded_paths.extend(_expand_paths(curr_path, name_function, num)) + else: + expanded_paths.append(curr_path) + # if we generated more paths that asked for, trim the list + if len(expanded_paths) > num: + expanded_paths = expanded_paths[:num] + + else: # read mode + for curr_path in paths: + if has_magic(curr_path): + # expand using glob + expanded_paths.extend(fs.glob(curr_path)) + else: + expanded_paths.append(curr_path) + + return expanded_paths + + +def get_fs_token_paths( + urlpath, + mode="rb", + num=1, + name_function=None, + storage_options=None, + protocol=None, + expand=True, +): + """Filesystem, deterministic token, and paths from a urlpath and options. + + Parameters + ---------- + urlpath: string or iterable + Absolute or relative filepath, URL (may include protocols like + ``s3://``), or globstring pointing to data. + mode: str, optional + Mode in which to open files. + num: int, optional + If opening in writing mode, number of files we expect to create. + name_function: callable, optional + If opening in writing mode, this callable is used to generate path + names. Names are generated for each partition by + ``urlpath.replace('*', name_function(partition_index))``. + storage_options: dict, optional + Additional keywords to pass to the filesystem class. + protocol: str or None + To override the protocol specifier in the URL + expand: bool + Expand string paths for writing, assuming the path is a directory + """ + if isinstance(urlpath, (list, tuple, set)): + if not urlpath: + raise ValueError("empty urlpath sequence") + urlpath0 = stringify_path(next(iter(urlpath))) + else: + urlpath0 = stringify_path(urlpath) + storage_options = storage_options or {} + if protocol: + storage_options["protocol"] = protocol + chain = _un_chain(urlpath0, storage_options or {}) + inkwargs = {} + # Reverse iterate the chain, creating a nested target_* structure + for i, ch in enumerate(reversed(chain)): + urls, nested_protocol, kw = ch + if i == len(chain) - 1: + inkwargs = dict(**kw, **inkwargs) + continue + inkwargs["target_options"] = dict(**kw, **inkwargs) + inkwargs["target_protocol"] = nested_protocol + inkwargs["fo"] = urls + paths, protocol, _ = chain[0] + fs = filesystem(protocol, **inkwargs) + if isinstance(urlpath, (list, tuple, set)): + pchains = [ + _un_chain(stringify_path(u), storage_options or {})[0] for u in urlpath + ] + if len({pc[1] for pc in pchains}) > 1: + raise ValueError("Protocol mismatch getting fs from %s", urlpath) + paths = [pc[0] for pc in pchains] + else: + paths = fs._strip_protocol(paths) + if isinstance(paths, (list, tuple, set)): + if expand: + paths = expand_paths_if_needed(paths, mode, num, fs, name_function) + elif not isinstance(paths, list): + paths = list(paths) + else: + if ("w" in mode or "x" in mode) and expand: + paths = _expand_paths(paths, name_function, num) + elif "*" in paths: + paths = [f for f in sorted(fs.glob(paths)) if not fs.isdir(f)] + else: + paths = [paths] + + return fs, fs._fs_token, paths + + +def _expand_paths(path, name_function, num): + if isinstance(path, str): + if path.count("*") > 1: + raise ValueError("Output path spec must contain exactly one '*'.") + elif "*" not in path: + path = os.path.join(path, "*.part") + + if name_function is None: + name_function = build_name_function(num - 1) + + paths = [path.replace("*", name_function(i)) for i in range(num)] + if paths != sorted(paths): + logger.warning( + "In 
order to preserve order between partitions" + " paths created with ``name_function`` should " + "sort to partition order" + ) + elif isinstance(path, (tuple, list)): + assert len(path) == num + paths = list(path) + else: + raise ValueError( + "Path should be either\n" + "1. A list of paths: ['foo.json', 'bar.json', ...]\n" + "2. A directory: 'foo/\n" + "3. A path with a '*' in it: 'foo.*.json'" + ) + return paths + + +class PickleableTextIOWrapper(io.TextIOWrapper): + """TextIOWrapper cannot be pickled. This solves it. + + Requires that ``buffer`` be pickleable, which all instances of + AbstractBufferedFile are. + """ + + def __init__( + self, + buffer, + encoding=None, + errors=None, + newline=None, + line_buffering=False, + write_through=False, + ): + self.args = buffer, encoding, errors, newline, line_buffering, write_through + super().__init__(*self.args) + + def __reduce__(self): + return PickleableTextIOWrapper, self.args diff --git a/venv/lib/python3.10/site-packages/fsspec/dircache.py b/venv/lib/python3.10/site-packages/fsspec/dircache.py new file mode 100644 index 0000000000000000000000000000000000000000..eca19566b135e5a7a4f6e7407d56411ec58bfe44 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/dircache.py @@ -0,0 +1,98 @@ +import time +from collections.abc import MutableMapping +from functools import lru_cache + + +class DirCache(MutableMapping): + """ + Caching of directory listings, in a structure like:: + + {"path0": [ + {"name": "path0/file0", + "size": 123, + "type": "file", + ... + }, + {"name": "path0/file1", + }, + ... + ], + "path1": [...] + } + + Parameters to this class control listing expiry or indeed turn + caching off + """ + + def __init__( + self, + use_listings_cache=True, + listings_expiry_time=None, + max_paths=None, + **kwargs, + ): + """ + + Parameters + ---------- + use_listings_cache: bool + If False, this cache never returns items, but always reports KeyError, + and setting items has no effect + listings_expiry_time: int or float (optional) + Time in seconds that a listing is considered valid. If None, + listings do not expire. + max_paths: int (optional) + The number of most recent listings that are considered valid; 'recent' + refers to when the entry was set. 
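+
+        Examples
+        --------
+        For instance, to keep at most 1000 listings, each valid for 30
+        seconds:
+
+        >>> cache = DirCache(listings_expiry_time=30, max_paths=1000)  # doctest: +SKIP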
+        """
+        self._cache = {}
+        self._times = {}
+        if max_paths:
+            self._q = lru_cache(max_paths + 1)(lambda key: self._cache.pop(key, None))
+        self.use_listings_cache = use_listings_cache
+        self.listings_expiry_time = listings_expiry_time
+        self.max_paths = max_paths
+
+    def __getitem__(self, item):
+        if self.listings_expiry_time is not None:
+            if self._times.get(item, 0) - time.time() < -self.listings_expiry_time:
+                del self._cache[item]
+        if self.max_paths:
+            self._q(item)
+        return self._cache[item]  # maybe raises KeyError
+
+    def clear(self):
+        self._cache.clear()
+
+    def __len__(self):
+        return len(self._cache)
+
+    def __contains__(self, item):
+        try:
+            self[item]
+            return True
+        except KeyError:
+            return False
+
+    def __setitem__(self, key, value):
+        if not self.use_listings_cache:
+            return
+        if self.max_paths:
+            self._q(key)
+        self._cache[key] = value
+        if self.listings_expiry_time is not None:
+            self._times[key] = time.time()
+
+    def __delitem__(self, key):
+        del self._cache[key]
+
+    def __iter__(self):
+        entries = list(self._cache)
+
+        return (k for k in entries if k in self)
+
+    def __reduce__(self):
+        return (
+            DirCache,
+            (self.use_listings_cache, self.listings_expiry_time, self.max_paths),
+        )
diff --git a/venv/lib/python3.10/site-packages/fsspec/exceptions.py b/venv/lib/python3.10/site-packages/fsspec/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..ae8905475f02655f4fc5863931d99ca9da55db78
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec/exceptions.py
@@ -0,0 +1,18 @@
+"""
+fsspec user-defined exception classes
+"""
+
+import asyncio
+
+
+class BlocksizeMismatchError(ValueError):
+    """
+    Raised when a cached file is opened with a different blocksize than it was
+    written with
+    """
+
+
+class FSTimeoutError(asyncio.TimeoutError):
+    """
+    Raised when an fsspec operation times out
+    """
diff --git a/venv/lib/python3.10/site-packages/fsspec/fuse.py b/venv/lib/python3.10/site-packages/fsspec/fuse.py
new file mode 100644
index 0000000000000000000000000000000000000000..566d520fce3e94e3bbaee48c3c6acc9f1db315a8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec/fuse.py
@@ -0,0 +1,324 @@
+import argparse
+import logging
+import os
+import stat
+import threading
+import time
+from errno import EIO, ENOENT
+
+from fuse import FUSE, FuseOSError, LoggingMixIn, Operations
+
+from fsspec import __version__
+from fsspec.core import url_to_fs
+
+logger = logging.getLogger("fsspec.fuse")
+
+
+class FUSEr(Operations):
+    def __init__(self, fs, path, ready_file=False):
+        self.fs = fs
+        self.cache = {}
+        self.root = path.rstrip("/") + "/"
+        self.counter = 0
+        logger.info("Starting FUSE at %s", path)
+        self._ready_file = ready_file
+
+    def getattr(self, path, fh=None):
+        logger.debug("getattr %s", path)
+        if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]:
+            return {"type": "file", "st_size": 5}
+
+        path = "".join([self.root, path.lstrip("/")]).rstrip("/")
+        try:
+            info = self.fs.info(path)
+        except FileNotFoundError as exc:
+            raise FuseOSError(ENOENT) from exc
+
+        data = {"st_uid": info.get("uid", 1000), "st_gid": info.get("gid", 1000)}
+        perm = info.get("mode", 0o777)
+
+        if info["type"] != "file":
+            data["st_mode"] = stat.S_IFDIR | perm
+            data["st_size"] = 0
+            data["st_blksize"] = 0
+        else:
+            data["st_mode"] = stat.S_IFREG | perm
+            data["st_size"] = info["size"]
+            data["st_blksize"] = 5 * 2**20
+            data["st_nlink"] = 1
+        data["st_atime"] = info["atime"] if "atime" in info else time.time()
+        data["st_ctime"] = info["ctime"] if
"ctime" in info else time.time() + data["st_mtime"] = info["mtime"] if "mtime" in info else time.time() + return data + + def readdir(self, path, fh): + logger.debug("readdir %s", path) + path = "".join([self.root, path.lstrip("/")]) + files = self.fs.ls(path, False) + files = [os.path.basename(f.rstrip("/")) for f in files] + return [".", ".."] + files + + def mkdir(self, path, mode): + path = "".join([self.root, path.lstrip("/")]) + self.fs.mkdir(path) + return 0 + + def rmdir(self, path): + path = "".join([self.root, path.lstrip("/")]) + self.fs.rmdir(path) + return 0 + + def read(self, path, size, offset, fh): + logger.debug("read %s", (path, size, offset)) + if self._ready_file and path in ["/.fuse_ready", ".fuse_ready"]: + # status indicator + return b"ready" + + f = self.cache[fh] + f.seek(offset) + out = f.read(size) + return out + + def write(self, path, data, offset, fh): + logger.debug("write %s", (path, offset)) + f = self.cache[fh] + f.seek(offset) + f.write(data) + return len(data) + + def create(self, path, flags, fi=None): + logger.debug("create %s", (path, flags)) + fn = "".join([self.root, path.lstrip("/")]) + self.fs.touch(fn) # OS will want to get attributes immediately + f = self.fs.open(fn, "wb") + self.cache[self.counter] = f + self.counter += 1 + return self.counter - 1 + + def open(self, path, flags): + logger.debug("open %s", (path, flags)) + fn = "".join([self.root, path.lstrip("/")]) + if flags % 2 == 0: + # read + mode = "rb" + else: + # write/create + mode = "wb" + self.cache[self.counter] = self.fs.open(fn, mode) + self.counter += 1 + return self.counter - 1 + + def truncate(self, path, length, fh=None): + fn = "".join([self.root, path.lstrip("/")]) + if length != 0: + raise NotImplementedError + # maybe should be no-op since open with write sets size to zero anyway + self.fs.touch(fn) + + def unlink(self, path): + fn = "".join([self.root, path.lstrip("/")]) + try: + self.fs.rm(fn, False) + except (OSError, FileNotFoundError) as exc: + raise FuseOSError(EIO) from exc + + def release(self, path, fh): + try: + if fh in self.cache: + f = self.cache[fh] + f.close() + self.cache.pop(fh) + except Exception as e: + print(e) + return 0 + + def chmod(self, path, mode): + if hasattr(self.fs, "chmod"): + path = "".join([self.root, path.lstrip("/")]) + return self.fs.chmod(path, mode) + raise NotImplementedError + + +def run( + fs, + path, + mount_point, + foreground=True, + threads=False, + ready_file=False, + ops_class=FUSEr, +): + """Mount stuff in a local directory + + This uses fusepy to make it appear as if a given path on an fsspec + instance is in fact resident within the local file-system. + + This requires that fusepy by installed, and that FUSE be available on + the system (typically requiring a package to be installed with + apt, yum, brew, etc.). + + Parameters + ---------- + fs: file-system instance + From one of the compatible implementations + path: str + Location on that file-system to regard as the root directory to + mount. Note that you typically should include the terminating "/" + character. + mount_point: str + An empty directory on the local file-system where the contents of + the remote path will appear. + foreground: bool + Whether or not calling this function will block. Operation will + typically be more stable if True. + threads: bool + Whether or not to create threads when responding to file operations + within the mounter directory. Operation will typically be more + stable if False. + ready_file: bool + Whether the FUSE process is ready. 
The ``.fuse_ready`` file will + exist in the ``mount_point`` directory if True. Debugging purpose. + ops_class: FUSEr or Subclass of FUSEr + To override the default behavior of FUSEr. For Example, logging + to file. + + """ + func = lambda: FUSE( + ops_class(fs, path, ready_file=ready_file), + mount_point, + nothreads=not threads, + foreground=foreground, + ) + if not foreground: + th = threading.Thread(target=func) + th.daemon = True + th.start() + return th + else: # pragma: no cover + try: + func() + except KeyboardInterrupt: + pass + + +def main(args): + """Mount filesystem from chained URL to MOUNT_POINT. + + Examples: + + python3 -m fsspec.fuse memory /usr/share /tmp/mem + + python3 -m fsspec.fuse local /tmp/source /tmp/local \\ + -l /tmp/fsspecfuse.log + + You can also mount chained-URLs and use special settings: + + python3 -m fsspec.fuse 'filecache::zip::file://data.zip' \\ + / /tmp/zip \\ + -o 'filecache-cache_storage=/tmp/simplecache' + + You can specify the type of the setting by using `[int]` or `[bool]`, + (`true`, `yes`, `1` represents the Boolean value `True`): + + python3 -m fsspec.fuse 'simplecache::ftp://ftp1.at.proftpd.org' \\ + /historic/packages/RPMS /tmp/ftp \\ + -o 'simplecache-cache_storage=/tmp/simplecache' \\ + -o 'simplecache-check_files=false[bool]' \\ + -o 'ftp-listings_expiry_time=60[int]' \\ + -o 'ftp-username=anonymous' \\ + -o 'ftp-password=xieyanbo' + """ + + class RawDescriptionArgumentParser(argparse.ArgumentParser): + def format_help(self): + usage = super().format_help() + parts = usage.split("\n\n") + parts[1] = self.description.rstrip() + return "\n\n".join(parts) + + parser = RawDescriptionArgumentParser(prog="fsspec.fuse", description=main.__doc__) + parser.add_argument("--version", action="version", version=__version__) + parser.add_argument("url", type=str, help="fs url") + parser.add_argument("source_path", type=str, help="source directory in fs") + parser.add_argument("mount_point", type=str, help="local directory") + parser.add_argument( + "-o", + "--option", + action="append", + help="Any options of protocol included in the chained URL", + ) + parser.add_argument( + "-l", "--log-file", type=str, help="Logging FUSE debug info (Default: '')" + ) + parser.add_argument( + "-f", + "--foreground", + action="store_false", + help="Running in foreground or not (Default: False)", + ) + parser.add_argument( + "-t", + "--threads", + action="store_false", + help="Running with threads support (Default: False)", + ) + parser.add_argument( + "-r", + "--ready-file", + action="store_false", + help="The `.fuse_ready` file will exist after FUSE is ready. 
" + "(Debugging purpose, Default: False)", + ) + args = parser.parse_args(args) + + kwargs = {} + for item in args.option or []: + key, sep, value = item.partition("=") + if not sep: + parser.error(message=f"Wrong option: {item!r}") + val = value.lower() + if val.endswith("[int]"): + value = int(value[: -len("[int]")]) + elif val.endswith("[bool]"): + value = val[: -len("[bool]")] in ["1", "yes", "true"] + + if "-" in key: + fs_name, setting_name = key.split("-", 1) + if fs_name in kwargs: + kwargs[fs_name][setting_name] = value + else: + kwargs[fs_name] = {setting_name: value} + else: + kwargs[key] = value + + if args.log_file: + logging.basicConfig( + level=logging.DEBUG, + filename=args.log_file, + format="%(asctime)s %(message)s", + ) + + class LoggingFUSEr(FUSEr, LoggingMixIn): + pass + + fuser = LoggingFUSEr + else: + fuser = FUSEr + + fs, url_path = url_to_fs(args.url, **kwargs) + logger.debug("Mounting %s to %s", url_path, str(args.mount_point)) + run( + fs, + args.source_path, + args.mount_point, + foreground=args.foreground, + threads=args.threads, + ready_file=args.ready_file, + ops_class=fuser, + ) + + +if __name__ == "__main__": + import sys + + main(sys.argv[1:]) diff --git a/venv/lib/python3.10/site-packages/fsspec/generic.py b/venv/lib/python3.10/site-packages/fsspec/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..0a641b0e2bcf70729a44064319eecb3647450379 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/generic.py @@ -0,0 +1,396 @@ +from __future__ import annotations + +import inspect +import logging +import os +import shutil +import uuid + +from .asyn import AsyncFileSystem, _run_coros_in_chunks, sync_wrapper +from .callbacks import DEFAULT_CALLBACK +from .core import filesystem, get_filesystem_class, split_protocol, url_to_fs + +_generic_fs = {} +logger = logging.getLogger("fsspec.generic") + + +def set_generic_fs(protocol, **storage_options): + """Populate the dict used for method=="generic" lookups""" + _generic_fs[protocol] = filesystem(protocol, **storage_options) + + +def _resolve_fs(url, method, protocol=None, storage_options=None): + """Pick instance of backend FS""" + url = url[0] if isinstance(url, (list, tuple)) else url + protocol = protocol or split_protocol(url)[0] + storage_options = storage_options or {} + if method == "default": + return filesystem(protocol) + if method == "generic": + return _generic_fs[protocol] + if method == "current": + cls = get_filesystem_class(protocol) + return cls.current() + if method == "options": + fs, _ = url_to_fs(url, **storage_options.get(protocol, {})) + return fs + raise ValueError(f"Unknown FS resolution method: {method}") + + +def rsync( + source, + destination, + delete_missing=False, + source_field="size", + dest_field="size", + update_cond="different", + inst_kwargs=None, + fs=None, + **kwargs, +): + """Sync files between two directory trees + + (experimental) + + Parameters + ---------- + source: str + Root of the directory tree to take files from. This must be a directory, but + do not include any terminating "/" character + destination: str + Root path to copy into. The contents of this location should be + identical to the contents of ``source`` when done. This will be made a + directory, and the terminal "/" should not be included. + delete_missing: bool + If there are paths in the destination that don't exist in the + source and this is True, delete them. Otherwise, leave them alone. 
+        Note that only files are removed this way; directories already
+        present in the destination are left in place.
+    source_field: str | callable
+        If ``update_cond`` is "different", this is the key in the info
+        of source files to consider for difference. May be a function of the
+        info dict.
+    dest_field: str | callable
+        If ``update_cond`` is "different", this is the key in the info
+        of destination files to consider for difference. May be a function of
+        the info dict.
+    update_cond: "different"|"always"|"never"
+        If "always", every file is copied, regardless of whether it exists in
+        the destination. If "never", files that exist in the destination are
+        not copied again. If "different" (default), only copy if the info
+        fields given by ``source_field`` and ``dest_field`` (usually "size")
+        are different. Other comparisons may be added in the future.
+    inst_kwargs: dict|None
+        If ``fs`` is None, use this set of keyword arguments to make a
+        GenericFileSystem instance
+    fs: GenericFileSystem|None
+        Instance to use if explicitly given. The instance defines how to
+        make downstream file system instances from paths.
+
+    Returns
+    -------
+    dict of the copy operations that were performed, {source: destination}
+    """
+    fs = fs or GenericFileSystem(**(inst_kwargs or {}))
+    source = fs._strip_protocol(source)
+    destination = fs._strip_protocol(destination)
+    allfiles = fs.find(source, withdirs=True, detail=True)
+    if not fs.isdir(source):
+        raise ValueError("Can only rsync on a directory")
+    otherfiles = fs.find(destination, withdirs=True, detail=True)
+    dirs = [
+        a
+        for a, v in allfiles.items()
+        if v["type"] == "directory" and a.replace(source, destination) not in otherfiles
+    ]
+    logger.debug(f"{len(dirs)} directories to create")
+    if dirs:
+        fs.make_many_dirs(
+            [dirn.replace(source, destination) for dirn in dirs], exist_ok=True
+        )
+    allfiles = {a: v for a, v in allfiles.items() if v["type"] == "file"}
+    logger.debug(f"{len(allfiles)} files to consider for copy")
+    to_delete = [
+        o
+        for o, v in otherfiles.items()
+        if o.replace(destination, source) not in allfiles and v["type"] == "file"
+    ]
+    for k, v in allfiles.copy().items():
+        otherfile = k.replace(source, destination)
+        if otherfile in otherfiles:
+            if update_cond == "always":
+                allfiles[k] = otherfile
+            elif update_cond == "never":
+                allfiles.pop(k)
+            elif update_cond == "different":
+                inf1 = source_field(v) if callable(source_field) else v[source_field]
+                v2 = otherfiles[otherfile]
+                inf2 = dest_field(v2) if callable(dest_field) else v2[dest_field]
+                if inf1 != inf2:
+                    # details mismatch, make copy
+                    allfiles[k] = otherfile
+                else:
+                    # details match, don't copy
+                    allfiles.pop(k)
+        else:
+            # file not in target yet
+            allfiles[k] = otherfile
+    logger.debug(f"{len(allfiles)} files to copy")
+    if allfiles:
+        source_files, target_files = zip(*allfiles.items())
+        fs.cp(source_files, target_files, **kwargs)
+    logger.debug(f"{len(to_delete)} files to delete")
+    if delete_missing and to_delete:
+        fs.rm(to_delete)
+    return allfiles
+
+
+class GenericFileSystem(AsyncFileSystem):
+    """Wrapper over all other FS types
+
+    This implementation is a single unified interface to be able to run FS operations
+    over generic URLs, and dispatch to the specific implementations using the URL
+    protocol prefix.
+
+    Note: instances of this FS are always async, even if you never use it
+    with any async backend.
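+
+    Example (hypothetical URLs; each operation dispatches on the URL's
+    protocol):
+
+    >>> fs = GenericFileSystem()  # doctest: +SKIP
+    >>> fs.cp("memory://a/file.bin", "file:///tmp/file.bin")  # doctest: +SKIP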
+ """ + + protocol = "generic" # there is no real reason to ever use a protocol with this FS + + def __init__(self, default_method="default", storage_options=None, **kwargs): + """ + + Parameters + ---------- + default_method: str (optional) + Defines how to configure backend FS instances. Options are: + - "default": instantiate like FSClass(), with no + extra arguments; this is the default instance of that FS, and can be + configured via the config system + - "generic": takes instances from the `_generic_fs` dict in this module, + which you must populate before use. Keys are by protocol + - "options": expects storage_options, a dict mapping protocol to + kwargs to use when constructing the filesystem + - "current": takes the most recently instantiated version of each FS + """ + self.method = default_method + self.st_opts = storage_options + super().__init__(**kwargs) + + def _parent(self, path): + fs = _resolve_fs(path, self.method, storage_options=self.st_opts) + return fs.unstrip_protocol(fs._parent(path)) + + def _strip_protocol(self, path): + # normalization only + fs = _resolve_fs(path, self.method, storage_options=self.st_opts) + return fs.unstrip_protocol(fs._strip_protocol(path)) + + async def _find(self, path, maxdepth=None, withdirs=False, detail=False, **kwargs): + fs = _resolve_fs(path, self.method, storage_options=self.st_opts) + if fs.async_impl: + out = await fs._find( + path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs + ) + else: + out = fs.find( + path, maxdepth=maxdepth, withdirs=withdirs, detail=True, **kwargs + ) + result = {} + for k, v in out.items(): + v = v.copy() # don't corrupt target FS dircache + name = fs.unstrip_protocol(k) + v["name"] = name + result[name] = v + if detail: + return result + return list(result) + + async def _info(self, url, **kwargs): + fs = _resolve_fs(url, self.method) + if fs.async_impl: + out = await fs._info(url, **kwargs) + else: + out = fs.info(url, **kwargs) + out = out.copy() # don't edit originals + out["name"] = fs.unstrip_protocol(out["name"]) + return out + + async def _ls( + self, + url, + detail=True, + **kwargs, + ): + fs = _resolve_fs(url, self.method) + if fs.async_impl: + out = await fs._ls(url, detail=True, **kwargs) + else: + out = fs.ls(url, detail=True, **kwargs) + out = [o.copy() for o in out] # don't edit originals + for o in out: + o["name"] = fs.unstrip_protocol(o["name"]) + if detail: + return out + else: + return [o["name"] for o in out] + + async def _cat_file( + self, + url, + **kwargs, + ): + fs = _resolve_fs(url, self.method) + if fs.async_impl: + return await fs._cat_file(url, **kwargs) + else: + return fs.cat_file(url, **kwargs) + + async def _pipe_file( + self, + path, + value, + **kwargs, + ): + fs = _resolve_fs(path, self.method, storage_options=self.st_opts) + if fs.async_impl: + return await fs._pipe_file(path, value, **kwargs) + else: + return fs.pipe_file(path, value, **kwargs) + + async def _rm(self, url, **kwargs): + urls = url + if isinstance(urls, str): + urls = [urls] + fs = _resolve_fs(urls[0], self.method) + if fs.async_impl: + await fs._rm(urls, **kwargs) + else: + fs.rm(url, **kwargs) + + async def _makedirs(self, path, exist_ok=False): + logger.debug("Make dir %s", path) + fs = _resolve_fs(path, self.method, storage_options=self.st_opts) + if fs.async_impl: + await fs._makedirs(path, exist_ok=exist_ok) + else: + fs.makedirs(path, exist_ok=exist_ok) + + def rsync(self, source, destination, **kwargs): + """Sync files between two directory trees + + See `func:rsync` for more 
details. + """ + rsync(source, destination, fs=self, **kwargs) + + async def _cp_file( + self, + url, + url2, + blocksize=2**20, + callback=DEFAULT_CALLBACK, + tempdir: str | None = None, + **kwargs, + ): + fs = _resolve_fs(url, self.method) + fs2 = _resolve_fs(url2, self.method) + if fs is fs2: + # pure remote + if fs.async_impl: + return await fs._copy(url, url2, **kwargs) + else: + return fs.copy(url, url2, **kwargs) + await copy_file_op(fs, [url], fs2, [url2], tempdir, 1, on_error="raise") + + async def _make_many_dirs(self, urls, exist_ok=True): + fs = _resolve_fs(urls[0], self.method) + if fs.async_impl: + coros = [fs._makedirs(u, exist_ok=exist_ok) for u in urls] + await _run_coros_in_chunks(coros) + else: + for u in urls: + fs.makedirs(u, exist_ok=exist_ok) + + make_many_dirs = sync_wrapper(_make_many_dirs) + + async def _copy( + self, + path1: list[str], + path2: list[str], + recursive: bool = False, + on_error: str = "ignore", + maxdepth: int | None = None, + batch_size: int | None = None, + tempdir: str | None = None, + **kwargs, + ): + # TODO: special case for one FS being local, which can use get/put + # TODO: special case for one being memFS, which can use cat/pipe + if recursive: + raise NotImplementedError("Please use fsspec.generic.rsync") + path1 = [path1] if isinstance(path1, str) else path1 + path2 = [path2] if isinstance(path2, str) else path2 + + fs = _resolve_fs(path1, self.method) + fs2 = _resolve_fs(path2, self.method) + + if fs is fs2: + if fs.async_impl: + return await fs._copy(path1, path2, **kwargs) + else: + return fs.copy(path1, path2, **kwargs) + + await copy_file_op( + fs, path1, fs2, path2, tempdir, batch_size, on_error=on_error + ) + + +async def copy_file_op( + fs1, url1, fs2, url2, tempdir=None, batch_size=20, on_error="ignore" +): + import tempfile + + tempdir = tempdir or tempfile.mkdtemp() + try: + coros = [ + _copy_file_op( + fs1, + u1, + fs2, + u2, + os.path.join(tempdir, uuid.uuid4().hex), + ) + for u1, u2 in zip(url1, url2) + ] + out = await _run_coros_in_chunks( + coros, batch_size=batch_size, return_exceptions=True + ) + finally: + shutil.rmtree(tempdir) + if on_error == "return": + return out + elif on_error == "raise": + for o in out: + if isinstance(o, Exception): + raise o + + +async def _copy_file_op(fs1, url1, fs2, url2, local, on_error="ignore"): + if fs1.async_impl: + await fs1._get_file(url1, local) + else: + fs1.get_file(url1, local) + if fs2.async_impl: + await fs2._put_file(local, url2) + else: + fs2.put_file(local, url2) + os.unlink(local) + logger.debug("Copy %s -> %s; done", url1, url2) + + +async def maybe_await(cor): + if inspect.iscoroutine(cor): + return await cor + else: + return cor diff --git a/venv/lib/python3.10/site-packages/fsspec/gui.py b/venv/lib/python3.10/site-packages/fsspec/gui.py new file mode 100644 index 0000000000000000000000000000000000000000..9d914c8beb6cabb2c2700eb8eee31028559be2bd --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/gui.py @@ -0,0 +1,417 @@ +import ast +import contextlib +import logging +import os +import re +from collections.abc import Sequence +from typing import ClassVar + +import panel as pn + +from .core import OpenFile, get_filesystem_class, split_protocol +from .registry import known_implementations + +pn.extension() +logger = logging.getLogger("fsspec.gui") + + +class SigSlot: + """Signal-slot mixin, for Panel event passing + + Include this class in a widget manager's superclasses to be able to + register events and callbacks on Panel widgets managed by that class. 
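+
+ The intended pattern looks roughly like this (a sketch; the widget and
+ signal names are illustrative):
+
+ .. code-block:: python
+
+ class Go(SigSlot):
+ signals = ["clicked"]
+
+ def _setup(self):
+ self.btn = pn.widgets.Button(name="Go")
+ self._register(self.btn, "clicked", "clicks")
+ self.panel = pn.Row(self.btn)
+
+ go = Go()
+ go.connect("clicked", lambda value: print("clicks so far:", value))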
+
+ The method ``_register`` should be called as widgets are added, and external
+ code should call ``connect`` to associate callbacks.
+
+ By default, all signals emit a DEBUG logging statement.
+ """
+
+ # names of signals that this class may emit, each of which must be
+ # set by _register for any new instance
+ signals: ClassVar[Sequence[str]] = []
+ # names of actions that this class may respond to,
+ # each of which must be a method name
+ slots: ClassVar[Sequence[str]] = []
+
+ def __init__(self):
+ self._ignoring_events = False
+ self._sigs = {}
+ self._map = {}
+ self._setup()
+
+ def _setup(self):
+ """Create GUI elements and register signals"""
+ self.panel = pn.pane.PaneBase()
+ # no signals to set up in the base class
+
+ def _register(
+ self, widget, name, thing="value", log_level=logging.DEBUG, auto=False
+ ):
+ """Watch the given attribute of a widget and assign it a named event
+
+ This is normally called at the time a widget is instantiated, in the
+ class which owns it.
+
+ Parameters
+ ----------
+ widget : pn.layout.Panel or None
+ Widget to watch. If None, an anonymous signal not associated with
+ any widget.
+ name : str
+ Name of this event
+ thing : str
+ Attribute of the given widget to watch
+ log_level : int
+ When the signal is triggered, a logging event of the given level
+ will be fired in the ``fsspec.gui`` logger.
+ auto : bool
+ If True, automatically connects with a method in this class of the
+ same name.
+ """
+ if name not in self.signals:
+ raise ValueError(f"Attempt to assign an undeclared signal: {name}")
+ self._sigs[name] = {
+ "widget": widget,
+ "callbacks": [],
+ "thing": thing,
+ "log": log_level,
+ }
+ wn = "-".join(
+ [
+ getattr(widget, "name", str(widget)) if widget is not None else "none",
+ thing,
+ ]
+ )
+ self._map[wn] = name
+ if widget is not None:
+ widget.param.watch(self._signal, thing, onlychanged=True)
+ if auto and hasattr(self, name):
+ self.connect(name, getattr(self, name))
+
+ def _repr_mimebundle_(self, *args, **kwargs):
+ """Display in a notebook or a server"""
+ try:
+ return self.panel._repr_mimebundle_(*args, **kwargs)
+ except (ValueError, AttributeError) as exc:
+ raise NotImplementedError(
+ "Panel does not seem to be set up properly"
+ ) from exc
+
+ def connect(self, signal, slot):
+ """Associate callback with given event
+
+ The callback must be a function which takes the "new" value of the
+ watched attribute as the only parameter. If the callback returns False,
+ this cancels any further processing of the given event.
+
+ Alternatively, the callback can be a string, in which case it means
+ emitting the correspondingly-named event (i.e., connect to self)
+ """
+ self._sigs[signal]["callbacks"].append(slot)
+
+ def _signal(self, event):
+ """This is called by an action on a widget
+
+ Within a ``self.ignore_events`` context, nothing happens.
+
+ Tests can execute this method by directly changing the values of
+ widget components.
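+
+ For example (illustrative; ``protocol`` is one of the registered
+ widgets of ``FileSelector`` below):
+
+ .. code-block:: python
+
+ sel = FileSelector()
+ sel.protocol.value = "memory" # routed through _signal as "protocol_changed"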
+ """ + if not self._ignoring_events: + wn = "-".join([event.obj.name, event.name]) + if wn in self._map and self._map[wn] in self._sigs: + self._emit(self._map[wn], event.new) + + @contextlib.contextmanager + def ignore_events(self): + """Temporarily turn off events processing in this instance + + (does not propagate to children) + """ + self._ignoring_events = True + try: + yield + finally: + self._ignoring_events = False + + def _emit(self, sig, value=None): + """An event happened, call its callbacks + + This method can be used in tests to simulate message passing without + directly changing visual elements. + + Calling of callbacks will halt whenever one returns False. + """ + logger.log(self._sigs[sig]["log"], f"{sig}: {value}") + for callback in self._sigs[sig]["callbacks"]: + if isinstance(callback, str): + self._emit(callback) + else: + try: + # running callbacks should not break the interface + ret = callback(value) + if ret is False: + break + except Exception as e: + logger.exception( + "Exception (%s) while executing callback for signal: %s", + e, + sig, + ) + + def show(self, threads=False): + """Open a new browser tab and display this instance's interface""" + self.panel.show(threads=threads, verbose=False) + return self + + +class SingleSelect(SigSlot): + """A multiselect which only allows you to select one item for an event""" + + signals = ["_selected", "selected"] # the first is internal + slots = ["set_options", "set_selection", "add", "clear", "select"] + + def __init__(self, **kwargs): + self.kwargs = kwargs + super().__init__() + + def _setup(self): + self.panel = pn.widgets.MultiSelect(**self.kwargs) + self._register(self.panel, "_selected", "value") + self._register(None, "selected") + self.connect("_selected", self.select_one) + + def _signal(self, *args, **kwargs): + super()._signal(*args, **kwargs) + + def select_one(self, *_): + with self.ignore_events(): + val = [self.panel.value[-1]] if self.panel.value else [] + self.panel.value = val + self._emit("selected", self.panel.value) + + def set_options(self, options): + self.panel.options = options + + def clear(self): + self.panel.options = [] + + @property + def value(self): + return self.panel.value + + def set_selection(self, selection): + self.panel.value = [selection] + + +class FileSelector(SigSlot): + """Panel-based graphical file selector widget + + Instances of this widget are interactive and can be displayed in jupyter by having + them as the output of a cell, or in a separate browser tab using ``.show()``. + """ + + signals = [ + "protocol_changed", + "selection_changed", + "directory_entered", + "home_clicked", + "up_clicked", + "go_clicked", + "filters_changed", + ] + slots = ["set_filters", "go_home"] + + def __init__(self, url=None, filters=None, ignore=None, kwargs=None): + """ + + Parameters + ---------- + url : str (optional) + Initial value of the URL to populate the dialog; should include protocol + filters : list(str) (optional) + File endings to include in the listings. If not included, all files are + allowed. Does not affect directories. + If given, the endings will appear as checkboxes in the interface + ignore : list(str) (optional) + Regex(s) of file basename patterns to ignore, e.g., "\\." 
for typical
+ hidden files on posix
+ kwargs : dict (optional)
+ To pass to file system instance
+ """
+ if url:
+ self.init_protocol, url = split_protocol(url)
+ else:
+ self.init_protocol, url = "file", os.getcwd()
+ self.init_url = url
+ self.init_kwargs = (kwargs if isinstance(kwargs, str) else str(kwargs)) or "{}"
+ self.filters = filters
+ self.ignore = [re.compile(i) for i in ignore or []]
+ self._fs = None
+ super().__init__()
+
+ def _setup(self):
+ self.url = pn.widgets.TextInput(
+ name="url",
+ value=self.init_url,
+ align="end",
+ sizing_mode="stretch_width",
+ width_policy="max",
+ )
+ self.protocol = pn.widgets.Select(
+ options=sorted(known_implementations),
+ value=self.init_protocol,
+ name="protocol",
+ align="center",
+ )
+ self.kwargs = pn.widgets.TextInput(
+ name="kwargs", value=self.init_kwargs, align="center"
+ )
+ self.go = pn.widgets.Button(name="⇨", align="end", width=45)
+ self.main = SingleSelect(size=10)
+ self.home = pn.widgets.Button(name="🏠", width=40, height=30, align="end")
+ self.up = pn.widgets.Button(name="‹", width=30, height=30, align="end")
+
+ self._register(self.protocol, "protocol_changed", auto=True)
+ self._register(self.go, "go_clicked", "clicks", auto=True)
+ self._register(self.up, "up_clicked", "clicks", auto=True)
+ self._register(self.home, "home_clicked", "clicks", auto=True)
+ self._register(None, "selection_changed")
+ self.main.connect("selected", self.selection_changed)
+ self._register(None, "directory_entered")
+ self.prev_protocol = self.protocol.value
+ self.prev_kwargs = self.storage_options
+
+ self.filter_sel = pn.widgets.CheckBoxGroup(
+ value=[], options=[], inline=False, align="end", width_policy="min"
+ )
+ self._register(self.filter_sel, "filters_changed", auto=True)
+
+ self.panel = pn.Column(
+ pn.Row(self.protocol, self.kwargs),
+ pn.Row(self.home, self.up, self.url, self.go, self.filter_sel),
+ self.main.panel,
+ )
+ self.set_filters(self.filters)
+ self.go_clicked()
+
+ def set_filters(self, filters=None):
+ self.filters = filters
+ if filters:
+ self.filter_sel.options = filters
+ self.filter_sel.value = filters
+ else:
+ self.filter_sel.options = []
+ self.filter_sel.value = []
+
+ @property
+ def storage_options(self):
+ """Value of the kwargs box as a dictionary"""
+ return ast.literal_eval(self.kwargs.value) or {}
+
+ @property
+ def fs(self):
+ """Current filesystem instance"""
+ if self._fs is None:
+ cls = get_filesystem_class(self.protocol.value)
+ self._fs = cls(**self.storage_options)
+ return self._fs
+
+ @property
+ def urlpath(self):
+ """URL of currently selected item"""
+ return (
+ (f"{self.protocol.value}://{self.main.value[0]}")
+ if self.main.value
+ else None
+ )
+
+ def open_file(self, mode="rb", compression=None, encoding=None):
+ """Create OpenFile instance for the currently selected item
+
+ For example, in a notebook you might do something like
+
+ .. code-block::
+
+ [ ]: sel = FileSelector(); sel
+
+ # user selects their file
+
+ [ ]: with sel.open_file('rb') as f:
+ ... out = f.read()
+
+ Parameters
+ ----------
+ mode: str (optional)
+ Open mode for the file.
+ compression: str (optional)
+ Whether to interact with the file as compressed. Set to 'infer' to
+ guess compression from the file ending
+ encoding: str (optional)
+ If using text mode, use this encoding; defaults to UTF8.
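+
+ For a compressed file one might do (a sketch):
+
+ .. code-block::
+
+ with sel.open_file(mode="rt", compression="infer") as f:
+ text = f.read()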
+ """ + if self.urlpath is None: + raise ValueError("No file selected") + return OpenFile(self.fs, self.urlpath, mode, compression, encoding) + + def filters_changed(self, values): + self.filters = values + self.go_clicked() + + def selection_changed(self, *_): + if self.urlpath is None: + return + if self.fs.isdir(self.urlpath): + self.url.value = self.fs._strip_protocol(self.urlpath) + self.go_clicked() + + def go_clicked(self, *_): + if ( + self.prev_protocol != self.protocol.value + or self.prev_kwargs != self.storage_options + ): + self._fs = None # causes fs to be recreated + self.prev_protocol = self.protocol.value + self.prev_kwargs = self.storage_options + listing = sorted( + self.fs.ls(self.url.value, detail=True), key=lambda x: x["name"] + ) + listing = [ + l + for l in listing + if not any(i.match(l["name"].rsplit("/", 1)[-1]) for i in self.ignore) + ] + folders = { + "📁 " + o["name"].rsplit("/", 1)[-1]: o["name"] + for o in listing + if o["type"] == "directory" + } + files = { + "📄 " + o["name"].rsplit("/", 1)[-1]: o["name"] + for o in listing + if o["type"] == "file" + } + if self.filters: + files = { + k: v + for k, v in files.items() + if any(v.endswith(ext) for ext in self.filters) + } + self.main.set_options(dict(**folders, **files)) + + def protocol_changed(self, *_): + self._fs = None + self.main.options = [] + self.url.value = "" + + def home_clicked(self, *_): + self.protocol.value = self.init_protocol + self.kwargs.value = self.init_kwargs + self.url.value = self.init_url + self.go_clicked() + + def up_clicked(self, *_): + self.url.value = self.fs._parent(self.url.value) + self.go_clicked() diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/libarchive.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/libarchive.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..10813e3e550ffadb56dbacf8f3782c4da6545a73 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/libarchive.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53f67d33d32c24d280bd80a3ec70a69596511571 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/local.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1437b611ef4e132ee5c6994d7b2192d4a61a14c6 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/memory.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4c9232768d268526ee6005562705cae46ea48fe3 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/reference.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc 
b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be53b9fab5540a4dee8a119a9ac4ddaa096427a0 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/sftp.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/smb.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/smb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b218321c0e8de9c9104a874d2619653c884972e4 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/smb.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/tar.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/tar.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..441fbc9344995754894297b2ed135ef813bd5130 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/tar.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5076b525869483b14ce781b31e7503e50b4d18ff Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/webhdfs.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..22adfc34ded3c7a48c243057e726c273dc5999af Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/implementations/__pycache__/zip.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/json.py b/venv/lib/python3.10/site-packages/fsspec/json.py new file mode 100644 index 0000000000000000000000000000000000000000..5c53a24913d0b28f4b53a163b97ff8f58abeb031 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/json.py @@ -0,0 +1,112 @@ +import json +from collections.abc import Callable, Mapping, Sequence +from contextlib import suppress +from pathlib import PurePath +from typing import Any, ClassVar + +from .registry import _import_class, get_filesystem_class +from .spec import AbstractFileSystem + + +class FilesystemJSONEncoder(json.JSONEncoder): + include_password: ClassVar[bool] = True + + def default(self, o: Any) -> Any: + if isinstance(o, AbstractFileSystem): + return o.to_dict(include_password=self.include_password) + if isinstance(o, PurePath): + cls = type(o) + return {"cls": f"{cls.__module__}.{cls.__name__}", "str": str(o)} + + return super().default(o) + + def make_serializable(self, obj: Any) -> Any: + """ + Recursively converts an object so that it can be JSON serialized via + :func:`json.dumps` and :func:`json.dump`, without actually calling + said functions. 
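+
+ A usage sketch (``MemoryFileSystem`` ships with fsspec; any
+ ``AbstractFileSystem`` subclass would do):
+
+ .. code-block:: python
+
+ from fsspec.implementations.memory import MemoryFileSystem
+
+ enc = FilesystemJSONEncoder()
+ payload = enc.make_serializable({"fs": MemoryFileSystem(), "n": 1})
+ # payload now contains only JSON-ready values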
+ """
+ if isinstance(obj, (str, int, float, bool)):
+ return obj
+ if isinstance(obj, Mapping):
+ return {k: self.make_serializable(v) for k, v in obj.items()}
+ if isinstance(obj, Sequence):
+ return [self.make_serializable(v) for v in obj]
+
+ return self.default(obj)
+
+
+class FilesystemJSONDecoder(json.JSONDecoder):
+ def __init__(
+ self,
+ *,
+ object_hook: Callable[[dict[str, Any]], Any] | None = None,
+ parse_float: Callable[[str], Any] | None = None,
+ parse_int: Callable[[str], Any] | None = None,
+ parse_constant: Callable[[str], Any] | None = None,
+ strict: bool = True,
+ object_pairs_hook: Callable[[list[tuple[str, Any]]], Any] | None = None,
+ ) -> None:
+ self.original_object_hook = object_hook
+
+ super().__init__(
+ object_hook=self.custom_object_hook,
+ parse_float=parse_float,
+ parse_int=parse_int,
+ parse_constant=parse_constant,
+ strict=strict,
+ object_pairs_hook=object_pairs_hook,
+ )
+
+ @classmethod
+ def try_resolve_path_cls(cls, dct: dict[str, Any]):
+ with suppress(Exception):
+ fqp = dct["cls"]
+
+ path_cls = _import_class(fqp)
+
+ if issubclass(path_cls, PurePath):
+ return path_cls
+
+ return None
+
+ @classmethod
+ def try_resolve_fs_cls(cls, dct: dict[str, Any]):
+ with suppress(Exception):
+ if "cls" in dct:
+ try:
+ fs_cls = _import_class(dct["cls"])
+ if issubclass(fs_cls, AbstractFileSystem):
+ return fs_cls
+ except Exception:
+ if "protocol" in dct: # Fallback if cls cannot be imported
+ return get_filesystem_class(dct["protocol"])
+
+ raise
+
+ return None
+
+ def custom_object_hook(self, dct: dict[str, Any]):
+ if "cls" in dct:
+ if (obj_cls := self.try_resolve_fs_cls(dct)) is not None:
+ return AbstractFileSystem.from_dict(dct)
+ if (obj_cls := self.try_resolve_path_cls(dct)) is not None:
+ return obj_cls(dct["str"])
+
+ if self.original_object_hook is not None:
+ return self.original_object_hook(dct)
+
+ return dct
+
+ def unmake_serializable(self, obj: Any) -> Any:
+ """
+ Inverse function of :meth:`FilesystemJSONEncoder.make_serializable`.
+ """
+ if isinstance(obj, dict):
+ obj = self.custom_object_hook(obj)
+ if isinstance(obj, dict):
+ return {k: self.unmake_serializable(v) for k, v in obj.items()}
+ if isinstance(obj, (list, tuple)):
+ return [self.unmake_serializable(v) for v in obj]
+
+ return obj
diff --git a/venv/lib/python3.10/site-packages/fsspec/mapping.py b/venv/lib/python3.10/site-packages/fsspec/mapping.py
new file mode 100644
index 0000000000000000000000000000000000000000..752eef35273b13eded7297e2e801b58e436a25b1
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec/mapping.py
@@ -0,0 +1,251 @@
+import array
+import logging
+import posixpath
+import warnings
+from collections.abc import MutableMapping
+from functools import cached_property
+
+from fsspec.core import url_to_fs
+
+logger = logging.getLogger("fsspec.mapping")
+
+
+class FSMap(MutableMapping):
+ """Wrap a FileSystem instance as a mutable mapping.
+
+ The keys of the mapping become files under the given root, and the
+ values (which must be bytes) the contents of those files.
+
+ Parameters
+ ----------
+ root: string
+ prefix for all the files
+ fs: FileSystem instance
+ check: bool (=False)
+ performs a touch at the location, to check for write access.
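+ create: bool (=False)
+ if True, ensure that the root directory exists at instantiation,
+ making it if necessary
+ missing_exceptions: tuple (optional)
+ exception types to interpret as a missing key; defaults to
+ (FileNotFoundError, IsADirectoryError, NotADirectoryError)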
+
+ Examples
+ --------
+ >>> fs = FileSystem(**parameters) # doctest: +SKIP
+ >>> d = FSMap('my-data/path/', fs) # doctest: +SKIP
+ or, more likely
+ >>> d = fs.get_mapper('my-data/path/')
+
+ >>> d['loc1'] = b'Hello World' # doctest: +SKIP
+ >>> list(d.keys()) # doctest: +SKIP
+ ['loc1']
+ >>> d['loc1'] # doctest: +SKIP
+ b'Hello World'
+ """
+
+ def __init__(self, root, fs, check=False, create=False, missing_exceptions=None):
+ self.fs = fs
+ self.root = fs._strip_protocol(root)
+ self._root_key_to_str = fs._strip_protocol(posixpath.join(root, "x"))[:-1]
+ if missing_exceptions is None:
+ missing_exceptions = (
+ FileNotFoundError,
+ IsADirectoryError,
+ NotADirectoryError,
+ )
+ self.missing_exceptions = missing_exceptions
+ self.check = check
+ self.create = create
+ if create:
+ if not self.fs.exists(root):
+ self.fs.mkdir(root)
+ if check:
+ if not self.fs.exists(root):
+ raise ValueError(
+ f"Path {root} does not exist. Create "
+ f"with the ``create=True`` keyword"
+ )
+ self.fs.touch(root + "/a")
+ self.fs.rm(root + "/a")
+
+ @cached_property
+ def dirfs(self):
+ """dirfs instance that can be used with the same keys as the mapper"""
+ from .implementations.dirfs import DirFileSystem
+
+ return DirFileSystem(path=self._root_key_to_str, fs=self.fs)
+
+ def clear(self):
+ """Remove all keys below root - empties out mapping"""
+ logger.info("Clear mapping at %s", self.root)
+ try:
+ self.fs.rm(self.root, True)
+ self.fs.mkdir(self.root)
+ except: # noqa: E722
+ pass
+
+ def getitems(self, keys, on_error="raise"):
+ """Fetch multiple items from the store
+
+ If the backend is async-able, this might proceed concurrently
+
+ Parameters
+ ----------
+ keys: list(str)
+ The keys to be fetched
+ on_error : "raise", "omit", "return"
+ If raise, an underlying exception will be raised (converted to KeyError
+ if the type is in self.missing_exceptions); if omit, keys with exception
+ will simply not be included in the output; if "return", all keys are
+ included in the output, but the value will be bytes or an exception
+ instance.
+
+ Returns
+ -------
+ dict(key, bytes|exception)
+ """
+ keys2 = [self._key_to_str(k) for k in keys]
+ oe = on_error if on_error == "raise" else "return"
+ try:
+ out = self.fs.cat(keys2, on_error=oe)
+ if isinstance(out, bytes):
+ out = {keys2[0]: out}
+ except self.missing_exceptions as e:
+ raise KeyError from e
+ out = {
+ k: (KeyError() if isinstance(v, self.missing_exceptions) else v)
+ for k, v in out.items()
+ }
+ return {
+ key: out[k2] if on_error == "raise" else out.get(k2, KeyError(k2))
+ for key, k2 in zip(keys, keys2)
+ if on_error == "return" or not isinstance(out[k2], BaseException)
+ }
+
+ def setitems(self, values_dict):
+ """Set the values of multiple items in the store
+
+ Parameters
+ ----------
+ values_dict: dict(str, bytes)
+ """
+ values = {self._key_to_str(k): maybe_convert(v) for k, v in values_dict.items()}
+ self.fs.pipe(values)
+
+ def delitems(self, keys):
+ """Remove multiple keys from the store"""
+ self.fs.rm([self._key_to_str(k) for k in keys])
+
+ def _key_to_str(self, key):
+ """Generate full path for the key"""
+ if not isinstance(key, str):
+ # raise TypeError(f"key must be of type `str`, got `{type(key).__name__}`")
+ warnings.warn(
+ "from fsspec 2023.5 onward FSMap non-str keys will raise TypeError",
+ DeprecationWarning,
+ )
+ if isinstance(key, list):
+ key = tuple(key)
+ key = str(key)
+ return f"{self._root_key_to_str}{key}".rstrip("/")
+
+ def _str_to_key(self, s):
+ """Strip path prefix to leave key name"""
+ return s[len(self.root) :].lstrip("/")
+
+ def __getitem__(self, key, default=None):
+ """Retrieve data"""
+ k = self._key_to_str(key)
+ try:
+ result = self.fs.cat(k)
+ except self.missing_exceptions as exc:
+ if default is not None:
+ return default
+ raise KeyError(key) from exc
+ return result
+
+ def pop(self, key, default=None):
+ """Pop data"""
+ result = self.__getitem__(key, default)
+ try:
+ del self[key]
+ except KeyError:
+ pass
+ return result
+
+ def __setitem__(self, key, value):
+ """Store value in key"""
+ key = self._key_to_str(key)
+ self.fs.mkdirs(self.fs._parent(key), exist_ok=True)
+ self.fs.pipe_file(key, maybe_convert(value))
+
+ def __iter__(self):
+ return (self._str_to_key(x) for x in self.fs.find(self.root))
+
+ def __len__(self):
+ return len(self.fs.find(self.root))
+
+ def __delitem__(self, key):
+ """Remove key"""
+ try:
+ self.fs.rm(self._key_to_str(key))
+ except Exception as exc:
+ raise KeyError from exc
+
+ def __contains__(self, key):
+ """Does key exist in mapping?"""
+ path = self._key_to_str(key)
+ return self.fs.isfile(path)
+
+ def __reduce__(self):
+ return FSMap, (self.root, self.fs, False, False, self.missing_exceptions)
+
+
+def maybe_convert(value):
+ if isinstance(value, array.array) or hasattr(value, "__array__"):
+ # bytes-like things
+ if hasattr(value, "dtype") and value.dtype.kind in "Mm":
+ # The buffer interface doesn't support datetime64/timedelta64 numpy
+ # arrays
+ value = value.view("int64")
+ value = bytes(memoryview(value))
+ return value
+
+
+def get_mapper(
+ url="",
+ check=False,
+ create=False,
+ missing_exceptions=None,
+ alternate_root=None,
+ **kwargs,
+):
+ """Create key-value interface for given URL and options
+
+ The URL will be of the form "protocol://location" and point to the root
+ of the mapper required. All keys will be file-names below this location,
+ and their values the contents of each key.
+
+ Also accepts compound URLs like zip::s3://bucket/file.zip, see ``fsspec.open``.
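+
+ A short sketch (the ``memory`` protocol is built into fsspec):
+
+ .. code-block:: python
+
+ from fsspec import get_mapper
+
+ d = get_mapper("memory://mapper-demo", create=True)
+ d["key"] = b"value"
+ dict(d) # {'key': b'value'}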
+
+ Parameters
+ ----------
+ url: str
+ Root URL of mapping
+ check: bool
+ Whether to attempt to read from the location before instantiation, to
+ check that the mapping does exist
+ create: bool
+ Whether to make the directory corresponding to the root before
+ instantiating
+ missing_exceptions: None or tuple
+ If given, these exception types will be regarded as missing keys and
+ raise KeyError when trying to read data. By default, you get
+ (FileNotFoundError, IsADirectoryError, NotADirectoryError)
+ alternate_root: None or str
+ In cases of complex URLs, the parser may fail to pick the correct part
+ for the mapper root, so this arg can override
+
+ Returns
+ -------
+ ``FSMap`` instance, the dict-like key-value store.
+ """
+ # Removing protocol here - could defer to each open() on the backend
+ fs, urlpath = url_to_fs(url, **kwargs)
+ root = alternate_root if alternate_root is not None else urlpath
+ return FSMap(root, fs, check, create, missing_exceptions=missing_exceptions)
diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8ed2ad802ecaf021106c25c03112f29e75c7b2f8
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__init__.py
@@ -0,0 +1,289 @@
+import os
+from hashlib import md5
+
+import pytest
+
+from fsspec.implementations.local import LocalFileSystem
+from fsspec.tests.abstract.copy import AbstractCopyTests # noqa: F401
+from fsspec.tests.abstract.get import AbstractGetTests # noqa: F401
+from fsspec.tests.abstract.open import AbstractOpenTests # noqa: F401
+from fsspec.tests.abstract.pipe import AbstractPipeTests # noqa: F401
+from fsspec.tests.abstract.put import AbstractPutTests # noqa: F401
+
+
+class BaseAbstractFixtures:
+ """
+ Abstract base class containing fixtures that are used by but never need to
+ be overridden in derived filesystem-specific classes to run the abstract
+ tests on such filesystems.
+ """
+
+ @pytest.fixture
+ def fs_bulk_operations_scenario_0(self, fs, fs_join, fs_path):
+ """
+ Scenario on remote filesystem that is used for many cp/get/put tests.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ source = self._bulk_operations_scenario_0(fs, fs_join, fs_path)
+ yield source
+ fs.rm(source, recursive=True)
+
+ @pytest.fixture
+ def fs_glob_edge_cases_files(self, fs, fs_join, fs_path):
+ """
+ Scenario on remote filesystem that is used for glob edge cases cp/get/put tests.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ source = self._glob_edge_cases_files(fs, fs_join, fs_path)
+ yield source
+ fs.rm(source, recursive=True)
+
+ @pytest.fixture
+ def fs_dir_and_file_with_same_name_prefix(self, fs, fs_join, fs_path):
+ """
+ Scenario on remote filesystem that is used to check cp/get/put on directory
+ and file with the same name prefixes.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ source = self._dir_and_file_with_same_name_prefix(fs, fs_join, fs_path)
+ yield source
+ fs.rm(source, recursive=True)
+
+ @pytest.fixture
+ def fs_10_files_with_hashed_names(self, fs, fs_join, fs_path):
+ """
+ Scenario on remote filesystem that is used to check cp/get/put file order
+ when source and destination are lists.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ source = self._10_files_with_hashed_names(fs, fs_join, fs_path)
+ yield source
+ fs.rm(source, recursive=True)
+
+ @pytest.fixture
+ def fs_target(self, fs, fs_join, fs_path):
+ """
+ Return name of remote directory that does not yet exist to copy into.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ target = fs_join(fs_path, "target")
+ yield target
+ if fs.exists(target):
+ fs.rm(target, recursive=True)
+
+ @pytest.fixture
+ def local_bulk_operations_scenario_0(self, local_fs, local_join, local_path):
+ """
+ Scenario on local filesystem that is used for many cp/get/put tests.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ source = self._bulk_operations_scenario_0(local_fs, local_join, local_path)
+ yield source
+ local_fs.rm(source, recursive=True)
+
+ @pytest.fixture
+ def local_glob_edge_cases_files(self, local_fs, local_join, local_path):
+ """
+ Scenario on local filesystem that is used for glob edge cases cp/get/put tests.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ source = self._glob_edge_cases_files(local_fs, local_join, local_path)
+ yield source
+ local_fs.rm(source, recursive=True)
+
+ @pytest.fixture
+ def local_dir_and_file_with_same_name_prefix(
+ self, local_fs, local_join, local_path
+ ):
+ """
+ Scenario on local filesystem that is used to check cp/get/put on directory
+ and file with the same name prefixes.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ source = self._dir_and_file_with_same_name_prefix(
+ local_fs, local_join, local_path
+ )
+ yield source
+ local_fs.rm(source, recursive=True)
+
+ @pytest.fixture
+ def local_10_files_with_hashed_names(self, local_fs, local_join, local_path):
+ """
+ Scenario on local filesystem that is used to check cp/get/put file order
+ when source and destination are lists.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ source = self._10_files_with_hashed_names(local_fs, local_join, local_path)
+ yield source
+ local_fs.rm(source, recursive=True)
+
+ @pytest.fixture
+ def local_target(self, local_fs, local_join, local_path):
+ """
+ Return name of local directory that does not yet exist to copy into.
+
+ Cleans up at the end of each test in which it is used.
+ """
+ target = local_join(local_path, "target")
+ yield target
+ if local_fs.exists(target):
+ local_fs.rm(target, recursive=True)
+
+ def _glob_edge_cases_files(self, some_fs, some_join, some_path):
+ """
+ Scenario that is used for glob edge cases cp/get/put tests.
+ Creates the following directory and file structure:
+
+ 📁 source
+ ├── 📄 file1
+ ├── 📄 file2
+ ├── 📁 subdir0
+ │ ├── 📄 subfile1
+ │ ├── 📄 subfile2
+ │ └── 📁 nesteddir
+ │ └── 📄 nestedfile
+ └── 📁 subdir1
+ ├── 📄 subfile1
+ ├── 📄 subfile2
+ └── 📁 nesteddir
+ └── 📄 nestedfile
+ """
+ source = some_join(some_path, "source")
+ some_fs.touch(some_join(source, "file1"))
+ some_fs.touch(some_join(source, "file2"))
+
+ for subdir_idx in range(2):
+ subdir = some_join(source, f"subdir{subdir_idx}")
+ nesteddir = some_join(subdir, "nesteddir")
+ some_fs.makedirs(nesteddir)
+ some_fs.touch(some_join(subdir, "subfile1"))
+ some_fs.touch(some_join(subdir, "subfile2"))
+ some_fs.touch(some_join(nesteddir, "nestedfile"))
+
+ return source
+
+ def _bulk_operations_scenario_0(self, some_fs, some_join, some_path):
+ """
+ Scenario that is used for many cp/get/put tests.
Creates the following + directory and file structure: + + 📁 source + ├── 📄 file1 + ├── 📄 file2 + └── 📁 subdir + ├── 📄 subfile1 + ├── 📄 subfile2 + └── 📁 nesteddir + └── 📄 nestedfile + """ + source = some_join(some_path, "source") + subdir = some_join(source, "subdir") + nesteddir = some_join(subdir, "nesteddir") + some_fs.makedirs(nesteddir) + some_fs.touch(some_join(source, "file1")) + some_fs.touch(some_join(source, "file2")) + some_fs.touch(some_join(subdir, "subfile1")) + some_fs.touch(some_join(subdir, "subfile2")) + some_fs.touch(some_join(nesteddir, "nestedfile")) + return source + + def _dir_and_file_with_same_name_prefix(self, some_fs, some_join, some_path): + """ + Scenario that is used to check cp/get/put on directory and file with + the same name prefixes. Creates the following directory and file structure: + + 📁 source + ├── 📄 subdir.txt + └── 📁 subdir + └── 📄 subfile.txt + """ + source = some_join(some_path, "source") + subdir = some_join(source, "subdir") + file = some_join(source, "subdir.txt") + subfile = some_join(subdir, "subfile.txt") + some_fs.makedirs(subdir) + some_fs.touch(file) + some_fs.touch(subfile) + return source + + def _10_files_with_hashed_names(self, some_fs, some_join, some_path): + """ + Scenario that is used to check cp/get/put files order when source and + destination are lists. Creates the following directory and file structure: + + 📁 source + └── 📄 {hashed([0-9])}.txt + """ + source = some_join(some_path, "source") + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + path = some_join(source, f"{hashed_i}.txt") + some_fs.pipe(path=path, value=f"{i}".encode()) + return source + + +class AbstractFixtures(BaseAbstractFixtures): + """ + Abstract base class containing fixtures that may be overridden in derived + filesystem-specific classes to run the abstract tests on such filesystems. + + For any particular filesystem some of these fixtures must be overridden, + such as ``fs`` and ``fs_path``, and others may be overridden if the + default functions here are not appropriate, such as ``fs_join``. + """ + + @pytest.fixture + def fs(self): + raise NotImplementedError("This function must be overridden in derived classes") + + @pytest.fixture + def fs_join(self): + """ + Return a function that joins its arguments together into a path. + + Most fsspec implementations join paths in a platform-dependent way, + but some will override this to always use a forward slash. + """ + return os.path.join + + @pytest.fixture + def fs_path(self): + raise NotImplementedError("This function must be overridden in derived classes") + + @pytest.fixture(scope="class") + def local_fs(self): + # Maybe need an option for auto_mkdir=False? This is only relevant + # for certain implementations. + return LocalFileSystem(auto_mkdir=True) + + @pytest.fixture + def local_join(self): + """ + Return a function that joins its arguments together into a path, on + the local filesystem. + """ + return os.path.join + + @pytest.fixture + def local_path(self, tmpdir): + return tmpdir + + @pytest.fixture + def supports_empty_directories(self): + """ + Return whether this implementation supports empty directories. 
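+
+ A filesystem whose backend cannot represent empty directories (many
+ object stores, for example) would override this in its own fixture
+ class, roughly:
+
+ .. code-block:: python
+
+ @pytest.fixture
+ def supports_empty_directories(self):
+ return False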
+ """ + return True + + @pytest.fixture + def fs_sanitize_path(self): + return lambda x: x diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d60f94b101cb89ac931cf869cc3be2b9e147e03a Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9bc648f5796b2bc3905833037a207b1ec79539b Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/common.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59cbb1cef8e0178f6b13414de143c85f5584792b Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/copy.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..904766803bb4e53c70ad714e396786318b275961 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/get.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/mv.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/mv.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..16224b838ac1455e9a3a9b18607c428c7078f143 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/mv.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/open.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/open.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dec6b55e7974a2207b7b78b9839e844ad96c1c13 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/open.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/pipe.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/pipe.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..718c99bcf9e2ad8c1e510691826012346417fdb8 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/pipe.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-310.pyc b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8c13071a73e00d82651e22b634254b8dcc196a1 Binary files /dev/null and b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/__pycache__/put.cpython-310.pyc differ diff --git 
a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/common.py b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/common.py new file mode 100644 index 0000000000000000000000000000000000000000..22e7c4140404ab2a8928689721419cf05c2760b9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/common.py @@ -0,0 +1,175 @@ +GLOB_EDGE_CASES_TESTS = { + "argnames": ("path", "recursive", "maxdepth", "expected"), + "argvalues": [ + ("fil?1", False, None, ["file1"]), + ("fil?1", True, None, ["file1"]), + ("file[1-2]", False, None, ["file1", "file2"]), + ("file[1-2]", True, None, ["file1", "file2"]), + ("*", False, None, ["file1", "file2"]), + ( + "*", + True, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("*", True, 1, ["file1", "file2"]), + ( + "*", + True, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ("*1", False, None, ["file1"]), + ( + "*1", + True, + None, + [ + "file1", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("*1", True, 2, ["file1", "subdir1/subfile1", "subdir1/subfile2"]), + ( + "**", + False, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "**", + True, + None, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("**", True, 1, ["file1", "file2"]), + ( + "**", + True, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "**", + False, + 2, + [ + "file1", + "file2", + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ("**/*1", False, None, ["file1", "subdir0/subfile1", "subdir1/subfile1"]), + ( + "**/*1", + True, + None, + [ + "file1", + "subdir0/subfile1", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ("**/*1", True, 1, ["file1"]), + ( + "**/*1", + True, + 2, + ["file1", "subdir0/subfile1", "subdir1/subfile1", "subdir1/subfile2"], + ), + ("**/*1", False, 2, ["file1", "subdir0/subfile1", "subdir1/subfile1"]), + ("**/subdir0", False, None, []), + ("**/subdir0", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]), + ("**/subdir0/nested*", False, 2, []), + ("**/subdir0/nested*", True, 2, ["nestedfile"]), + ("subdir[1-2]", False, None, []), + ("subdir[1-2]", True, None, ["subfile1", "subfile2", "nesteddir/nestedfile"]), + ("subdir[1-2]", True, 2, ["subfile1", "subfile2"]), + ("subdir[0-1]", False, None, []), + ( + "subdir[0-1]", + True, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", + "subdir0/nesteddir/nestedfile", + "subdir1/subfile1", + "subdir1/subfile2", + "subdir1/nesteddir/nestedfile", + ], + ), + ( + "subdir[0-1]/*fil[e]*", + False, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ( + "subdir[0-1]/*fil[e]*", + True, + None, + [ + "subdir0/subfile1", + "subdir0/subfile2", + "subdir1/subfile1", + "subdir1/subfile2", + ], + ), + ], +} diff --git 
a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/copy.py b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/copy.py new file mode 100644 index 0000000000000000000000000000000000000000..e39e57e5f7d52bfda8ab5e2398b04cc2303630a0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/copy.py @@ -0,0 +1,557 @@ +from hashlib import md5 +from itertools import product + +import pytest + +from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS + + +class AbstractCopyTests: + def test_copy_file_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1a + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + fs.touch(fs_join(target, "dummy")) + assert fs.isdir(target) + + target_file2 = fs_join(target, "file2") + target_subfile1 = fs_join(target, "subfile1") + + # Copy from source directory + fs.cp(fs_join(source, "file2"), target) + assert fs.isfile(target_file2) + + # Copy from sub directory + fs.cp(fs_join(source, "subdir", "subfile1"), target) + assert fs.isfile(target_subfile1) + + # Remove copied files + fs.rm([target_file2, target_subfile1]) + assert not fs.exists(target_file2) + assert not fs.exists(target_subfile1) + + # Repeat with trailing slash on target + fs.cp(fs_join(source, "file2"), target + "/") + assert fs.isdir(target) + assert fs.isfile(target_file2) + + fs.cp(fs_join(source, "subdir", "subfile1"), target + "/") + assert fs.isfile(target_subfile1) + + def test_copy_file_to_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # Copy scenario 1b + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + fs.cp( + fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir/") + ) # Note trailing slash + assert fs.isdir(target) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + + def test_copy_file_to_file_in_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1c + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + fs.touch(fs_join(target, "dummy")) + assert fs.isdir(target) + + fs.cp(fs_join(source, "subdir", "subfile1"), fs_join(target, "newfile")) + assert fs.isfile(fs_join(target, "newfile")) + + def test_copy_file_to_file_in_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # Copy scenario 1d + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + fs.cp( + fs_join(source, "subdir", "subfile1"), fs_join(target, "newdir", "newfile") + ) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "newfile")) + + def test_copy_directory_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1e + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + for source_slash, target_slash in zip([False, True], [False, 
True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = target + "/" if target_slash else target + + # Without recursive does nothing + fs.cp(s, t) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # With recursive + fs.cp(s, t, recursive=True) + if source_slash: + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert fs.isdir(fs_join(target, "nesteddir")) + assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + fs_join(target, "nesteddir"), + ], + recursive=True, + ) + else: + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile1")) + assert fs.isfile(fs_join(target, "subdir", "subfile2")) + assert fs.isdir(fs_join(target, "subdir", "nesteddir")) + assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile")) + + fs.rm(fs_join(target, "subdir"), recursive=True) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # Limit recursive by maxdepth + fs.cp(s, t, recursive=True, maxdepth=1) + if source_slash: + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.exists(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + else: + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile1")) + assert fs.isfile(fs_join(target, "subdir", "subfile2")) + assert not fs.exists(fs_join(target, "subdir", "nesteddir")) + + fs.rm(fs_join(target, "subdir"), recursive=True) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_copy_directory_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1f + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive does nothing + fs.cp(s, t) + if supports_empty_directories: + assert fs.ls(target) == [] + else: + with pytest.raises(FileNotFoundError): + fs.ls(target) + + # With recursive + fs.cp(s, t, recursive=True) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert fs.isdir(fs_join(target, "newdir", "nesteddir")) + assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.cp(s, t, recursive=True, maxdepth=1) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + def 
test_copy_glob_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 1g + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + # Without recursive + fs.cp(fs_join(source, "subdir", "*"), t) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.isdir(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert fs.isdir(fs_join(target, "nesteddir")) + assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + fs_join(target, "nesteddir"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # Limit recursive by maxdepth + fs.cp( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.exists(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_copy_glob_to_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # Copy scenario 1h + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + for target_slash in [False, True]: + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive + fs.cp(fs_join(source, "subdir", "*"), t) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.cp(fs_join(source, "subdir", glob), t, recursive=recursive) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert fs.isdir(fs_join(target, "newdir", "nesteddir")) + assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, 
"subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.cp( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + @pytest.mark.parametrize( + GLOB_EDGE_CASES_TESTS["argnames"], + GLOB_EDGE_CASES_TESTS["argvalues"], + ) + def test_copy_glob_edge_cases( + self, + path, + recursive, + maxdepth, + expected, + fs, + fs_join, + fs_glob_edge_cases_files, + fs_target, + fs_sanitize_path, + ): + # Copy scenario 1g + source = fs_glob_edge_cases_files + + target = fs_target + + for new_dir, target_slash in product([True, False], [True, False]): + fs.mkdir(target) + + t = fs_join(target, "newdir") if new_dir else target + t = t + "/" if target_slash else t + + fs.copy(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth) + + output = fs.find(target) + if new_dir: + prefixed_expected = [ + fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected + ] + else: + prefixed_expected = [ + fs_sanitize_path(fs_join(target, p)) for p in expected + ] + assert sorted(output) == sorted(prefixed_expected) + + try: + fs.rm(target, recursive=True) + except FileNotFoundError: + pass + + def test_copy_list_of_files_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + fs_target, + supports_empty_directories, + ): + # Copy scenario 2a + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + fs.cp(source_files, t) + assert fs.isfile(fs_join(target, "file1")) + assert fs.isfile(fs_join(target, "file2")) + assert fs.isfile(fs_join(target, "subfile1")) + + fs.rm( + [ + fs_join(target, "file1"), + fs_join(target, "file2"), + fs_join(target, "subfile1"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_copy_list_of_files_to_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # Copy scenario 2b + source = fs_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + fs.cp(source_files, fs_join(target, "newdir") + "/") # Note trailing slash + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "file1")) + assert fs.isfile(fs_join(target, "newdir", "file2")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + + def test_copy_two_files_new_directory( + self, fs, fs_join, fs_bulk_operations_scenario_0, fs_target + ): + # This is a duplicate of 
test_copy_list_of_files_to_new_directory and + # can eventually be removed. + source = fs_bulk_operations_scenario_0 + + target = fs_target + assert not fs.exists(target) + fs.cp([fs_join(source, "file1"), fs_join(source, "file2")], target) + + assert fs.isdir(target) + assert fs.isfile(fs_join(target, "file1")) + assert fs.isfile(fs_join(target, "file2")) + + def test_copy_directory_without_files_with_same_name_prefix( + self, + fs, + fs_join, + fs_target, + fs_dir_and_file_with_same_name_prefix, + supports_empty_directories, + ): + # Create the test dirs + source = fs_dir_and_file_with_same_name_prefix + target = fs_target + + # Test without glob + fs.cp(fs_join(source, "subdir"), target, recursive=True) + + assert fs.isfile(fs_join(target, "subfile.txt")) + assert not fs.isfile(fs_join(target, "subdir.txt")) + + fs.rm([fs_join(target, "subfile.txt")]) + if supports_empty_directories: + assert fs.ls(target) == [] + else: + assert not fs.exists(target) + + # Test with glob + fs.cp(fs_join(source, "subdir*"), target, recursive=True) + + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile.txt")) + assert fs.isfile(fs_join(target, "subdir.txt")) + + def test_copy_with_source_and_destination_as_list( + self, fs, fs_target, fs_join, fs_10_files_with_hashed_names + ): + # Create the test dir + source = fs_10_files_with_hashed_names + target = fs_target + + # Create list of files for source and destination + source_files = [] + destination_files = [] + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + source_files.append(fs_join(source, f"{hashed_i}.txt")) + destination_files.append(fs_join(target, f"{hashed_i}.txt")) + + # Copy and assert order was kept + fs.copy(path1=source_files, path2=destination_files) + + for i in range(10): + file_content = fs.cat(destination_files[i]).decode("utf-8") + assert file_content == str(i) diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/get.py b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/get.py new file mode 100644 index 0000000000000000000000000000000000000000..851ab81ee581e74cac41c64c83ef0af75826d6b0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/get.py @@ -0,0 +1,587 @@ +from hashlib import md5 +from itertools import product + +import pytest + +from fsspec.implementations.local import make_path_posix +from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS + + +class AbstractGetTests: + def test_get_file_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1a + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + assert local_fs.isdir(target) + + target_file2 = local_join(target, "file2") + target_subfile1 = local_join(target, "subfile1") + + # Copy from source directory + fs.get(fs_join(source, "file2"), target) + assert local_fs.isfile(target_file2) + + # Copy from sub directory + fs.get(fs_join(source, "subdir", "subfile1"), target) + assert local_fs.isfile(target_subfile1) + + # Remove copied files + local_fs.rm([target_file2, target_subfile1]) + assert not local_fs.exists(target_file2) + assert not local_fs.exists(target_subfile1) + + # Repeat with trailing slash on target + fs.get(fs_join(source, "file2"), target + "/") + assert local_fs.isdir(target) + assert local_fs.isfile(target_file2) + + fs.get(fs_join(source, "subdir", "subfile1"), target + "/") + assert 
local_fs.isfile(target_subfile1) + + def test_get_file_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1b + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get( + fs_join(source, "subdir", "subfile1"), local_join(target, "newdir/") + ) # Note trailing slash + + assert local_fs.isdir(target) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + + def test_get_file_to_file_in_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1c + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get(fs_join(source, "subdir", "subfile1"), local_join(target, "newfile")) + assert local_fs.isfile(local_join(target, "newfile")) + + def test_get_file_to_file_in_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1d + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + fs.get( + fs_join(source, "subdir", "subfile1"), + local_join(target, "newdir", "newfile"), + ) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "newfile")) + + def test_get_directory_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1e + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + assert local_fs.isdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = target + "/" if target_slash else target + + # Without recursive does nothing + fs.get(s, t) + assert local_fs.ls(target) == [] + + # With recursive + fs.get(s, t, recursive=True) + if source_slash: + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert local_fs.isdir(local_join(target, "nesteddir")) + assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + local_join(target, "nesteddir"), + ], + recursive=True, + ) + else: + assert local_fs.isdir(local_join(target, "subdir")) + assert local_fs.isfile(local_join(target, "subdir", "subfile1")) + assert local_fs.isfile(local_join(target, "subdir", "subfile2")) + assert local_fs.isdir(local_join(target, "subdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "subdir", "nesteddir", "nestedfile") + ) + + local_fs.rm(local_join(target, "subdir"), recursive=True) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get(s, t, recursive=True, maxdepth=1) + if source_slash: + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.exists(local_join(target, "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + else: + assert local_fs.isdir(local_join(target, "subdir")) + assert local_fs.isfile(local_join(target, "subdir", 
"subfile1")) + assert local_fs.isfile(local_join(target, "subdir", "subfile2")) + assert not local_fs.exists(local_join(target, "subdir", "nesteddir")) + + local_fs.rm(local_join(target, "subdir"), recursive=True) + assert local_fs.ls(target) == [] + + def test_get_directory_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1f + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = local_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive does nothing + fs.get(s, t) + assert local_fs.ls(target) == [] + + # With recursive + fs.get(s, t, recursive=True) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert local_fs.isdir(local_join(target, "newdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get(s, t, recursive=True, maxdepth=1) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + def test_get_glob_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1g + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + # Without recursive + fs.get(fs_join(source, "subdir", "*"), t) + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.isdir(local_join(target, "nesteddir")) + assert not local_fs.exists(local_join(target, "nesteddir", "nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.get(fs_join(source, "subdir", glob), t, recursive=recursive) + assert local_fs.isfile(local_join(target, "subfile1")) + assert local_fs.isfile(local_join(target, "subfile2")) + assert local_fs.isdir(local_join(target, "nesteddir")) + assert local_fs.isfile(local_join(target, "nesteddir", "nestedfile")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + local_join(target, "nesteddir"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + # Limit recursive by maxdepth + fs.get( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert local_fs.isfile(local_join(target, "subfile1")) + 
assert local_fs.isfile(local_join(target, "subfile2")) + assert not local_fs.exists(local_join(target, "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + + local_fs.rm( + [ + local_join(target, "subfile1"), + local_join(target, "subfile2"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + def test_get_glob_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1h + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + for target_slash in [False, True]: + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive + fs.get(fs_join(source, "subdir", "*"), t) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + assert not local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert local_fs.ls(target) == [] + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.get(fs_join(source, "subdir", glob), t, recursive=recursive) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert local_fs.isdir(local_join(target, "newdir", "nesteddir")) + assert local_fs.isfile( + local_join(target, "newdir", "nesteddir", "nestedfile") + ) + assert not local_fs.exists(local_join(target, "subdir")) + assert not local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_join(target, "newdir"), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.get( + fs_join(source, "subdir", glob), t, recursive=recursive, maxdepth=1 + ) + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + assert local_fs.isfile(local_join(target, "newdir", "subfile2")) + assert not local_fs.exists(local_join(target, "newdir", "nesteddir")) + assert not local_fs.exists(local_join(target, "subdir")) + assert not local_fs.exists(local_join(target, "newdir", "subdir")) + + local_fs.rm(local_fs.ls(target, detail=False), recursive=True) + assert not local_fs.exists(local_join(target, "newdir")) + + @pytest.mark.parametrize( + GLOB_EDGE_CASES_TESTS["argnames"], + GLOB_EDGE_CASES_TESTS["argvalues"], + ) + def test_get_glob_edge_cases( + self, + path, + recursive, + maxdepth, + expected, + fs, + fs_join, + fs_glob_edge_cases_files, + local_fs, + local_join, + local_target, + ): + # Copy scenario 1g + source = fs_glob_edge_cases_files + + target = local_target + + for new_dir, target_slash in product([True, False], [True, False]): + local_fs.mkdir(target) + + t = local_join(target, "newdir") if new_dir else target + t = t + "/" if target_slash else t + + fs.get(fs_join(source, path), t, recursive=recursive, maxdepth=maxdepth) + + output = local_fs.find(target) + if new_dir: + prefixed_expected = [ + make_path_posix(local_join(target, "newdir", p)) for p in expected + ] + else: + prefixed_expected = [ + make_path_posix(local_join(target, 
p)) for p in expected + ] + assert sorted(output) == sorted(prefixed_expected) + + try: + local_fs.rm(target, recursive=True) + except FileNotFoundError: + pass + + def test_get_list_of_files_to_existing_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 2a + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + fs.get(source_files, t) + assert local_fs.isfile(local_join(target, "file1")) + assert local_fs.isfile(local_join(target, "file2")) + assert local_fs.isfile(local_join(target, "subfile1")) + + local_fs.rm( + [ + local_join(target, "file1"), + local_join(target, "file2"), + local_join(target, "subfile1"), + ], + recursive=True, + ) + assert local_fs.ls(target) == [] + + def test_get_list_of_files_to_new_directory( + self, + fs, + fs_join, + fs_bulk_operations_scenario_0, + local_fs, + local_join, + local_target, + ): + # Copy scenario 2b + source = fs_bulk_operations_scenario_0 + + target = local_target + local_fs.mkdir(target) + + source_files = [ + fs_join(source, "file1"), + fs_join(source, "file2"), + fs_join(source, "subdir", "subfile1"), + ] + + fs.get(source_files, local_join(target, "newdir") + "/") # Note trailing slash + assert local_fs.isdir(local_join(target, "newdir")) + assert local_fs.isfile(local_join(target, "newdir", "file1")) + assert local_fs.isfile(local_join(target, "newdir", "file2")) + assert local_fs.isfile(local_join(target, "newdir", "subfile1")) + + def test_get_directory_recursive( + self, fs, fs_join, fs_path, local_fs, local_join, local_target + ): + # https://github.com/fsspec/filesystem_spec/issues/1062 + # Recursive cp/get/put of source directory into non-existent target directory. 
+ src = fs_join(fs_path, "src") + src_file = fs_join(src, "file") + fs.mkdir(src) + fs.touch(src_file) + + target = local_target + + # get without slash + assert not local_fs.exists(target) + for loop in range(2): + fs.get(src, target, recursive=True) + assert local_fs.isdir(target) + + if loop == 0: + assert local_fs.isfile(local_join(target, "file")) + assert not local_fs.exists(local_join(target, "src")) + else: + assert local_fs.isfile(local_join(target, "file")) + assert local_fs.isdir(local_join(target, "src")) + assert local_fs.isfile(local_join(target, "src", "file")) + + local_fs.rm(target, recursive=True) + + # get with slash + assert not local_fs.exists(target) + for loop in range(2): + fs.get(src + "/", target, recursive=True) + assert local_fs.isdir(target) + assert local_fs.isfile(local_join(target, "file")) + assert not local_fs.exists(local_join(target, "src")) + + def test_get_directory_without_files_with_same_name_prefix( + self, + fs, + fs_join, + local_fs, + local_join, + local_target, + fs_dir_and_file_with_same_name_prefix, + ): + # Create the test dirs + source = fs_dir_and_file_with_same_name_prefix + target = local_target + + # Test without glob + fs.get(fs_join(source, "subdir"), target, recursive=True) + + assert local_fs.isfile(local_join(target, "subfile.txt")) + assert not local_fs.isfile(local_join(target, "subdir.txt")) + + local_fs.rm([local_join(target, "subfile.txt")]) + assert local_fs.ls(target) == [] + + # Test with glob + fs.get(fs_join(source, "subdir*"), target, recursive=True) + + assert local_fs.isdir(local_join(target, "subdir")) + assert local_fs.isfile(local_join(target, "subdir", "subfile.txt")) + assert local_fs.isfile(local_join(target, "subdir.txt")) + + def test_get_with_source_and_destination_as_list( + self, + fs, + fs_join, + local_fs, + local_join, + local_target, + fs_10_files_with_hashed_names, + ): + # Create the test dir + source = fs_10_files_with_hashed_names + target = local_target + + # Create list of files for source and destination + source_files = [] + destination_files = [] + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + source_files.append(fs_join(source, f"{hashed_i}.txt")) + destination_files.append( + make_path_posix(local_join(target, f"{hashed_i}.txt")) + ) + + # Copy and assert order was kept + fs.get(rpath=source_files, lpath=destination_files) + + for i in range(10): + file_content = local_fs.cat(destination_files[i]).decode("utf-8") + assert file_content == str(i) diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/mv.py b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/mv.py new file mode 100644 index 0000000000000000000000000000000000000000..39f6caa3de815e024fa84de2acecc986c823ed29 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/mv.py @@ -0,0 +1,57 @@ +import os + +import pytest + +import fsspec + + +def test_move_raises_error_with_tmpdir(tmpdir): + # Create a file in the temporary directory + source = tmpdir.join("source_file.txt") + source.write("content") + + # Define a destination that simulates a protected or invalid path + destination = tmpdir.join("non_existent_directory/destination_file.txt") + + # Instantiate the filesystem (assuming the local file system interface) + fs = fsspec.filesystem("file") + + # Use the actual file paths as string + with pytest.raises(FileNotFoundError): + fs.mv(str(source), str(destination)) + + +@pytest.mark.parametrize("recursive", (True, False)) +def 
test_move_raises_error_with_tmpdir_permission(recursive, tmpdir): + # Create a file in the temporary directory + source = tmpdir.join("source_file.txt") + source.write("content") + + # Create a protected directory (non-writable) + protected_dir = tmpdir.mkdir("protected_directory") + protected_path = str(protected_dir) + + # Set the directory to read-only + if os.name == "nt": + os.system(f'icacls "{protected_path}" /deny Everyone:(W)') + else: + os.chmod(protected_path, 0o555) # Sets the directory to read-only + + # Define a destination inside the protected directory + destination = protected_dir.join("destination_file.txt") + + # Instantiate the filesystem (assuming the local file system interface) + fs = fsspec.filesystem("file") + + # Try to move the file to the read-only directory, expecting a permission error + with pytest.raises(PermissionError): + fs.mv(str(source), str(destination), recursive=recursive) + + # Assert the file was not created in the destination + assert not os.path.exists(destination) + + # Cleanup: Restore permissions so the directory can be cleaned up + if os.name == "nt": + os.system(f'icacls "{protected_path}" /remove:d Everyone') + else: + os.chmod(protected_path, 0o755) # Restore write permission for cleanup diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/open.py b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/open.py new file mode 100644 index 0000000000000000000000000000000000000000..bb75ea852276fb8d834345883813b8e27a0ae24c --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/open.py @@ -0,0 +1,11 @@ +import pytest + + +class AbstractOpenTests: + def test_open_exclusive(self, fs, fs_target): + with fs.open(fs_target, "wb") as f: + f.write(b"data") + with fs.open(fs_target, "rb") as f: + assert f.read() == b"data" + with pytest.raises(FileExistsError): + fs.open(fs_target, "xb") diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/pipe.py b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/pipe.py new file mode 100644 index 0000000000000000000000000000000000000000..8ecca96e9d23ff268a253c48269d5cca451ea270 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/pipe.py @@ -0,0 +1,11 @@ +import pytest + + +class AbstractPipeTests: + def test_pipe_exclusive(self, fs, fs_target): + fs.pipe_file(fs_target, b"data") + assert fs.cat_file(fs_target) == b"data" + with pytest.raises(FileExistsError): + fs.pipe_file(fs_target, b"data", mode="create") + fs.pipe_file(fs_target, b"new data", mode="overwrite") + assert fs.cat_file(fs_target) == b"new data" diff --git a/venv/lib/python3.10/site-packages/fsspec/tests/abstract/put.py b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/put.py new file mode 100644 index 0000000000000000000000000000000000000000..9fc349977f0384d9fc86126498be5c6ad99a21d3 --- /dev/null +++ b/venv/lib/python3.10/site-packages/fsspec/tests/abstract/put.py @@ -0,0 +1,591 @@ +from hashlib import md5 +from itertools import product + +import pytest + +from fsspec.tests.abstract.common import GLOB_EDGE_CASES_TESTS + + +class AbstractPutTests: + def test_put_file_to_existing_directory( + self, + fs, + fs_join, + fs_target, + local_join, + local_bulk_operations_scenario_0, + supports_empty_directories, + ): + # Copy scenario 1a + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + fs.touch(fs_join(target, "dummy")) + assert 
fs.isdir(target) + + target_file2 = fs_join(target, "file2") + target_subfile1 = fs_join(target, "subfile1") + + # Copy from source directory + fs.put(local_join(source, "file2"), target) + assert fs.isfile(target_file2) + + # Copy from sub directory + fs.put(local_join(source, "subdir", "subfile1"), target) + assert fs.isfile(target_subfile1) + + # Remove copied files + fs.rm([target_file2, target_subfile1]) + assert not fs.exists(target_file2) + assert not fs.exists(target_subfile1) + + # Repeat with trailing slash on target + fs.put(local_join(source, "file2"), target + "/") + assert fs.isdir(target) + assert fs.isfile(target_file2) + + fs.put(local_join(source, "subdir", "subfile1"), target + "/") + assert fs.isfile(target_subfile1) + + def test_put_file_to_new_directory( + self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0 + ): + # Copy scenario 1b + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + fs.put( + local_join(source, "subdir", "subfile1"), fs_join(target, "newdir/") + ) # Note trailing slash + assert fs.isdir(target) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + + def test_put_file_to_file_in_existing_directory( + self, + fs, + fs_join, + fs_target, + local_join, + supports_empty_directories, + local_bulk_operations_scenario_0, + ): + # Copy scenario 1c + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + fs.touch(fs_join(target, "dummy")) + assert fs.isdir(target) + + fs.put(local_join(source, "subdir", "subfile1"), fs_join(target, "newfile")) + assert fs.isfile(fs_join(target, "newfile")) + + def test_put_file_to_file_in_new_directory( + self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0 + ): + # Copy scenario 1d + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + fs.put( + local_join(source, "subdir", "subfile1"), + fs_join(target, "newdir", "newfile"), + ) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "newfile")) + + def test_put_directory_to_existing_directory( + self, + fs, + fs_join, + fs_target, + local_bulk_operations_scenario_0, + supports_empty_directories, + ): + # Copy scenario 1e + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = target + "/" if target_slash else target + + # Without recursive does nothing + fs.put(s, t) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # With recursive + fs.put(s, t, recursive=True) + if source_slash: + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert fs.isdir(fs_join(target, "nesteddir")) + assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + fs_join(target, "nesteddir"), + ], + recursive=True, + ) + else: + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", 
"subfile1")) + assert fs.isfile(fs_join(target, "subdir", "subfile2")) + assert fs.isdir(fs_join(target, "subdir", "nesteddir")) + assert fs.isfile(fs_join(target, "subdir", "nesteddir", "nestedfile")) + + fs.rm(fs_join(target, "subdir"), recursive=True) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # Limit recursive by maxdepth + fs.put(s, t, recursive=True, maxdepth=1) + if source_slash: + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.exists(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + else: + assert fs.isdir(fs_join(target, "subdir")) + assert fs.isfile(fs_join(target, "subdir", "subfile1")) + assert fs.isfile(fs_join(target, "subdir", "subfile2")) + assert not fs.exists(fs_join(target, "subdir", "nesteddir")) + + fs.rm(fs_join(target, "subdir"), recursive=True) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_put_directory_to_new_directory( + self, + fs, + fs_join, + fs_target, + local_bulk_operations_scenario_0, + supports_empty_directories, + ): + # Copy scenario 1f + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + for source_slash, target_slash in zip([False, True], [False, True]): + s = fs_join(source, "subdir") + if source_slash: + s += "/" + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive does nothing + fs.put(s, t) + if supports_empty_directories: + assert fs.ls(target) == [] + else: + with pytest.raises(FileNotFoundError): + fs.ls(target) + + # With recursive + fs.put(s, t, recursive=True) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert fs.isdir(fs_join(target, "newdir", "nesteddir")) + assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.put(s, t, recursive=True, maxdepth=1) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + def test_put_glob_to_existing_directory( + self, + fs, + fs_join, + fs_target, + local_join, + supports_empty_directories, + local_bulk_operations_scenario_0, + ): + # Copy scenario 1g + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + # Without recursive + fs.put(local_join(source, "subdir", "*"), t) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.isdir(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "nesteddir", "nestedfile")) + assert not 
fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.put(local_join(source, "subdir", glob), t, recursive=recursive) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert fs.isdir(fs_join(target, "nesteddir")) + assert fs.isfile(fs_join(target, "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + fs_join(target, "nesteddir"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + # Limit recursive by maxdepth + fs.put( + local_join(source, "subdir", glob), + t, + recursive=recursive, + maxdepth=1, + ) + assert fs.isfile(fs_join(target, "subfile1")) + assert fs.isfile(fs_join(target, "subfile2")) + assert not fs.exists(fs_join(target, "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + + fs.rm( + [ + fs_join(target, "subfile1"), + fs_join(target, "subfile2"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_put_glob_to_new_directory( + self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0 + ): + # Copy scenario 1h + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + for target_slash in [False, True]: + t = fs_join(target, "newdir") + if target_slash: + t += "/" + + # Without recursive + fs.put(local_join(source, "subdir", "*"), t) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # With recursive + for glob, recursive in zip(["*", "**"], [True, False]): + fs.put(local_join(source, "subdir", glob), t, recursive=recursive) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert fs.isdir(fs_join(target, "newdir", "nesteddir")) + assert fs.isfile(fs_join(target, "newdir", "nesteddir", "nestedfile")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + # Limit recursive by maxdepth + fs.put( + local_join(source, "subdir", glob), + t, + recursive=recursive, + maxdepth=1, + ) + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + assert fs.isfile(fs_join(target, "newdir", "subfile2")) + assert not fs.exists(fs_join(target, "newdir", "nesteddir")) + assert not fs.exists(fs_join(target, "subdir")) + assert not fs.exists(fs_join(target, "newdir", "subdir")) + + fs.rm(fs_join(target, "newdir"), recursive=True) + assert not fs.exists(fs_join(target, "newdir")) + + 
@pytest.mark.parametrize( + GLOB_EDGE_CASES_TESTS["argnames"], + GLOB_EDGE_CASES_TESTS["argvalues"], + ) + def test_put_glob_edge_cases( + self, + path, + recursive, + maxdepth, + expected, + fs, + fs_join, + fs_target, + local_glob_edge_cases_files, + local_join, + fs_sanitize_path, + ): + # Copy scenario 1g + source = local_glob_edge_cases_files + + target = fs_target + + for new_dir, target_slash in product([True, False], [True, False]): + fs.mkdir(target) + + t = fs_join(target, "newdir") if new_dir else target + t = t + "/" if target_slash else t + + fs.put(local_join(source, path), t, recursive=recursive, maxdepth=maxdepth) + + output = fs.find(target) + if new_dir: + prefixed_expected = [ + fs_sanitize_path(fs_join(target, "newdir", p)) for p in expected + ] + else: + prefixed_expected = [ + fs_sanitize_path(fs_join(target, p)) for p in expected + ] + assert sorted(output) == sorted(prefixed_expected) + + try: + fs.rm(target, recursive=True) + except FileNotFoundError: + pass + + def test_put_list_of_files_to_existing_directory( + self, + fs, + fs_join, + fs_target, + local_join, + local_bulk_operations_scenario_0, + supports_empty_directories, + ): + # Copy scenario 2a + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + if not supports_empty_directories: + # Force target directory to exist by adding a dummy file + dummy = fs_join(target, "dummy") + fs.touch(dummy) + assert fs.isdir(target) + + source_files = [ + local_join(source, "file1"), + local_join(source, "file2"), + local_join(source, "subdir", "subfile1"), + ] + + for target_slash in [False, True]: + t = target + "/" if target_slash else target + + fs.put(source_files, t) + assert fs.isfile(fs_join(target, "file1")) + assert fs.isfile(fs_join(target, "file2")) + assert fs.isfile(fs_join(target, "subfile1")) + + fs.rm( + [ + fs_join(target, "file1"), + fs_join(target, "file2"), + fs_join(target, "subfile1"), + ], + recursive=True, + ) + assert fs.ls(target, detail=False) == ( + [] if supports_empty_directories else [dummy] + ) + + def test_put_list_of_files_to_new_directory( + self, fs, fs_join, fs_target, local_join, local_bulk_operations_scenario_0 + ): + # Copy scenario 2b + source = local_bulk_operations_scenario_0 + + target = fs_target + fs.mkdir(target) + + source_files = [ + local_join(source, "file1"), + local_join(source, "file2"), + local_join(source, "subdir", "subfile1"), + ] + + fs.put(source_files, fs_join(target, "newdir") + "/") # Note trailing slash + assert fs.isdir(fs_join(target, "newdir")) + assert fs.isfile(fs_join(target, "newdir", "file1")) + assert fs.isfile(fs_join(target, "newdir", "file2")) + assert fs.isfile(fs_join(target, "newdir", "subfile1")) + + def test_put_directory_recursive( + self, fs, fs_join, fs_target, local_fs, local_join, local_path + ): + # https://github.com/fsspec/filesystem_spec/issues/1062 + # Recursive cp/get/put of source directory into non-existent target directory. 
+ src = local_join(local_path, "src") + src_file = local_join(src, "file") + local_fs.mkdir(src) + local_fs.touch(src_file) + + target = fs_target + + # put without slash + assert not fs.exists(target) + for loop in range(2): + fs.put(src, target, recursive=True) + assert fs.isdir(target) + + if loop == 0: + assert fs.isfile(fs_join(target, "file")) + assert not fs.exists(fs_join(target, "src")) + else: + assert fs.isfile(fs_join(target, "file")) + assert fs.isdir(fs_join(target, "src")) + assert fs.isfile(fs_join(target, "src", "file")) + + fs.rm(target, recursive=True) + + # put with slash + assert not fs.exists(target) + for loop in range(2): + fs.put(src + "/", target, recursive=True) + assert fs.isdir(target) + assert fs.isfile(fs_join(target, "file")) + assert not fs.exists(fs_join(target, "src")) + + def test_put_directory_without_files_with_same_name_prefix( + self, + fs, + fs_join, + fs_target, + local_join, + local_dir_and_file_with_same_name_prefix, + supports_empty_directories, + ): + # Create the test dirs + source = local_dir_and_file_with_same_name_prefix + target = fs_target + + # Test without glob + fs.put(local_join(source, "subdir"), fs_target, recursive=True) + + assert fs.isfile(fs_join(fs_target, "subfile.txt")) + assert not fs.isfile(fs_join(fs_target, "subdir.txt")) + + fs.rm([fs_join(target, "subfile.txt")]) + if supports_empty_directories: + assert fs.ls(target) == [] + else: + assert not fs.exists(target) + + # Test with glob + fs.put(local_join(source, "subdir*"), fs_target, recursive=True) + + assert fs.isdir(fs_join(fs_target, "subdir")) + assert fs.isfile(fs_join(fs_target, "subdir", "subfile.txt")) + assert fs.isfile(fs_join(fs_target, "subdir.txt")) + + def test_copy_with_source_and_destination_as_list( + self, fs, fs_target, fs_join, local_join, local_10_files_with_hashed_names + ): + # Create the test dir + source = local_10_files_with_hashed_names + target = fs_target + + # Create list of files for source and destination + source_files = [] + destination_files = [] + for i in range(10): + hashed_i = md5(str(i).encode("utf-8")).hexdigest() + source_files.append(local_join(source, f"{hashed_i}.txt")) + destination_files.append(fs_join(target, f"{hashed_i}.txt")) + + # Copy and assert order was kept + fs.put(lpath=source_files, rpath=destination_files) + + for i in range(10): + file_content = fs.cat(destination_files[i]).decode("utf-8") + assert file_content == str(i) diff --git a/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/METADATA b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..8a2f639061cc4a203f7109d8335d28076442c61d --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/METADATA @@ -0,0 +1,202 @@ +Metadata-Version: 2.4 +Name: h11 +Version: 0.16.0 +Summary: A pure-Python, bring-your-own-I/O implementation of HTTP/1.1 +Home-page: https://github.com/python-hyper/h11 +Author: Nathaniel J. 
Smith +Author-email: njs@pobox.com +License: MIT +Classifier: Development Status :: 3 - Alpha +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: System :: Networking +Requires-Python: >=3.8 +License-File: LICENSE.txt +Dynamic: author +Dynamic: author-email +Dynamic: classifier +Dynamic: description +Dynamic: home-page +Dynamic: license +Dynamic: license-file +Dynamic: requires-python +Dynamic: summary + +h11 +=== + +.. image:: https://travis-ci.org/python-hyper/h11.svg?branch=master + :target: https://travis-ci.org/python-hyper/h11 + :alt: Automated test status + +.. image:: https://codecov.io/gh/python-hyper/h11/branch/master/graph/badge.svg + :target: https://codecov.io/gh/python-hyper/h11 + :alt: Test coverage + +.. image:: https://readthedocs.org/projects/h11/badge/?version=latest + :target: http://h11.readthedocs.io/en/latest/?badge=latest + :alt: Documentation Status + +This is a little HTTP/1.1 library written from scratch in Python, +heavily inspired by `hyper-h2 <https://github.com/python-hyper/h2>`_. + +It's a "bring-your-own-I/O" library; h11 contains no IO code +whatsoever. This means you can hook h11 up to your favorite network +API, and that could be anything you want: synchronous, threaded, +asynchronous, or your own implementation of `RFC 6214 +<https://tools.ietf.org/html/rfc6214>`_ -- h11 won't judge you. +(Compare this to the current state of the art, where every time a `new +network API <https://github.com/dabeaz/curio>`_ comes along then someone +gets to start over reimplementing the entire HTTP protocol from +scratch.) Cory Benfield made an excellent blog post describing the +benefits of this approach, or if you like video +then here's his PyCon 2016 talk on the same theme. + +This also means that h11 is not immediately useful out of the box: +it's a toolkit for building programs that speak HTTP, not something +that could directly replace ``requests`` or ``twisted.web`` or +whatever. But h11 makes it much easier to implement something like +``requests`` or ``twisted.web``. + +At a high level, working with h11 goes like this: + +1) First, create an ``h11.Connection`` object to track the state of a + single HTTP/1.1 connection. + +2) When you read data off the network, pass it to + ``conn.receive_data(...)``; then call ``conn.next_event()`` repeatedly + to pull out objects representing high-level HTTP "events". + +3) When you want to send a high-level HTTP event, create the + corresponding "event" object and pass it to ``conn.send(...)``; + this will give you back some bytes that you can then push out + through the network. + +For example, a client might instantiate and then send a +``h11.Request`` object, then zero or more ``h11.Data`` objects for the +request body (e.g., if this is a POST), and then a +``h11.EndOfMessage`` to indicate the end of the message. The server +would then send back a ``h11.Response``, some ``h11.Data``, and +its own ``h11.EndOfMessage``. If either side violates the protocol, +you'll get a ``h11.ProtocolError`` exception.
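+
+In code, the client side of that exchange might look something like
+this (a minimal sketch, assuming an already-connected socket ``sock``;
+error handling omitted):
+
+.. code-block:: python
+
+    import h11
+
+    conn = h11.Connection(our_role=h11.CLIENT)
+
+    # Events go in, bytes come out; pushing them through the network
+    # is your job:
+    sock.sendall(
+        conn.send(
+            h11.Request(
+                method="GET", target="/", headers=[("Host", "example.com")]
+            )
+        )
+    )
+    sock.sendall(conn.send(h11.EndOfMessage()))
+
+    # Bytes from the network go in, events come out:
+    while True:
+        event = conn.next_event()
+        if event is h11.NEED_DATA:
+            conn.receive_data(sock.recv(4096))
+        elif isinstance(event, h11.EndOfMessage):
+            break
+        else:
+            print(event)  # an h11.Response, then zero or more h11.Data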
+ +h11 is suitable for implementing both servers and clients, and has a +pleasantly symmetric API: the events you send as a client are exactly +the ones that you receive as a server and vice-versa. + +`Here's an example of a tiny HTTP client +<https://github.com/python-hyper/h11/blob/master/examples/basic-client.py>`_ + +It also has `a fine manual <https://h11.readthedocs.io/>`_. + +FAQ +--- + +*Whyyyyy?* + +I wanted to play with HTTP in `Curio +<https://github.com/dabeaz/curio>`__ and `Trio +<https://github.com/python-trio/trio>`__, which at the time didn't have any +HTTP libraries. So I thought, no big deal, Python has, like, a dozen +different implementations of HTTP, surely I can find one that's +reusable. I didn't find one, but I did find Cory's call-to-arms +blog-post. So I figured, well, fine, if I have to implement HTTP from +scratch, at least I can make sure no-one *else* has to ever again. + +*Should I use it?* + +Maybe. You should be aware that it's a very young project. But, it's +feature complete and has an exhaustive test-suite and complete docs, +so the next step is for people to try using it and see how it goes +:-). If you do then please let us know -- if nothing else we'll want +to talk to you before making any incompatible changes! + +*What are the features/limitations?* + +Roughly speaking, it's trying to be a robust, complete, and non-hacky +implementation of the first "chapter" of the HTTP/1.1 spec: `RFC 7230: +HTTP/1.1 Message Syntax and Routing +<https://tools.ietf.org/html/rfc7230>`_. That is, it mostly focuses on +implementing HTTP at the level of taking bytes on and off the wire, +and the headers related to that, and tries to be anal about spec +conformance. It doesn't know about higher-level concerns like URL +routing, conditional GETs, cross-origin cookie policies, or content +negotiation. But it does know how to take care of framing, +cross-version differences in keep-alive handling, and the "obsolete +line folding" rule, so you can focus your energies on the hard / +interesting parts for your application, and it tries to support the +full specification in the sense that any useful HTTP/1.1 conformant +application should be able to use h11. + +It's pure Python, and has no dependencies outside of the standard +library. + +It has a test suite with 100.0% coverage for both statements and +branches. + +Currently it supports Python 3 (testing on 3.8-3.12) and PyPy 3. +The last Python 2-compatible version was h11 0.11.x. +(Originally it had a Cython wrapper for `http-parser +<https://github.com/nodejs/http-parser>`_ and a beautiful nested state +machine implemented with ``yield from`` to postprocess the output. But +I had to take these out -- the new *parser* needs fewer lines-of-code +than the old *parser wrapper*, is written in pure Python, uses no +exotic language syntax, and has more features. It's sad, really; that +old state machine was really slick. I just need a few sentences here +to mourn that.) + +I don't know how fast it is. I haven't benchmarked or profiled it yet, +so it's probably got a few pointless hot spots, and I've been trying +to err on the side of simplicity and robustness instead of +micro-optimization. But at the architectural level I tried hard to +avoid fundamentally bad decisions, e.g., I believe that all the +parsing algorithms remain linear-time even in the face of pathological +input like slowloris, and there are no byte-by-byte loops. (I also +believe that it maintains bounded memory usage in the face of +arbitrary/pathological input.) + +The whole library is ~800 lines-of-code. You can read and understand +the whole thing in less than an hour. 
Most of the energy invested in +this so far has been spent on trying to keep things simple by +minimizing special-cases and ad hoc state manipulation; even though it +is now quite small and simple, I'm still annoyed that I haven't +figured out how to make it even smaller and simpler. (Unfortunately, +HTTP does not lend itself to simplicity.) + +The API is ~feature complete and I don't expect the general outlines +to change much, but you can't judge an API's ergonomics until you +actually document and use it, so I'd expect some changes in the +details. + +*How do I try it?* + +.. code-block:: sh + + $ pip install h11 + $ git clone git@github.com:python-hyper/h11 + $ cd h11/examples + $ python basic-client.py + +and go from there. + +*License?* + +MIT + +*Code of conduct?* + +Contributors are requested to follow our `code of conduct +`_ in +all project spaces. diff --git a/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/RECORD b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..43bafd33bdd5a47bcd27930886cfd95e3fd92562 --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/RECORD @@ -0,0 +1,29 @@ +h11-0.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +h11-0.16.0.dist-info/METADATA,sha256=KPMmCYrAn8unm48YD5YIfIQf4kViFct7hyqcfVzRnWQ,8348 +h11-0.16.0.dist-info/RECORD,, +h11-0.16.0.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91 +h11-0.16.0.dist-info/licenses/LICENSE.txt,sha256=N9tbuFkm2yikJ6JYZ_ELEjIAOuob5pzLhRE4rbjm82E,1124 +h11-0.16.0.dist-info/top_level.txt,sha256=F7dC4jl3zeh8TGHEPaWJrMbeuoWbS379Gwdi-Yvdcis,4 +h11/__init__.py,sha256=iO1KzkSO42yZ6ffg-VMgbx_ZVTWGUY00nRYEWn-s3kY,1507 +h11/__pycache__/__init__.cpython-310.pyc,, +h11/__pycache__/_abnf.cpython-310.pyc,, +h11/__pycache__/_connection.cpython-310.pyc,, +h11/__pycache__/_events.cpython-310.pyc,, +h11/__pycache__/_headers.cpython-310.pyc,, +h11/__pycache__/_readers.cpython-310.pyc,, +h11/__pycache__/_receivebuffer.cpython-310.pyc,, +h11/__pycache__/_state.cpython-310.pyc,, +h11/__pycache__/_util.cpython-310.pyc,, +h11/__pycache__/_version.cpython-310.pyc,, +h11/__pycache__/_writers.cpython-310.pyc,, +h11/_abnf.py,sha256=ybixr0xsupnkA6GFAyMubuXF6Tc1lb_hF890NgCsfNc,4815 +h11/_connection.py,sha256=k9YRVf6koZqbttBW36xSWaJpWdZwa-xQVU9AHEo9DuI,26863 +h11/_events.py,sha256=I97aXoal1Wu7dkL548BANBUCkOIbe-x5CioYA9IBY14,11792 +h11/_headers.py,sha256=P7D-lBNxHwdLZPLimmYwrPG-9ZkjElvvJZJdZAgSP-4,10412 +h11/_readers.py,sha256=a4RypORUCC3d0q_kxPuBIM7jTD8iLt5X91TH0FsduN4,8590 +h11/_receivebuffer.py,sha256=xrspsdsNgWFxRfQcTXxR8RrdjRXXTK0Io5cQYWpJ1Ws,5252 +h11/_state.py,sha256=_5LG_BGR8FCcFQeBPH-TMHgm_-B-EUcWCnQof_9XjFE,13231 +h11/_util.py,sha256=LWkkjXyJaFlAy6Lt39w73UStklFT5ovcvo0TkY7RYuk,4888 +h11/_version.py,sha256=GVSsbPSPDcOuF6ptfIiXnVJoaEm3ygXbMnqlr_Giahw,686 +h11/_writers.py,sha256=oFKm6PtjeHfbj4RLX7VB7KDc1gIY53gXG3_HR9ltmTA,5081 +h11/py.typed,sha256=sow9soTwP9T_gEAQSVh7Gb8855h04Nwmhs2We-JRgZM,7 diff --git a/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/WHEEL b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..1eb3c49d99559863120cfb8433fc8738fba43ba9 --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: setuptools (78.1.0) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git 
a/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/licenses/LICENSE.txt b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..8f080eae848f759c9173bfc0c79506357ebe5090 --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/licenses/LICENSE.txt @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2016 Nathaniel J. Smith and other contributors + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/top_level.txt b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..0d24def711344ec6f4da2108f7d5c9261eb35f8b --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11-0.16.0.dist-info/top_level.txt @@ -0,0 +1 @@ +h11 diff --git a/venv/lib/python3.10/site-packages/h11/__init__.py b/venv/lib/python3.10/site-packages/h11/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..989e92c3458681a6f0be72ae4105ea742750d328 --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/__init__.py @@ -0,0 +1,62 @@ +# A highish-level implementation of the HTTP/1.1 wire protocol (RFC 7230), +# containing no networking code at all, loosely modelled on hyper-h2's generic +# implementation of HTTP/2 (and in particular the h2.connection.H2Connection +# class). There's still a bunch of subtle details you need to get right if you +# want to make this actually useful, because it doesn't implement all the +# semantics to check that what you're asking to write to the wire is sensible, +# but at least it gets you out of dealing with the wire itself. 
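+#
+# For example, a minimal server-side sketch (``sock`` here is your own
+# socket object, not something h11 provides):
+#
+#   conn = h11.Connection(our_role=h11.SERVER)
+#   conn.receive_data(sock.recv(4096))
+#   event = conn.next_event()  # an h11.Request, or h11.NEED_DATA
+#   data = conn.send(h11.Response(status_code=200, headers=[]))
+#   data += conn.send(h11.EndOfMessage())
+#   sock.sendall(data)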
+ +from h11._connection import Connection, NEED_DATA, PAUSED +from h11._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from h11._state import ( + CLIENT, + CLOSED, + DONE, + ERROR, + IDLE, + MIGHT_SWITCH_PROTOCOL, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, + SWITCHED_PROTOCOL, +) +from h11._util import LocalProtocolError, ProtocolError, RemoteProtocolError +from h11._version import __version__ + +PRODUCT_ID = "python-h11/" + __version__ + + +__all__ = ( + "Connection", + "NEED_DATA", + "PAUSED", + "ConnectionClosed", + "Data", + "EndOfMessage", + "Event", + "InformationalResponse", + "Request", + "Response", + "CLIENT", + "CLOSED", + "DONE", + "ERROR", + "IDLE", + "MUST_CLOSE", + "SEND_BODY", + "SEND_RESPONSE", + "SERVER", + "SWITCHED_PROTOCOL", + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", +) diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e3ab9c2bccbc1247c361432a859316e7577396bc Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_abnf.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_abnf.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e15d620020bc5b474403e9802fb08890c55685c Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_abnf.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_connection.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d73e6c6a3f520ea136e8c1b405c4f389cd31f04 Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_connection.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_events.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_events.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e39f48ecd1ed135e5742f3147dbcd0d04b0c4c8a Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_events.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_headers.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_headers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54c6ed7e01f4314bf67915b42db0ca7e01fbc3ec Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_headers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_readers.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_readers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54584e6ccbba1088dd9c4d4316d7f246c61fc5d7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_readers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_receivebuffer.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_receivebuffer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9288526f727b4efaa9699041fda1eaaa1b4a3d7f Binary files /dev/null and 
b/venv/lib/python3.10/site-packages/h11/__pycache__/_receivebuffer.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_state.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_state.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6669261aa28dc73afecfe3281b578808d6fa050 Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_state.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_util.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7c5e7b5452a03ae8d7a2cc6a149976e6185e785 Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_util.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_version.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efbed5a68086c66a2b79f4e08820800ce0938a94 Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_version.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/__pycache__/_writers.cpython-310.pyc b/venv/lib/python3.10/site-packages/h11/__pycache__/_writers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d885cac2042298b0dd25066e8057292f910c0a71 Binary files /dev/null and b/venv/lib/python3.10/site-packages/h11/__pycache__/_writers.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/h11/_abnf.py b/venv/lib/python3.10/site-packages/h11/_abnf.py new file mode 100644 index 0000000000000000000000000000000000000000..933587fba22290d7eb7df4c88e12f1e61702b8ce --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/_abnf.py @@ -0,0 +1,132 @@ +# We use native strings for all the re patterns, to take advantage of string +# formatting, and then convert to bytestrings when compiling the final re +# objects. + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#whitespace +# OWS = *( SP / HTAB ) +# ; optional whitespace +OWS = r"[ \t]*" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.token.separators +# token = 1*tchar +# +# tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*" +# / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~" +# / DIGIT / ALPHA +# ; any VCHAR, except delimiters +token = r"[-!#$%&'*+.^_`|~0-9a-zA-Z]+" + +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#header.fields +# field-name = token +field_name = token + +# The standard says: +# +# field-value = *( field-content / obs-fold ) +# field-content = field-vchar [ 1*( SP / HTAB ) field-vchar ] +# field-vchar = VCHAR / obs-text +# obs-fold = CRLF 1*( SP / HTAB ) +# ; obsolete line folding +# ; see Section 3.2.4 +# +# https://tools.ietf.org/html/rfc5234#appendix-B.1 +# +# VCHAR = %x21-7E +# ; visible (printing) characters +# +# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#rule.quoted-string +# obs-text = %x80-FF +# +# However, the standard definition of field-content is WRONG! It disallows +# fields containing a single visible character surrounded by whitespace, +# e.g. "foo a bar". +# +# See: https://www.rfc-editor.org/errata_search.php?rfc=7230&eid=4189 +# +# So our definition of field_content attempts to fix it up... 
+#
+# Also, we allow lots of control characters, because apparently people assume
+# that they're legal in practice (e.g., google analytics makes cookies with
+# \x01 in them!):
+#   https://github.com/python-hyper/h11/issues/57
+# We still don't allow NUL or whitespace, because those are often treated as
+# meta-characters and letting them through can lead to nasty issues like SSRF.
+vchar = r"[\x21-\x7e]"
+vchar_or_obs_text = r"[^\x00\s]"
+field_vchar = vchar_or_obs_text
+field_content = r"{field_vchar}+(?:[ \t]+{field_vchar}+)*".format(**globals())
+
+# We handle obs-fold at a different level, and our fixed-up field_content
+# already grows to swallow the whole value, so ? instead of *
+field_value = r"({field_content})?".format(**globals())
+
+#  header-field   = field-name ":" OWS field-value OWS
+header_field = (
+    r"(?P<field_name>{field_name})"
+    r":"
+    r"{OWS}"
+    r"(?P<field_value>{field_value})"
+    r"{OWS}".format(**globals())
+)
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#request.line
+#
+#   request-line   = method SP request-target SP HTTP-version CRLF
+#   method         = token
+#   HTTP-version   = HTTP-name "/" DIGIT "." DIGIT
+#   HTTP-name      = %x48.54.54.50 ; "HTTP", case-sensitive
+#
+# request-target is complicated (see RFC 7230 sec 5.3) -- could be path, full
+# URL, host+port (for connect), or even "*", but in any case we are guaranteed
+# that it consists of the visible printing characters.
+method = token
+request_target = r"{vchar}+".format(**globals())
+http_version = r"HTTP/(?P<http_version>[0-9]\.[0-9])"
+request_line = (
+    r"(?P<method>{method})"
+    r" "
+    r"(?P<target>{request_target})"
+    r" "
+    r"{http_version}".format(**globals())
+)
+
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#status.line
+#
+#   status-line = HTTP-version SP status-code SP reason-phrase CRLF
+#   status-code    = 3DIGIT
+#   reason-phrase  = *( HTAB / SP / VCHAR / obs-text )
+status_code = r"[0-9]{3}"
+reason_phrase = r"([ \t]|{vchar_or_obs_text})*".format(**globals())
+status_line = (
+    r"{http_version}"
+    r" "
+    r"(?P<status_code>{status_code})"
+    # However, there are apparently a few too many servers out there that just
+    # leave out the reason phrase:
+    #   https://github.com/scrapy/scrapy/issues/345#issuecomment-281756036
+    #   https://github.com/seanmonstar/httparse/issues/29
+    # so make it optional. ?: is a non-capturing group.
+    r"(?: (?P<reason>{reason_phrase}))?".format(**globals())
)
+
+HEXDIG = r"[0-9A-Fa-f]"
+# Actually
+#
+#      chunk-size     = 1*HEXDIG
+#
+# but we impose an upper-limit to avoid ridiculosity. len(str(2**64)) == 20
+chunk_size = r"({HEXDIG}){{1,20}}".format(**globals())
+# Actually
+#
+#     chunk-ext      = *( ";" chunk-ext-name [ "=" chunk-ext-val ] )
+#
+# but we aren't parsing the things so we don't really care.
+chunk_ext = r";.*"
+chunk_header = (
+    r"(?P<chunk_size>{chunk_size})"
+    r"(?P<chunk_ext>{chunk_ext})?"
+    r"{OWS}\r\n".format(
+        **globals()
+    )  # Even though the specification does not allow for extra whitespaces,
+    # we are lenient with trailing whitespaces because some servers in the wild use them.
+)
diff --git a/venv/lib/python3.10/site-packages/h11/_connection.py b/venv/lib/python3.10/site-packages/h11/_connection.py
new file mode 100644
index 0000000000000000000000000000000000000000..e37d82a82a882c072cb938a90eb4486b51cdad99
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/h11/_connection.py
@@ -0,0 +1,659 @@
+# This contains the main Connection class. Everything in h11 revolves around
+# this.
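+#
+# A rough sketch of the server side of one request/response cycle, using the
+# public API defined below (illustrative only -- error handling and the
+# NEED_DATA loop are omitted, and "sock" is assumed to be an accepted socket
+# supplied by the caller):
+#
+#     conn = Connection(our_role=SERVER)
+#     conn.receive_data(sock.recv(4096))
+#     request = conn.next_event()  # a Request, once it's fully buffered
+#     sock.sendall(conn.send(Response(status_code=200, headers=[])))
+#     sock.sendall(conn.send(Data(data=b"hello")))
+#     sock.sendall(conn.send(EndOfMessage()))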
+from typing import ( + Any, + Callable, + cast, + Dict, + List, + Optional, + overload, + Tuple, + Type, + Union, +) + +from ._events import ( + ConnectionClosed, + Data, + EndOfMessage, + Event, + InformationalResponse, + Request, + Response, +) +from ._headers import get_comma_header, has_expect_100_continue, set_comma_header +from ._readers import READERS, ReadersType +from ._receivebuffer import ReceiveBuffer +from ._state import ( + _SWITCH_CONNECT, + _SWITCH_UPGRADE, + CLIENT, + ConnectionState, + DONE, + ERROR, + MIGHT_SWITCH_PROTOCOL, + SEND_BODY, + SERVER, + SWITCHED_PROTOCOL, +) +from ._util import ( # Import the internal things we need + LocalProtocolError, + RemoteProtocolError, + Sentinel, +) +from ._writers import WRITERS, WritersType + +# Everything in __all__ gets re-exported as part of the h11 public API. +__all__ = ["Connection", "NEED_DATA", "PAUSED"] + + +class NEED_DATA(Sentinel, metaclass=Sentinel): + pass + + +class PAUSED(Sentinel, metaclass=Sentinel): + pass + + +# If we ever have this much buffered without it making a complete parseable +# event, we error out. The only time we really buffer is when reading the +# request/response line + headers together, so this is effectively the limit on +# the size of that. +# +# Some precedents for defaults: +# - node.js: 80 * 1024 +# - tomcat: 8 * 1024 +# - IIS: 16 * 1024 +# - Apache: <8 KiB per line> +DEFAULT_MAX_INCOMPLETE_EVENT_SIZE = 16 * 1024 + + +# RFC 7230's rules for connection lifecycles: +# - If either side says they want to close the connection, then the connection +# must close. +# - HTTP/1.1 defaults to keep-alive unless someone says Connection: close +# - HTTP/1.0 defaults to close unless both sides say Connection: keep-alive +# (and even this is a mess -- e.g. if you're implementing a proxy then +# sending Connection: keep-alive is forbidden). +# +# We simplify life by simply not supporting keep-alive with HTTP/1.0 peers. So +# our rule is: +# - If someone says Connection: close, we will close +# - If someone uses HTTP/1.0, we will close. +def _keep_alive(event: Union[Request, Response]) -> bool: + connection = get_comma_header(event.headers, b"connection") + if b"close" in connection: + return False + if getattr(event, "http_version", b"1.1") < b"1.1": + return False + return True + + +def _body_framing( + request_method: bytes, event: Union[Request, Response] +) -> Tuple[str, Union[Tuple[()], Tuple[int]]]: + # Called when we enter SEND_BODY to figure out framing information for + # this body. + # + # These are the only two events that can trigger a SEND_BODY state: + assert type(event) in (Request, Response) + # Returns one of: + # + # ("content-length", count) + # ("chunked", ()) + # ("http/1.0", ()) + # + # which are (lookup key, *args) for constructing body reader/writer + # objects. + # + # Reference: https://tools.ietf.org/html/rfc7230#section-3.3.3 + # + # Step 1: some responses always have an empty body, regardless of what the + # headers say. + if type(event) is Response: + if ( + event.status_code in (204, 304) + or request_method == b"HEAD" + or (request_method == b"CONNECT" and 200 <= event.status_code < 300) + ): + return ("content-length", (0,)) + # Section 3.3.3 also lists another case -- responses with status_code + # < 200. For us these are InformationalResponses, not Responses, so + # they can't get into this function in the first place. 
+        assert event.status_code >= 200
+
+    # Step 2: check for Transfer-Encoding (T-E beats C-L):
+    transfer_encodings = get_comma_header(event.headers, b"transfer-encoding")
+    if transfer_encodings:
+        assert transfer_encodings == [b"chunked"]
+        return ("chunked", ())
+
+    # Step 3: check for Content-Length
+    content_lengths = get_comma_header(event.headers, b"content-length")
+    if content_lengths:
+        return ("content-length", (int(content_lengths[0]),))
+
+    # Step 4: no applicable headers; fallback/default depends on type
+    if type(event) is Request:
+        return ("content-length", (0,))
+    else:
+        return ("http/1.0", ())
+
+
+################################################################
+#
+# The main Connection class
+#
+################################################################
+
+
+class Connection:
+    """An object encapsulating the state of an HTTP connection.
+
+    Args:
+        our_role: If you're implementing a client, pass :data:`h11.CLIENT`. If
+            you're implementing a server, pass :data:`h11.SERVER`.
+
+        max_incomplete_event_size (int):
+            The maximum number of bytes we're willing to buffer of an
+            incomplete event. In practice this mostly sets a limit on the
+            maximum size of the request/response line + headers. If this is
+            exceeded, then :meth:`next_event` will raise
+            :exc:`RemoteProtocolError`.
+
+    """
+
+    def __init__(
+        self,
+        our_role: Type[Sentinel],
+        max_incomplete_event_size: int = DEFAULT_MAX_INCOMPLETE_EVENT_SIZE,
+    ) -> None:
+        self._max_incomplete_event_size = max_incomplete_event_size
+        # State and role tracking
+        if our_role not in (CLIENT, SERVER):
+            raise ValueError(f"expected CLIENT or SERVER, not {our_role!r}")
+        self.our_role = our_role
+        self.their_role: Type[Sentinel]
+        if our_role is CLIENT:
+            self.their_role = SERVER
+        else:
+            self.their_role = CLIENT
+        self._cstate = ConnectionState()
+
+        # Callables for converting data->events or vice-versa given the
+        # current state
+        self._writer = self._get_io_object(self.our_role, None, WRITERS)
+        self._reader = self._get_io_object(self.their_role, None, READERS)
+
+        # Holds any unprocessed received data
+        self._receive_buffer = ReceiveBuffer()
+        # If this is true, then it indicates that the incoming connection was
+        # closed *after* the end of whatever's in self._receive_buffer:
+        self._receive_buffer_closed = False
+
+        # Extra bits of state that don't fit into the state machine.
+        #
+        # These two are only used to interpret framing headers for figuring
+        # out how to read/write response bodies. their_http_version is also
+        # made available as a convenient public API.
+        self.their_http_version: Optional[bytes] = None
+        self._request_method: Optional[bytes] = None
+        # This is pure flow-control and doesn't at all affect the set of legal
+        # transitions, so no need to bother ConnectionState with it:
+        self.client_is_waiting_for_100_continue = False
+
+    @property
+    def states(self) -> Dict[Type[Sentinel], Type[Sentinel]]:
+        """A dictionary like::
+
+           {CLIENT: <client state>, SERVER: <server state>}
+
+        See :ref:`state-machine` for details.
+
+        """
+        return dict(self._cstate.states)
+
+    @property
+    def our_state(self) -> Type[Sentinel]:
+        """The current state of whichever role we are playing. See
+        :ref:`state-machine` for details.
+        """
+        return self._cstate.states[self.our_role]
+
+    @property
+    def their_state(self) -> Type[Sentinel]:
+        """The current state of whichever role we are NOT playing. See
+        :ref:`state-machine` for details.
+ """ + return self._cstate.states[self.their_role] + + @property + def they_are_waiting_for_100_continue(self) -> bool: + return self.their_role is CLIENT and self.client_is_waiting_for_100_continue + + def start_next_cycle(self) -> None: + """Attempt to reset our connection state for a new request/response + cycle. + + If both client and server are in :data:`DONE` state, then resets them + both to :data:`IDLE` state in preparation for a new request/response + cycle on this same connection. Otherwise, raises a + :exc:`LocalProtocolError`. + + See :ref:`keepalive-and-pipelining`. + + """ + old_states = dict(self._cstate.states) + self._cstate.start_next_cycle() + self._request_method = None + # self.their_http_version gets left alone, since it presumably lasts + # beyond a single request/response cycle + assert not self.client_is_waiting_for_100_continue + self._respond_to_state_changes(old_states) + + def _process_error(self, role: Type[Sentinel]) -> None: + old_states = dict(self._cstate.states) + self._cstate.process_error(role) + self._respond_to_state_changes(old_states) + + def _server_switch_event(self, event: Event) -> Optional[Type[Sentinel]]: + if type(event) is InformationalResponse and event.status_code == 101: + return _SWITCH_UPGRADE + if type(event) is Response: + if ( + _SWITCH_CONNECT in self._cstate.pending_switch_proposals + and 200 <= event.status_code < 300 + ): + return _SWITCH_CONNECT + return None + + # All events go through here + def _process_event(self, role: Type[Sentinel], event: Event) -> None: + # First, pass the event through the state machine to make sure it + # succeeds. + old_states = dict(self._cstate.states) + if role is CLIENT and type(event) is Request: + if event.method == b"CONNECT": + self._cstate.process_client_switch_proposal(_SWITCH_CONNECT) + if get_comma_header(event.headers, b"upgrade"): + self._cstate.process_client_switch_proposal(_SWITCH_UPGRADE) + server_switch_event = None + if role is SERVER: + server_switch_event = self._server_switch_event(event) + self._cstate.process_event(role, type(event), server_switch_event) + + # Then perform the updates triggered by it. + + if type(event) is Request: + self._request_method = event.method + + if role is self.their_role and type(event) in ( + Request, + Response, + InformationalResponse, + ): + event = cast(Union[Request, Response, InformationalResponse], event) + self.their_http_version = event.http_version + + # Keep alive handling + # + # RFC 7230 doesn't really say what one should do if Connection: close + # shows up on a 1xx InformationalResponse. I think the idea is that + # this is not supposed to happen. In any case, if it does happen, we + # ignore it. 
+ if type(event) in (Request, Response) and not _keep_alive( + cast(Union[Request, Response], event) + ): + self._cstate.process_keep_alive_disabled() + + # 100-continue + if type(event) is Request and has_expect_100_continue(event): + self.client_is_waiting_for_100_continue = True + if type(event) in (InformationalResponse, Response): + self.client_is_waiting_for_100_continue = False + if role is CLIENT and type(event) in (Data, EndOfMessage): + self.client_is_waiting_for_100_continue = False + + self._respond_to_state_changes(old_states, event) + + def _get_io_object( + self, + role: Type[Sentinel], + event: Optional[Event], + io_dict: Union[ReadersType, WritersType], + ) -> Optional[Callable[..., Any]]: + # event may be None; it's only used when entering SEND_BODY + state = self._cstate.states[role] + if state is SEND_BODY: + # Special case: the io_dict has a dict of reader/writer factories + # that depend on the request/response framing. + framing_type, args = _body_framing( + cast(bytes, self._request_method), cast(Union[Request, Response], event) + ) + return io_dict[SEND_BODY][framing_type](*args) # type: ignore[index] + else: + # General case: the io_dict just has the appropriate reader/writer + # for this state + return io_dict.get((role, state)) # type: ignore[return-value] + + # This must be called after any action that might have caused + # self._cstate.states to change. + def _respond_to_state_changes( + self, + old_states: Dict[Type[Sentinel], Type[Sentinel]], + event: Optional[Event] = None, + ) -> None: + # Update reader/writer + if self.our_state != old_states[self.our_role]: + self._writer = self._get_io_object(self.our_role, event, WRITERS) + if self.their_state != old_states[self.their_role]: + self._reader = self._get_io_object(self.their_role, event, READERS) + + @property + def trailing_data(self) -> Tuple[bytes, bool]: + """Data that has been received, but not yet processed, represented as + a tuple with two elements, where the first is a byte-string containing + the unprocessed data itself, and the second is a bool that is True if + the receive connection was closed. + + See :ref:`switching-protocols` for discussion of why you'd want this. + """ + return (bytes(self._receive_buffer), self._receive_buffer_closed) + + def receive_data(self, data: bytes) -> None: + """Add data to our internal receive buffer. + + This does not actually do any processing on the data, just stores + it. To trigger processing, you have to call :meth:`next_event`. + + Args: + data (:term:`bytes-like object`): + The new data that was just received. + + Special case: If *data* is an empty byte-string like ``b""``, + then this indicates that the remote side has closed the + connection (end of file). Normally this is convenient, because + standard Python APIs like :meth:`file.read` or + :meth:`socket.recv` use ``b""`` to indicate end-of-file, while + other failures to read are indicated using other mechanisms + like raising :exc:`TimeoutError`. When using such an API you + can just blindly pass through whatever you get from ``read`` + to :meth:`receive_data`, and everything will work. + + But, if you have an API where reading an empty string is a + valid non-EOF condition, then you need to be aware of this and + make sure to check for such strings and avoid passing them to + :meth:`receive_data`. + + Returns: + Nothing, but after calling this you should call :meth:`next_event` + to parse the newly received data. 
+ + Raises: + RuntimeError: + Raised if you pass an empty *data*, indicating EOF, and then + pass a non-empty *data*, indicating more data that somehow + arrived after the EOF. + + (Calling ``receive_data(b"")`` multiple times is fine, + and equivalent to calling it once.) + + """ + if data: + if self._receive_buffer_closed: + raise RuntimeError("received close, then received more data?") + self._receive_buffer += data + else: + self._receive_buffer_closed = True + + def _extract_next_receive_event( + self, + ) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: + state = self.their_state + # We don't pause immediately when they enter DONE, because even in + # DONE state we can still process a ConnectionClosed() event. But + # if we have data in our buffer, then we definitely aren't getting + # a ConnectionClosed() immediately and we need to pause. + if state is DONE and self._receive_buffer: + return PAUSED + if state is MIGHT_SWITCH_PROTOCOL or state is SWITCHED_PROTOCOL: + return PAUSED + assert self._reader is not None + event = self._reader(self._receive_buffer) + if event is None: + if not self._receive_buffer and self._receive_buffer_closed: + # In some unusual cases (basically just HTTP/1.0 bodies), EOF + # triggers an actual protocol event; in that case, we want to + # return that event, and then the state will change and we'll + # get called again to generate the actual ConnectionClosed(). + if hasattr(self._reader, "read_eof"): + event = self._reader.read_eof() + else: + event = ConnectionClosed() + if event is None: + event = NEED_DATA + return event # type: ignore[no-any-return] + + def next_event(self) -> Union[Event, Type[NEED_DATA], Type[PAUSED]]: + """Parse the next event out of our receive buffer, update our internal + state, and return it. + + This is a mutating operation -- think of it like calling :func:`next` + on an iterator. + + Returns: + : One of three things: + + 1) An event object -- see :ref:`events`. + + 2) The special constant :data:`NEED_DATA`, which indicates that + you need to read more data from your socket and pass it to + :meth:`receive_data` before this method will be able to return + any more events. + + 3) The special constant :data:`PAUSED`, which indicates that we + are not in a state where we can process incoming data (usually + because the peer has finished their part of the current + request/response cycle, and you have not yet called + :meth:`start_next_cycle`). See :ref:`flow-control` for details. + + Raises: + RemoteProtocolError: + The peer has misbehaved. You should close the connection + (possibly after sending some kind of 4xx response). + + Once this method returns :class:`ConnectionClosed` once, then all + subsequent calls will also return :class:`ConnectionClosed`. + + If this method raises any exception besides :exc:`RemoteProtocolError` + then that's a bug -- if it happens please file a bug report! + + If this method raises any exception then it also sets + :attr:`Connection.their_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. 
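+
+        A typical blocking read loop looks roughly like this (an illustrative
+        sketch; ``conn`` is this :class:`Connection` and ``sock`` is assumed
+        to be the caller's socket)::
+
+            while True:
+                event = conn.next_event()
+                if event is h11.NEED_DATA:
+                    conn.receive_data(sock.recv(4096))
+                    continue
+                if type(event) is h11.ConnectionClosed:
+                    break
+                ...  # handle the event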
+ + """ + + if self.their_state is ERROR: + raise RemoteProtocolError("Can't receive data when peer state is ERROR") + try: + event = self._extract_next_receive_event() + if event not in [NEED_DATA, PAUSED]: + self._process_event(self.their_role, cast(Event, event)) + if event is NEED_DATA: + if len(self._receive_buffer) > self._max_incomplete_event_size: + # 431 is "Request header fields too large" which is pretty + # much the only situation where we can get here + raise RemoteProtocolError( + "Receive buffer too long", error_status_hint=431 + ) + if self._receive_buffer_closed: + # We're still trying to complete some event, but that's + # never going to happen because no more data is coming + raise RemoteProtocolError("peer unexpectedly closed connection") + return event + except BaseException as exc: + self._process_error(self.their_role) + if isinstance(exc, LocalProtocolError): + exc._reraise_as_remote_protocol_error() + else: + raise + + @overload + def send(self, event: ConnectionClosed) -> None: + ... + + @overload + def send( + self, event: Union[Request, InformationalResponse, Response, Data, EndOfMessage] + ) -> bytes: + ... + + @overload + def send(self, event: Event) -> Optional[bytes]: + ... + + def send(self, event: Event) -> Optional[bytes]: + """Convert a high-level event into bytes that can be sent to the peer, + while updating our internal state machine. + + Args: + event: The :ref:`event ` to send. + + Returns: + If ``type(event) is ConnectionClosed``, then returns + ``None``. Otherwise, returns a :term:`bytes-like object`. + + Raises: + LocalProtocolError: + Sending this event at this time would violate our + understanding of the HTTP/1.1 protocol. + + If this method raises any exception then it also sets + :attr:`Connection.our_state` to :data:`ERROR` -- see + :ref:`error-handling` for discussion. + + """ + data_list = self.send_with_data_passthrough(event) + if data_list is None: + return None + else: + return b"".join(data_list) + + def send_with_data_passthrough(self, event: Event) -> Optional[List[bytes]]: + """Identical to :meth:`send`, except that in situations where + :meth:`send` returns a single :term:`bytes-like object`, this instead + returns a list of them -- and when sending a :class:`Data` event, this + list is guaranteed to contain the exact object you passed in as + :attr:`Data.data`. See :ref:`sendfile` for discussion. + + """ + if self.our_state is ERROR: + raise LocalProtocolError("Can't send data when our state is ERROR") + try: + if type(event) is Response: + event = self._clean_up_response_headers_for_sending(event) + # We want to call _process_event before calling the writer, + # because if someone tries to do something invalid then this will + # give a sensible error message, while our writers all just assume + # they will only receive valid events. But, _process_event might + # change self._writer. So we have to do a little dance: + writer = self._writer + self._process_event(self.our_role, event) + if type(event) is ConnectionClosed: + return None + else: + # In any situation where writer is None, process_event should + # have raised ProtocolError + assert writer is not None + data_list: List[bytes] = [] + writer(event, data_list.append) + return data_list + except: + self._process_error(self.our_role) + raise + + def send_failed(self) -> None: + """Notify the state machine that we failed to send the data it gave + us. 
+
+        This causes :attr:`Connection.our_state` to immediately become
+        :data:`ERROR` -- see :ref:`error-handling` for discussion.
+
+        """
+        self._process_error(self.our_role)
+
+    # When sending a Response, we take responsibility for a few things:
+    #
+    # - Sometimes you MUST set Connection: close. We take care of those
+    #   times. (You can also set it yourself if you want, and if you do then
+    #   we'll respect that and close the connection at the right time. But you
+    #   don't have to worry about that unless you want to.)
+    #
+    # - The user has to set Content-Length if they want it. Otherwise, for
+    #   responses that have bodies (e.g. not HEAD), then we will automatically
+    #   select the right mechanism for streaming a body of unknown length,
+    #   which depends on the peer's HTTP version.
+    #
+    # This function's *only* responsibility is making sure headers are set up
+    # right -- everything downstream just looks at the headers. There are no
+    # side channels.
+    def _clean_up_response_headers_for_sending(self, response: Response) -> Response:
+        assert type(response) is Response
+
+        headers = response.headers
+        need_close = False
+
+        # HEAD requests need some special handling: they always act like they
+        # have Content-Length: 0, and that's how _body_framing treats
+        # them. But their headers are supposed to match what we would send if
+        # the request was a GET. (Technically there is one deviation allowed:
+        # we're allowed to leave out the framing headers -- see
+        # https://tools.ietf.org/html/rfc7231#section-4.3.2 . But it's just as
+        # easy to get them right.)
+        method_for_choosing_headers = cast(bytes, self._request_method)
+        if method_for_choosing_headers == b"HEAD":
+            method_for_choosing_headers = b"GET"
+        framing_type, _ = _body_framing(method_for_choosing_headers, response)
+        if framing_type in ("chunked", "http/1.0"):
+            # This response has a body of unknown length.
+            # If our peer is HTTP/1.1, we use Transfer-Encoding: chunked
+            # If our peer is HTTP/1.0, we use no framing headers, and close the
+            # connection afterwards.
+            #
+            # Make sure to clear Content-Length (in principle user could have
+            # set both and then we ignored Content-Length b/c
+            # Transfer-Encoding overwrote it -- this would be naughty of them,
+            # but the HTTP spec says that if our peer does this then we have
+            # to fix it instead of erroring out, so we'll accord the user the
+            # same respect).
+            headers = set_comma_header(headers, b"content-length", [])
+            if self.their_http_version is None or self.their_http_version < b"1.1":
+                # Either we never got a valid request and are sending back an
+                # error (their_http_version is None), so we assume the worst;
+                # or else we did get a valid HTTP/1.0 request, so we know that
+                # they don't understand chunked encoding.
+                headers = set_comma_header(headers, b"transfer-encoding", [])
+                # This is actually redundant ATM, since currently we
+                # unconditionally disable keep-alive when talking to HTTP/1.0
+                # peers. But let's be defensive just in case we add
+                # Connection: keep-alive support later:
+                if self._request_method != b"HEAD":
+                    need_close = True
+            else:
+                headers = set_comma_header(headers, b"transfer-encoding", [b"chunked"])
+
+        if not self._cstate.keep_alive or need_close:
+            # Make sure Connection: close is set
+            connection = set(get_comma_header(headers, b"connection"))
+            connection.discard(b"keep-alive")
+            connection.add(b"close")
+            headers = set_comma_header(headers, b"connection", sorted(connection))
+
+        return Response(
+            headers=headers,
+            status_code=response.status_code,
+            http_version=response.http_version,
+            reason=response.reason,
+        )
diff --git a/venv/lib/python3.10/site-packages/h11/_events.py b/venv/lib/python3.10/site-packages/h11/_events.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca1c3adbde2c4e7710482a18e3471f91f1da610e
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/h11/_events.py
@@ -0,0 +1,369 @@
+# High level events that make up HTTP/1.1 conversations. Loosely inspired by
+# the corresponding events in hyper-h2:
+#
+#   http://python-hyper.org/h2/en/stable/api.html#events
+#
+# Don't subclass these. Stuff will break.
+
+import re
+from abc import ABC
+from dataclasses import dataclass
+from typing import List, Tuple, Union
+
+from ._abnf import method, request_target
+from ._headers import Headers, normalize_and_validate
+from ._util import bytesify, LocalProtocolError, validate
+
+# Everything in __all__ gets re-exported as part of the h11 public API.
+__all__ = [
+    "Event",
+    "Request",
+    "InformationalResponse",
+    "Response",
+    "Data",
+    "EndOfMessage",
+    "ConnectionClosed",
+]
+
+method_re = re.compile(method.encode("ascii"))
+request_target_re = re.compile(request_target.encode("ascii"))
+
+
+class Event(ABC):
+    """
+    Base class for h11 events.
+    """
+
+    __slots__ = ()
+
+
+@dataclass(init=False, frozen=True)
+class Request(Event):
+    """The beginning of an HTTP request.
+
+    Fields:
+
+    .. attribute:: method
+
+       An HTTP method, e.g. ``b"GET"`` or ``b"POST"``. Always a byte
+       string. :term:`Bytes-like objects <bytes-like object>` and native
+       strings containing only ascii characters will be automatically
+       converted to byte strings.
+
+    .. attribute:: target
+
+       The target of an HTTP request, e.g. ``b"/index.html"``, or one of the
+       more exotic formats described in `RFC 7230, section 5.3
+       <https://tools.ietf.org/html/rfc7230#section-5.3>`_. Always a byte
+       string. :term:`Bytes-like objects <bytes-like object>` and native
+       strings containing only ascii characters will be automatically
+       converted to byte strings.
+
+    .. attribute:: headers
+
+       Request headers, represented as a list of (name, value) pairs. See
+       :ref:`the header normalization rules <headers-format>` for details.
+
+    .. attribute:: http_version
+
+       The HTTP protocol version, represented as a byte string like
+       ``b"1.1"``. See :ref:`the HTTP version normalization rules
+       <http_version-format>` for details.
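+
+    For example, a valid request event could be constructed like this (an
+    illustrative sketch; note that, as validated in ``__init__`` below, any
+    HTTP/1.1 request must carry exactly one Host header)::
+
+        req = Request(
+            method="GET",
+            target="/",
+            headers=[("Host", "example.com")],
+        )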
+ + """ + + __slots__ = ("method", "headers", "target", "http_version") + + method: bytes + headers: Headers + target: bytes + http_version: bytes + + def __init__( + self, + *, + method: Union[bytes, str], + headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]], + target: Union[bytes, str], + http_version: Union[bytes, str] = b"1.1", + _parsed: bool = False, + ) -> None: + super().__init__() + if isinstance(headers, Headers): + object.__setattr__(self, "headers", headers) + else: + object.__setattr__( + self, "headers", normalize_and_validate(headers, _parsed=_parsed) + ) + if not _parsed: + object.__setattr__(self, "method", bytesify(method)) + object.__setattr__(self, "target", bytesify(target)) + object.__setattr__(self, "http_version", bytesify(http_version)) + else: + object.__setattr__(self, "method", method) + object.__setattr__(self, "target", target) + object.__setattr__(self, "http_version", http_version) + + # "A server MUST respond with a 400 (Bad Request) status code to any + # HTTP/1.1 request message that lacks a Host header field and to any + # request message that contains more than one Host header field or a + # Host header field with an invalid field-value." + # -- https://tools.ietf.org/html/rfc7230#section-5.4 + host_count = 0 + for name, value in self.headers: + if name == b"host": + host_count += 1 + if self.http_version == b"1.1" and host_count == 0: + raise LocalProtocolError("Missing mandatory Host: header") + if host_count > 1: + raise LocalProtocolError("Found multiple Host: headers") + + validate(method_re, self.method, "Illegal method characters") + validate(request_target_re, self.target, "Illegal target characters") + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class _ResponseBase(Event): + __slots__ = ("headers", "http_version", "reason", "status_code") + + headers: Headers + http_version: bytes + reason: bytes + status_code: int + + def __init__( + self, + *, + headers: Union[Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]]], + status_code: int, + http_version: Union[bytes, str] = b"1.1", + reason: Union[bytes, str] = b"", + _parsed: bool = False, + ) -> None: + super().__init__() + if isinstance(headers, Headers): + object.__setattr__(self, "headers", headers) + else: + object.__setattr__( + self, "headers", normalize_and_validate(headers, _parsed=_parsed) + ) + if not _parsed: + object.__setattr__(self, "reason", bytesify(reason)) + object.__setattr__(self, "http_version", bytesify(http_version)) + if not isinstance(status_code, int): + raise LocalProtocolError("status code must be integer") + # Because IntEnum objects are instances of int, but aren't + # duck-compatible (sigh), see gh-72. + object.__setattr__(self, "status_code", int(status_code)) + else: + object.__setattr__(self, "reason", reason) + object.__setattr__(self, "http_version", http_version) + object.__setattr__(self, "status_code", status_code) + + self.__post_init__() + + def __post_init__(self) -> None: + pass + + # This is an unhashable type. + __hash__ = None # type: ignore + + +@dataclass(init=False, frozen=True) +class InformationalResponse(_ResponseBase): + """An HTTP informational response. + + Fields: + + .. attribute:: status_code + + The status code of this response, as an integer. For an + :class:`InformationalResponse`, this is always in the range [100, + 200). + + .. attribute:: headers + + Request headers, represented as a list of (name, value) pairs. 
See
+       :ref:`the header normalization rules <headers-format>` for
+       details.
+
+    .. attribute:: http_version
+
+       The HTTP protocol version, represented as a byte string like
+       ``b"1.1"``. See :ref:`the HTTP version normalization rules
+       <http_version-format>` for details.
+
+    .. attribute:: reason
+
+       The reason phrase of this response, as a byte string. For example:
+       ``b"OK"``, or ``b"Not Found"``.
+
+    """
+
+    def __post_init__(self) -> None:
+        if not (100 <= self.status_code < 200):
+            raise LocalProtocolError(
+                "InformationalResponse status_code should be in range "
+                "[100, 200), not {}".format(self.status_code)
+            )
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class Response(_ResponseBase):
+    """The beginning of an HTTP response.
+
+    Fields:
+
+    .. attribute:: status_code
+
+       The status code of this response, as an integer. For a
+       :class:`Response`, this is always in the range [200,
+       1000).
+
+    .. attribute:: headers
+
+       Request headers, represented as a list of (name, value) pairs. See
+       :ref:`the header normalization rules <headers-format>` for details.
+
+    .. attribute:: http_version
+
+       The HTTP protocol version, represented as a byte string like
+       ``b"1.1"``. See :ref:`the HTTP version normalization rules
+       <http_version-format>` for details.
+
+    .. attribute:: reason
+
+       The reason phrase of this response, as a byte string. For example:
+       ``b"OK"``, or ``b"Not Found"``.
+
+    """
+
+    def __post_init__(self) -> None:
+        if not (200 <= self.status_code < 1000):
+            raise LocalProtocolError(
+                "Response status_code should be in range [200, 1000), not {}".format(
+                    self.status_code
+                )
+            )
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+@dataclass(init=False, frozen=True)
+class Data(Event):
+    """Part of an HTTP message body.
+
+    Fields:
+
+    .. attribute:: data
+
+       A :term:`bytes-like object` containing part of a message body. Or, if
+       using the ``combine=False`` argument to :meth:`Connection.send`, then
+       any object that your socket writing code knows what to do with, and for
+       which calling :func:`len` returns the number of bytes that will be
+       written -- see :ref:`sendfile` for details.
+
+    .. attribute:: chunk_start
+
+       A marker that indicates whether this data object is from the start of a
+       chunked transfer encoding chunk. This field is ignored when a Data
+       event is provided to :meth:`Connection.send`: it is only valid on
+       events emitted from :meth:`Connection.next_event`. You probably
+       shouldn't use this attribute at all; see
+       :ref:`chunk-delimiters-are-bad` for details.
+
+    .. attribute:: chunk_end
+
+       A marker that indicates whether this data object is the last for a
+       given chunked transfer encoding chunk. This field is ignored when
+       a Data event is provided to :meth:`Connection.send`: it is only valid
+       on events emitted from :meth:`Connection.next_event`. You probably
+       shouldn't use this attribute at all; see
+       :ref:`chunk-delimiters-are-bad` for details.
+
+    """
+
+    __slots__ = ("data", "chunk_start", "chunk_end")
+
+    data: bytes
+    chunk_start: bool
+    chunk_end: bool
+
+    def __init__(
+        self, data: bytes, chunk_start: bool = False, chunk_end: bool = False
+    ) -> None:
+        object.__setattr__(self, "data", data)
+        object.__setattr__(self, "chunk_start", chunk_start)
+        object.__setattr__(self, "chunk_end", chunk_end)
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+# XX FIXME: "A recipient MUST ignore (or consider as an error) any fields that
+# are forbidden to be sent in a trailer, since processing them as if they were
+# present in the header section might bypass external security filters."
+# https://svn.tools.ietf.org/svn/wg/httpbis/specs/rfc7230.html#chunked.trailer.part
+# Unfortunately, the list of forbidden fields is long and vague :-/
+@dataclass(init=False, frozen=True)
+class EndOfMessage(Event):
+    """The end of an HTTP message.
+
+    Fields:
+
+    .. attribute:: headers
+
+       Default value: ``[]``
+
+       Any trailing headers attached to this message, represented as a list of
+       (name, value) pairs. See :ref:`the header normalization rules
+       <headers-format>` for details.
+
+       Must be empty unless ``Transfer-Encoding: chunked`` is in use.
+
+    """
+
+    __slots__ = ("headers",)
+
+    headers: Headers
+
+    def __init__(
+        self,
+        *,
+        headers: Union[
+            Headers, List[Tuple[bytes, bytes]], List[Tuple[str, str]], None
+        ] = None,
+        _parsed: bool = False,
+    ) -> None:
+        super().__init__()
+        if headers is None:
+            headers = Headers([])
+        elif not isinstance(headers, Headers):
+            headers = normalize_and_validate(headers, _parsed=_parsed)
+
+        object.__setattr__(self, "headers", headers)
+
+    # This is an unhashable type.
+    __hash__ = None  # type: ignore
+
+
+@dataclass(frozen=True)
+class ConnectionClosed(Event):
+    """This event indicates that the sender has closed their outgoing
+    connection.
+
+    Note that this does not necessarily mean that they can't *receive* further
+    data, because TCP connections are composed of two one-way channels which
+    can be closed independently. See :ref:`closing` for details.
+
+    No fields.
+    """
+
+    pass
diff --git a/venv/lib/python3.10/site-packages/h11/_headers.py b/venv/lib/python3.10/site-packages/h11/_headers.py
new file mode 100644
index 0000000000000000000000000000000000000000..31da3e2b23b55a624b36f105e62a6902e63286aa
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/h11/_headers.py
@@ -0,0 +1,282 @@
+import re
+from typing import AnyStr, cast, List, overload, Sequence, Tuple, TYPE_CHECKING, Union
+
+from ._abnf import field_name, field_value
+from ._util import bytesify, LocalProtocolError, validate
+
+if TYPE_CHECKING:
+    from ._events import Request
+
+try:
+    from typing import Literal
+except ImportError:
+    from typing_extensions import Literal  # type: ignore
+
+CONTENT_LENGTH_MAX_DIGITS = 20  # allow up to 1 billion TB - 1
+
+
+# Facts
+# -----
+#
+# Headers are:
+#  keys: case-insensitive ascii
+#  values: mixture of ascii and raw bytes
+#
+# "Historically, HTTP has allowed field content with text in the ISO-8859-1
+# charset [ISO-8859-1], supporting other charsets only through use of
+# [RFC2047] encoding. In practice, most HTTP header field values use only a
+# subset of the US-ASCII charset [USASCII]. Newly defined header fields SHOULD
+# limit their field values to US-ASCII octets. A recipient SHOULD treat other
+# octets in field content (obs-text) as opaque data."
+# And it deprecates all non-ascii values
+#
+# Leading/trailing whitespace in header names is forbidden
+#
+# Values get leading/trailing whitespace stripped
+#
+# Content-Disposition actually needs to contain unicode semantically; to
+# accomplish this it has a terrifically weird way of encoding the filename
+# itself as ascii (and even this still has lots of cross-browser
+# incompatibilities)
+#
+# Order is important:
+# "a proxy MUST NOT change the order of these field values when forwarding a
+# message"
+# (and there are several headers where the order indicates a preference)
+#
+# Multiple occurrences of the same header:
+# "A sender MUST NOT generate multiple header fields with the same field name
+# in a message unless either the entire field value for that header field is
+# defined as a comma-separated list [or the header is Set-Cookie which gets a
+# special exception]" - RFC 7230. (cookies are in RFC 6265)
+#
+# So every header aside from Set-Cookie can be merged by b", ".join if it
+# occurs repeatedly. But, of course, they can't necessarily be split by
+# .split(b","), because quoting.
+#
+# Given all this mess (case insensitive, duplicates allowed, order is
+# important, ...), there doesn't appear to be any standard way to handle
+# headers in Python -- they're almost like dicts, but... actually just
+# aren't. For now we punt and just use a super simple representation: headers
+# are a list of pairs
+#
+#   [(name1, value1), (name2, value2), ...]
+#
+# where all entries are bytestrings, names are lowercase and have no
+# leading/trailing whitespace, and values are bytestrings with no
+# leading/trailing whitespace. Searching and updating are done via naive O(n)
+# methods.
+#
+# Maybe a dict-of-lists would be better?
+
+_content_length_re = re.compile(rb"[0-9]+")
+_field_name_re = re.compile(field_name.encode("ascii"))
+_field_value_re = re.compile(field_value.encode("ascii"))
+
+
+class Headers(Sequence[Tuple[bytes, bytes]]):
+    """
+    A list-like interface that allows iterating over headers as byte-pairs
+    of (lowercased-name, value).
+
+    Internally we actually store the representation as three-tuples,
+    including both the raw original casing, in order to preserve casing
+    over-the-wire, and the lowercased name, for case-insensitive comparisons.
+
+    r = Request(
+        method="GET",
+        target="/",
+        headers=[("Host", "example.org"), ("Connection", "keep-alive")],
+        http_version="1.1",
+    )
+    assert r.headers == [
+        (b"host", b"example.org"),
+        (b"connection", b"keep-alive")
+    ]
+    assert r.headers.raw_items() == [
+        (b"Host", b"example.org"),
+        (b"Connection", b"keep-alive")
+    ]
+    """
+
+    __slots__ = "_full_items"
+
+    def __init__(self, full_items: List[Tuple[bytes, bytes, bytes]]) -> None:
+        self._full_items = full_items
+
+    def __bool__(self) -> bool:
+        return bool(self._full_items)
+
+    def __eq__(self, other: object) -> bool:
+        return list(self) == list(other)  # type: ignore
+
+    def __len__(self) -> int:
+        return len(self._full_items)
+
+    def __repr__(self) -> str:
+        return "<Headers(%s)>" % repr(list(self))
+
+    def __getitem__(self, idx: int) -> Tuple[bytes, bytes]:  # type: ignore[override]
+        _, name, value = self._full_items[idx]
+        return (name, value)
+
+    def raw_items(self) -> List[Tuple[bytes, bytes]]:
+        return [(raw_name, value) for raw_name, _, value in self._full_items]
+
+
+HeaderTypes = Union[
+    List[Tuple[bytes, bytes]],
+    List[Tuple[bytes, str]],
+    List[Tuple[str, bytes]],
+    List[Tuple[str, str]],
+]
+
+
+@overload
+def normalize_and_validate(headers: Headers, _parsed: Literal[True]) -> Headers:
+    ...
+
+
+@overload
+def normalize_and_validate(headers: HeaderTypes, _parsed: Literal[False]) -> Headers:
+    ...
+
+
+@overload
+def normalize_and_validate(
+    headers: Union[Headers, HeaderTypes], _parsed: bool = False
+) -> Headers:
+    ...
+
+
+def normalize_and_validate(
+    headers: Union[Headers, HeaderTypes], _parsed: bool = False
+) -> Headers:
+    new_headers = []
+    seen_content_length = None
+    saw_transfer_encoding = False
+    for name, value in headers:
+        # For headers coming out of the parser, we can safely skip some steps,
+        # because it always returns bytes and has already run these regexes
+        # over the data:
+        if not _parsed:
+            name = bytesify(name)
+            value = bytesify(value)
+            validate(_field_name_re, name, "Illegal header name {!r}", name)
+            validate(_field_value_re, value, "Illegal header value {!r}", value)
+        assert isinstance(name, bytes)
+        assert isinstance(value, bytes)
+
+        raw_name = name
+        name = name.lower()
+        if name == b"content-length":
+            lengths = {length.strip() for length in value.split(b",")}
+            if len(lengths) != 1:
+                raise LocalProtocolError("conflicting Content-Length headers")
+            value = lengths.pop()
+            validate(_content_length_re, value, "bad Content-Length")
+            if len(value) > CONTENT_LENGTH_MAX_DIGITS:
+                raise LocalProtocolError("bad Content-Length")
+            if seen_content_length is None:
+                seen_content_length = value
+                new_headers.append((raw_name, name, value))
+            elif seen_content_length != value:
+                raise LocalProtocolError("conflicting Content-Length headers")
+        elif name == b"transfer-encoding":
+            # "A server that receives a request message with a transfer coding
+            # it does not understand SHOULD respond with 501 (Not
+            # Implemented)."
+            # https://tools.ietf.org/html/rfc7230#section-3.3.1
+            if saw_transfer_encoding:
+                raise LocalProtocolError(
+                    "multiple Transfer-Encoding headers", error_status_hint=501
+                )
+            # "All transfer-coding names are case-insensitive"
+            # -- https://tools.ietf.org/html/rfc7230#section-4
+            value = value.lower()
+            if value != b"chunked":
+                raise LocalProtocolError(
+                    "Only Transfer-Encoding: chunked is supported",
+                    error_status_hint=501,
+                )
+            saw_transfer_encoding = True
+            new_headers.append((raw_name, name, value))
+        else:
+            new_headers.append((raw_name, name, value))
+    return Headers(new_headers)
+
+
+def get_comma_header(headers: Headers, name: bytes) -> List[bytes]:
+    # Should only be used for headers whose value is a list of
+    # comma-separated, case-insensitive values.
+    #
+    # The header name `name` is expected to be lower-case bytes.
+    #
+    # Connection: meets these criteria (including case insensitivity).
+    #
+    # Content-Length: technically is just a single value (1*DIGIT), but the
+    # standard makes reference to implementations that do multiple values, and
+    # using this doesn't hurt. Ditto, case insensitivity doesn't matter either
+    # way.
+    #
+    # Transfer-Encoding: is more complex (allows for quoted strings), so
+    # splitting on , is actually wrong. For example, this is legal:
+    #
+    #    Transfer-Encoding: foo; options="1,2", chunked
+    #
+    # and should be parsed as
+    #
+    #    foo; options="1,2"
+    #    chunked
+    #
+    # but this naive function will parse it as
+    #
+    #    foo; options="1
+    #    2"
+    #    chunked
+    #
+    # However, this is okay because the only thing we are going to do with
+    # any Transfer-Encoding is reject ones that aren't just "chunked", so
+    # both of these will be treated the same anyway.
+    #
+    # Expect: the only legal value is the literal string
+    # "100-continue". Splitting on commas is harmless. Case insensitive.
+    #
+    out: List[bytes] = []
+    for _, found_name, found_raw_value in headers._full_items:
+        if found_name == name:
+            found_raw_value = found_raw_value.lower()
+            for found_split_value in found_raw_value.split(b","):
+                found_split_value = found_split_value.strip()
+                if found_split_value:
+                    out.append(found_split_value)
+    return out
+
+
+def set_comma_header(headers: Headers, name: bytes, new_values: List[bytes]) -> Headers:
+    # The header name `name` is expected to be lower-case bytes.
+    #
+    # Note that when we store the header we use title casing for the header
+    # names, in order to match the conventional HTTP header style.
+    #
+    # Simply calling `.title()` is a blunt approach, but it's correct
+    # here given the cases where we're using `set_comma_header`...
+    #
+    # Connection, Content-Length, Transfer-Encoding.
+    new_headers: List[Tuple[bytes, bytes]] = []
+    for found_raw_name, found_name, found_raw_value in headers._full_items:
+        if found_name != name:
+            new_headers.append((found_raw_name, found_raw_value))
+    for new_value in new_values:
+        new_headers.append((name.title(), new_value))
+    return normalize_and_validate(new_headers)
+
+
+def has_expect_100_continue(request: "Request") -> bool:
+    # https://tools.ietf.org/html/rfc7231#section-5.1.1
+    # "A server that receives a 100-continue expectation in an HTTP/1.0 request
+    # MUST ignore that expectation."
+ if request.http_version < b"1.1": + return False + expect = get_comma_header(request.headers, b"expect") + return b"100-continue" in expect diff --git a/venv/lib/python3.10/site-packages/h11/_readers.py b/venv/lib/python3.10/site-packages/h11/_readers.py new file mode 100644 index 0000000000000000000000000000000000000000..576804cc282032526e0a932c9853d586a094bad0 --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/_readers.py @@ -0,0 +1,250 @@ +# Code to read HTTP data +# +# Strategy: each reader is a callable which takes a ReceiveBuffer object, and +# either: +# 1) consumes some of it and returns an Event +# 2) raises a LocalProtocolError (for consistency -- e.g. we call validate() +# and it might raise a LocalProtocolError, so simpler just to always use +# this) +# 3) returns None, meaning "I need more data" +# +# If they have a .read_eof attribute, then this will be called if an EOF is +# received -- but this is optional. Either way, the actual ConnectionClosed +# event will be generated afterwards. +# +# READERS is a dict describing how to pick a reader. It maps states to either: +# - a reader +# - or, for body readers, a dict of per-framing reader factories + +import re +from typing import Any, Callable, Dict, Iterable, NoReturn, Optional, Tuple, Type, Union + +from ._abnf import chunk_header, header_field, request_line, status_line +from ._events import Data, EndOfMessage, InformationalResponse, Request, Response +from ._receivebuffer import ReceiveBuffer +from ._state import ( + CLIENT, + CLOSED, + DONE, + IDLE, + MUST_CLOSE, + SEND_BODY, + SEND_RESPONSE, + SERVER, +) +from ._util import LocalProtocolError, RemoteProtocolError, Sentinel, validate + +__all__ = ["READERS"] + +header_field_re = re.compile(header_field.encode("ascii")) +obs_fold_re = re.compile(rb"[ \t]+") + + +def _obsolete_line_fold(lines: Iterable[bytes]) -> Iterable[bytes]: + it = iter(lines) + last: Optional[bytes] = None + for line in it: + match = obs_fold_re.match(line) + if match: + if last is None: + raise LocalProtocolError("continuation line at start of headers") + if not isinstance(last, bytearray): + # Cast to a mutable type, avoiding copy on append to ensure O(n) time + last = bytearray(last) + last += b" " + last += line[match.end() :] + else: + if last is not None: + yield last + last = line + if last is not None: + yield last + + +def _decode_header_lines( + lines: Iterable[bytes], +) -> Iterable[Tuple[bytes, bytes]]: + for line in _obsolete_line_fold(lines): + matches = validate(header_field_re, line, "illegal header line: {!r}", line) + yield (matches["field_name"], matches["field_value"]) + + +request_line_re = re.compile(request_line.encode("ascii")) + + +def maybe_read_from_IDLE_client(buf: ReceiveBuffer) -> Optional[Request]: + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request line") + return None + if not lines: + raise LocalProtocolError("no request line received") + matches = validate( + request_line_re, lines[0], "illegal request line: {!r}", lines[0] + ) + return Request( + headers=list(_decode_header_lines(lines[1:])), _parsed=True, **matches + ) + + +status_line_re = re.compile(status_line.encode("ascii")) + + +def maybe_read_from_SEND_RESPONSE_server( + buf: ReceiveBuffer, +) -> Union[InformationalResponse, Response, None]: + lines = buf.maybe_extract_lines() + if lines is None: + if buf.is_next_line_obviously_invalid_request_line(): + raise LocalProtocolError("illegal request 
line") + return None + if not lines: + raise LocalProtocolError("no response line received") + matches = validate(status_line_re, lines[0], "illegal status line: {!r}", lines[0]) + http_version = ( + b"1.1" if matches["http_version"] is None else matches["http_version"] + ) + reason = b"" if matches["reason"] is None else matches["reason"] + status_code = int(matches["status_code"]) + class_: Union[Type[InformationalResponse], Type[Response]] = ( + InformationalResponse if status_code < 200 else Response + ) + return class_( + headers=list(_decode_header_lines(lines[1:])), + _parsed=True, + status_code=status_code, + reason=reason, + http_version=http_version, + ) + + +class ContentLengthReader: + def __init__(self, length: int) -> None: + self._length = length + self._remaining = length + + def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]: + if self._remaining == 0: + return EndOfMessage() + data = buf.maybe_extract_at_most(self._remaining) + if data is None: + return None + self._remaining -= len(data) + return Data(data=data) + + def read_eof(self) -> NoReturn: + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(received {} bytes, expected {})".format( + self._length - self._remaining, self._length + ) + ) + + +chunk_header_re = re.compile(chunk_header.encode("ascii")) + + +class ChunkedReader: + def __init__(self) -> None: + self._bytes_in_chunk = 0 + # After reading a chunk, we have to throw away the trailing \r\n. + # This tracks the bytes that we need to match and throw away. + self._bytes_to_discard = b"" + self._reading_trailer = False + + def __call__(self, buf: ReceiveBuffer) -> Union[Data, EndOfMessage, None]: + if self._reading_trailer: + lines = buf.maybe_extract_lines() + if lines is None: + return None + return EndOfMessage(headers=list(_decode_header_lines(lines))) + if self._bytes_to_discard: + data = buf.maybe_extract_at_most(len(self._bytes_to_discard)) + if data is None: + return None + if data != self._bytes_to_discard[: len(data)]: + raise LocalProtocolError( + f"malformed chunk footer: {data!r} (expected {self._bytes_to_discard!r})" + ) + self._bytes_to_discard = self._bytes_to_discard[len(data) :] + if self._bytes_to_discard: + return None + # else, fall through and read some more + assert self._bytes_to_discard == b"" + if self._bytes_in_chunk == 0: + # We need to refill our chunk count + chunk_header = buf.maybe_extract_next_line() + if chunk_header is None: + return None + matches = validate( + chunk_header_re, + chunk_header, + "illegal chunk header: {!r}", + chunk_header, + ) + # XX FIXME: we discard chunk extensions. Does anyone care? 
+ self._bytes_in_chunk = int(matches["chunk_size"], base=16) + if self._bytes_in_chunk == 0: + self._reading_trailer = True + return self(buf) + chunk_start = True + else: + chunk_start = False + assert self._bytes_in_chunk > 0 + data = buf.maybe_extract_at_most(self._bytes_in_chunk) + if data is None: + return None + self._bytes_in_chunk -= len(data) + if self._bytes_in_chunk == 0: + self._bytes_to_discard = b"\r\n" + chunk_end = True + else: + chunk_end = False + return Data(data=data, chunk_start=chunk_start, chunk_end=chunk_end) + + def read_eof(self) -> NoReturn: + raise RemoteProtocolError( + "peer closed connection without sending complete message body " + "(incomplete chunked read)" + ) + + +class Http10Reader: + def __call__(self, buf: ReceiveBuffer) -> Optional[Data]: + data = buf.maybe_extract_at_most(999999999) + if data is None: + return None + return Data(data=data) + + def read_eof(self) -> EndOfMessage: + return EndOfMessage() + + +def expect_nothing(buf: ReceiveBuffer) -> None: + if buf: + raise LocalProtocolError("Got data when expecting EOF") + return None + + +ReadersType = Dict[ + Union[Type[Sentinel], Tuple[Type[Sentinel], Type[Sentinel]]], + Union[Callable[..., Any], Dict[str, Callable[..., Any]]], +] + +READERS: ReadersType = { + (CLIENT, IDLE): maybe_read_from_IDLE_client, + (SERVER, IDLE): maybe_read_from_SEND_RESPONSE_server, + (SERVER, SEND_RESPONSE): maybe_read_from_SEND_RESPONSE_server, + (CLIENT, DONE): expect_nothing, + (CLIENT, MUST_CLOSE): expect_nothing, + (CLIENT, CLOSED): expect_nothing, + (SERVER, DONE): expect_nothing, + (SERVER, MUST_CLOSE): expect_nothing, + (SERVER, CLOSED): expect_nothing, + SEND_BODY: { + "chunked": ChunkedReader, + "content-length": ContentLengthReader, + "http/1.0": Http10Reader, + }, +} diff --git a/venv/lib/python3.10/site-packages/h11/_receivebuffer.py b/venv/lib/python3.10/site-packages/h11/_receivebuffer.py new file mode 100644 index 0000000000000000000000000000000000000000..e5c4e08a56f5081e87103f38b4add6ce1b730204 --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/_receivebuffer.py @@ -0,0 +1,153 @@ +import re +import sys +from typing import List, Optional, Union + +__all__ = ["ReceiveBuffer"] + + +# Operations we want to support: +# - find next \r\n or \r\n\r\n (\n or \n\n are also acceptable), +# or wait until there is one +# - read at-most-N bytes +# Goals: +# - on average, do this fast +# - worst case, do this in O(n) where n is the number of bytes processed +# Plan: +# - store bytearray, offset, how far we've searched for a separator token +# - use the how-far-we've-searched data to avoid rescanning +# - while doing a stream of uninterrupted processing, advance offset instead +# of constantly copying +# WARNING: +# - I haven't benchmarked or profiled any of this yet. +# +# Note that starting in Python 3.4, deleting the initial n bytes from a +# bytearray is amortized O(n), thanks to some excellent work by Antoine +# Martin: +# +# https://bugs.python.org/issue19087 +# +# This means that if we only supported 3.4+, we could get rid of the code here +# involving self._start and self.compress, because it's doing exactly the same +# thing that bytearray now does internally. +# +# BUT unfortunately, we still support 2.7, and reading short segments out of a +# long buffer MUST be O(bytes read) to avoid DoS issues, so we can't actually +# delete this code. 
Yet: +# +# https://pythonclock.org/ +# +# (Two things to double-check first though: make sure PyPy also has the +# optimization, and benchmark to make sure it's a win, since we do have a +# slightly clever thing where we delay calling compress() until we've +# processed a whole event, which could in theory be slightly more efficient +# than the internal bytearray support.) +blank_line_regex = re.compile(b"\n\r?\n", re.MULTILINE) + + +class ReceiveBuffer: + def __init__(self) -> None: + self._data = bytearray() + self._next_line_search = 0 + self._multiple_lines_search = 0 + + def __iadd__(self, byteslike: Union[bytes, bytearray]) -> "ReceiveBuffer": + self._data += byteslike + return self + + def __bool__(self) -> bool: + return bool(len(self)) + + def __len__(self) -> int: + return len(self._data) + + # for @property unprocessed_data + def __bytes__(self) -> bytes: + return bytes(self._data) + + def _extract(self, count: int) -> bytearray: + # extracting an initial slice of the data buffer and return it + out = self._data[:count] + del self._data[:count] + + self._next_line_search = 0 + self._multiple_lines_search = 0 + + return out + + def maybe_extract_at_most(self, count: int) -> Optional[bytearray]: + """ + Extract a fixed number of bytes from the buffer. + """ + out = self._data[:count] + if not out: + return None + + return self._extract(count) + + def maybe_extract_next_line(self) -> Optional[bytearray]: + """ + Extract the first line, if it is completed in the buffer. + """ + # Only search in buffer space that we've not already looked at. + search_start_index = max(0, self._next_line_search - 1) + partial_idx = self._data.find(b"\r\n", search_start_index) + + if partial_idx == -1: + self._next_line_search = len(self._data) + return None + + # + 2 is to compensate len(b"\r\n") + idx = partial_idx + 2 + + return self._extract(idx) + + def maybe_extract_lines(self) -> Optional[List[bytearray]]: + """ + Extract everything up to the first blank line, and return a list of lines. + """ + # Handle the case where we have an immediate empty line. + if self._data[:1] == b"\n": + self._extract(1) + return [] + + if self._data[:2] == b"\r\n": + self._extract(2) + return [] + + # Only search in buffer space that we've not already looked at. + match = blank_line_regex.search(self._data, self._multiple_lines_search) + if match is None: + self._multiple_lines_search = max(0, len(self._data) - 2) + return None + + # Truncate the buffer and return it. + idx = match.span(0)[-1] + out = self._extract(idx) + lines = out.split(b"\n") + + for line in lines: + if line.endswith(b"\r"): + del line[-1] + + assert lines[-2] == lines[-1] == b"" + + del lines[-2:] + + return lines + + # In theory we should wait until `\r\n` before starting to validate + # incoming data. However it's interesting to detect (very) invalid data + # early given they might not even contain `\r\n` at all (hence only + # timeout will get rid of them). + # This is not a 100% effective detection but more of a cheap sanity check + # allowing for early abort in some useful cases. + # This is especially interesting when peer is messing up with HTTPS and + # sent us a TLS stream where we were expecting plain HTTP given all + # versions of TLS so far start handshake with a 0x16 message type code. 
+ def is_next_line_obviously_invalid_request_line(self) -> bool: + try: + # HTTP header line must not contain non-printable characters + # and should not start with a space + return self._data[0] < 0x21 + except IndexError: + return False diff --git a/venv/lib/python3.10/site-packages/h11/_state.py b/venv/lib/python3.10/site-packages/h11/_state.py new file mode 100644 index 0000000000000000000000000000000000000000..3ad444b043e3f3d6c05c2d9d84d5119312bfaa34 --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/_state.py @@ -0,0 +1,365 @@ +################################################################ +# The core state machine +################################################################ +# +# Rule 1: everything that affects the state machine and state transitions must +# live here in this file. As much as possible goes into the table-based +# representation, but for the bits that don't quite fit, the actual code and +# state must nonetheless live here. +# +# Rule 2: this file does not know about what role we're playing; it only knows +# about HTTP request/response cycles in the abstract. This ensures that we +# don't cheat and apply different rules to local and remote parties. +# +# +# Theory of operation +# =================== +# +# Possibly the simplest way to think about this is that we actually have 5 +# different state machines here. Yes, 5. These are: +# +# 1) The client state, with its complicated automaton (see the docs) +# 2) The server state, with its complicated automaton (see the docs) +# 3) The keep-alive state, with possible states {True, False} +# 4) The SWITCH_CONNECT state, with possible states {False, True} +# 5) The SWITCH_UPGRADE state, with possible states {False, True} +# +# For (3)-(5), the first state listed is the initial state. +# +# (1)-(3) are stored explicitly in member variables. The last +# two are stored implicitly in the pending_switch_proposals set as: +# (state of 4) == (_SWITCH_CONNECT in pending_switch_proposals) +# (state of 5) == (_SWITCH_UPGRADE in pending_switch_proposals) +# +# And each of these machines has two different kinds of transitions: +# +# a) Event-triggered +# b) State-triggered +# +# Event triggered is the obvious thing that you'd think it is: some event +# happens, and if it's the right event at the right time then a transition +# happens. But there are somewhat complicated rules for which machines can +# "see" which events. (As a rule of thumb, if a machine "sees" an event, this +# means two things: the event can affect the machine, and if the machine is +# not in a state where it expects that event then it's an error.) These rules +# are: +# +# 1) The client machine sees all h11.events objects emitted by the client. +# +# 2) The server machine sees all h11.events objects emitted by the server. +# +# It also sees the client's Request event. +# +# And sometimes, server events are annotated with a _SWITCH_* event. For +# example, we can have a (Response, _SWITCH_CONNECT) event, which is +# different from a regular Response event. +# +# 3) The keep-alive machine sees the process_keep_alive_disabled() event +# (which is derived from Request/Response events), and this event +# transitions it from True -> False, or from False -> False. There's no way +# to transition back. +# +# 4&5) The _SWITCH_* machines transition from False->True when we get a +# Request that proposes the relevant type of switch (via +# process_client_switch_proposals), and they go from True->False when we +# get a Response that has no _SWITCH_* annotation. 
+# +# So that's event-triggered transitions. +# +# State-triggered transitions are less standard. What they do here is couple +# the machines together. The way this works is, when certain *joint* +# configurations of states are achieved, then we automatically transition to a +# new *joint* state. So, for example, if we're ever in a joint state with +# +# client: DONE +# keep-alive: False +# +# then the client state immediately transitions to: +# +# client: MUST_CLOSE +# +# This is fundamentally different from an event-based transition, because it +# doesn't matter how we arrived at the {client: DONE, keep-alive: False} state +# -- maybe the client transitioned SEND_BODY -> DONE, or keep-alive +# transitioned True -> False. Either way, once this precondition is satisfied, +# this transition is immediately triggered. +# +# What if two conflicting state-based transitions get enabled at the same +# time? In practice there's only one case where this arises (client DONE -> +# MIGHT_SWITCH_PROTOCOL versus DONE -> MUST_CLOSE), and we resolve it by +# explicitly prioritizing the DONE -> MIGHT_SWITCH_PROTOCOL transition. +# +# Implementation +# -------------- +# +# The event-triggered transitions for the server and client machines are all +# stored explicitly in a table. Ditto for the state-triggered transitions that +# involve just the server and client state. +# +# The transitions for the other machines, and the state-triggered transitions +# that involve the other machines, are written out as explicit Python code. +# +# It'd be nice if there were some cleaner way to do all this. This isn't +# *too* terrible, but I feel like it could probably be better. +# +# WARNING +# ------- +# +# The script that generates the state machine diagrams for the docs knows how +# to read out the EVENT_TRIGGERED_TRANSITIONS and STATE_TRIGGERED_TRANSITIONS +# tables. But it can't automatically read the transitions that are written +# directly in Python code. So if you touch those, you need to also update the +# script to keep it in sync! +from typing import cast, Dict, Optional, Set, Tuple, Type, Union + +from ._events import * +from ._util import LocalProtocolError, Sentinel + +# Everything in __all__ gets re-exported as part of the h11 public API. 
+__all__ = [ + "CLIENT", + "SERVER", + "IDLE", + "SEND_RESPONSE", + "SEND_BODY", + "DONE", + "MUST_CLOSE", + "CLOSED", + "MIGHT_SWITCH_PROTOCOL", + "SWITCHED_PROTOCOL", + "ERROR", +] + + +class CLIENT(Sentinel, metaclass=Sentinel): + pass + + +class SERVER(Sentinel, metaclass=Sentinel): + pass + + +# States +class IDLE(Sentinel, metaclass=Sentinel): + pass + + +class SEND_RESPONSE(Sentinel, metaclass=Sentinel): + pass + + +class SEND_BODY(Sentinel, metaclass=Sentinel): + pass + + +class DONE(Sentinel, metaclass=Sentinel): + pass + + +class MUST_CLOSE(Sentinel, metaclass=Sentinel): + pass + + +class CLOSED(Sentinel, metaclass=Sentinel): + pass + + +class ERROR(Sentinel, metaclass=Sentinel): + pass + + +# Switch types +class MIGHT_SWITCH_PROTOCOL(Sentinel, metaclass=Sentinel): + pass + + +class SWITCHED_PROTOCOL(Sentinel, metaclass=Sentinel): + pass + + +class _SWITCH_UPGRADE(Sentinel, metaclass=Sentinel): + pass + + +class _SWITCH_CONNECT(Sentinel, metaclass=Sentinel): + pass + + +EventTransitionType = Dict[ + Type[Sentinel], + Dict[ + Type[Sentinel], + Dict[Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], Type[Sentinel]], + ], +] + +EVENT_TRIGGERED_TRANSITIONS: EventTransitionType = { + CLIENT: { + IDLE: {Request: SEND_BODY, ConnectionClosed: CLOSED}, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + MIGHT_SWITCH_PROTOCOL: {}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, + SERVER: { + IDLE: { + ConnectionClosed: CLOSED, + Response: SEND_BODY, + # Special case: server sees client Request events, in this form + (Request, CLIENT): SEND_RESPONSE, + }, + SEND_RESPONSE: { + InformationalResponse: SEND_RESPONSE, + Response: SEND_BODY, + (InformationalResponse, _SWITCH_UPGRADE): SWITCHED_PROTOCOL, + (Response, _SWITCH_CONNECT): SWITCHED_PROTOCOL, + }, + SEND_BODY: {Data: SEND_BODY, EndOfMessage: DONE}, + DONE: {ConnectionClosed: CLOSED}, + MUST_CLOSE: {ConnectionClosed: CLOSED}, + CLOSED: {ConnectionClosed: CLOSED}, + SWITCHED_PROTOCOL: {}, + ERROR: {}, + }, +} + +StateTransitionType = Dict[ + Tuple[Type[Sentinel], Type[Sentinel]], Dict[Type[Sentinel], Type[Sentinel]] +] + +# NB: there are also some special-case state-triggered transitions hard-coded +# into _fire_state_triggered_transitions below. +STATE_TRIGGERED_TRANSITIONS: StateTransitionType = { + # (Client state, Server state) -> new states + # Protocol negotiation + (MIGHT_SWITCH_PROTOCOL, SWITCHED_PROTOCOL): {CLIENT: SWITCHED_PROTOCOL}, + # Socket shutdown + (CLOSED, DONE): {SERVER: MUST_CLOSE}, + (CLOSED, IDLE): {SERVER: MUST_CLOSE}, + (ERROR, DONE): {SERVER: MUST_CLOSE}, + (DONE, CLOSED): {CLIENT: MUST_CLOSE}, + (IDLE, CLOSED): {CLIENT: MUST_CLOSE}, + (DONE, ERROR): {CLIENT: MUST_CLOSE}, +} + + +class ConnectionState: + def __init__(self) -> None: + # Extra bits of state that don't quite fit into the state model. + + # If this is False then it enables the automatic DONE -> MUST_CLOSE + # transition. Don't set this directly; call .keep_alive_disabled() + self.keep_alive = True + + # This is a subset of {UPGRADE, CONNECT}, containing the proposals + # made by the client for switching protocols. 
+ self.pending_switch_proposals: Set[Type[Sentinel]] = set() + + self.states: Dict[Type[Sentinel], Type[Sentinel]] = {CLIENT: IDLE, SERVER: IDLE} + + def process_error(self, role: Type[Sentinel]) -> None: + self.states[role] = ERROR + self._fire_state_triggered_transitions() + + def process_keep_alive_disabled(self) -> None: + self.keep_alive = False + self._fire_state_triggered_transitions() + + def process_client_switch_proposal(self, switch_event: Type[Sentinel]) -> None: + self.pending_switch_proposals.add(switch_event) + self._fire_state_triggered_transitions() + + def process_event( + self, + role: Type[Sentinel], + event_type: Type[Event], + server_switch_event: Optional[Type[Sentinel]] = None, + ) -> None: + _event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]] = event_type + if server_switch_event is not None: + assert role is SERVER + if server_switch_event not in self.pending_switch_proposals: + raise LocalProtocolError( + "Received server _SWITCH_UPGRADE event without a pending proposal" + ) + _event_type = (event_type, server_switch_event) + if server_switch_event is None and _event_type is Response: + self.pending_switch_proposals = set() + self._fire_event_triggered_transitions(role, _event_type) + # Special case: the server state does get to see Request + # events. + if _event_type is Request: + assert role is CLIENT + self._fire_event_triggered_transitions(SERVER, (Request, CLIENT)) + self._fire_state_triggered_transitions() + + def _fire_event_triggered_transitions( + self, + role: Type[Sentinel], + event_type: Union[Type[Event], Tuple[Type[Event], Type[Sentinel]]], + ) -> None: + state = self.states[role] + try: + new_state = EVENT_TRIGGERED_TRANSITIONS[role][state][event_type] + except KeyError: + event_type = cast(Type[Event], event_type) + raise LocalProtocolError( + "can't handle event type {} when role={} and state={}".format( + event_type.__name__, role, self.states[role] + ) + ) from None + self.states[role] = new_state + + def _fire_state_triggered_transitions(self) -> None: + # We apply these rules repeatedly until converging on a fixed point + while True: + start_states = dict(self.states) + + # It could happen that both these special-case transitions are + # enabled at the same time: + # + # DONE -> MIGHT_SWITCH_PROTOCOL + # DONE -> MUST_CLOSE + # + # For example, this will always be true of a HTTP/1.0 client + # requesting CONNECT. If this happens, the protocol switch takes + # priority. From there the client will either go to + # SWITCHED_PROTOCOL, in which case it's none of our business when + # they close the connection, or else the server will deny the + # request, in which case the client will go back to DONE and then + # from there to MUST_CLOSE. + if self.pending_switch_proposals: + if self.states[CLIENT] is DONE: + self.states[CLIENT] = MIGHT_SWITCH_PROTOCOL + + if not self.pending_switch_proposals: + if self.states[CLIENT] is MIGHT_SWITCH_PROTOCOL: + self.states[CLIENT] = DONE + + if not self.keep_alive: + for role in (CLIENT, SERVER): + if self.states[role] is DONE: + self.states[role] = MUST_CLOSE + + # Tabular state-triggered transitions + joint_state = (self.states[CLIENT], self.states[SERVER]) + changes = STATE_TRIGGERED_TRANSITIONS.get(joint_state, {}) + self.states.update(changes) + + if self.states == start_states: + # Fixed point reached + return + + def start_next_cycle(self) -> None: + if self.states != {CLIENT: DONE, SERVER: DONE}: + raise LocalProtocolError( + f"not in a reusable state. 
self.states={self.states}" + ) + # Can't reach DONE/DONE with any of these active, but still, let's be + # sure. + assert self.keep_alive + assert not self.pending_switch_proposals + self.states = {CLIENT: IDLE, SERVER: IDLE} diff --git a/venv/lib/python3.10/site-packages/h11/_util.py b/venv/lib/python3.10/site-packages/h11/_util.py new file mode 100644 index 0000000000000000000000000000000000000000..6718445290770e028ea2f1f662026c9a0b0991db --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/_util.py @@ -0,0 +1,135 @@ +from typing import Any, Dict, NoReturn, Pattern, Tuple, Type, TypeVar, Union + +__all__ = [ + "ProtocolError", + "LocalProtocolError", + "RemoteProtocolError", + "validate", + "bytesify", +] + + +class ProtocolError(Exception): + """Exception indicating a violation of the HTTP/1.1 protocol. + + This as an abstract base class, with two concrete base classes: + :exc:`LocalProtocolError`, which indicates that you tried to do something + that HTTP/1.1 says is illegal, and :exc:`RemoteProtocolError`, which + indicates that the remote peer tried to do something that HTTP/1.1 says is + illegal. See :ref:`error-handling` for details. + + In addition to the normal :exc:`Exception` features, it has one attribute: + + .. attribute:: error_status_hint + + This gives a suggestion as to what status code a server might use if + this error occurred as part of a request. + + For a :exc:`RemoteProtocolError`, this is useful as a suggestion for + how you might want to respond to a misbehaving peer, if you're + implementing a server. + + For a :exc:`LocalProtocolError`, this can be taken as a suggestion for + how your peer might have responded to *you* if h11 had allowed you to + continue. + + The default is 400 Bad Request, a generic catch-all for protocol + violations. + + """ + + def __init__(self, msg: str, error_status_hint: int = 400) -> None: + if type(self) is ProtocolError: + raise TypeError("tried to directly instantiate ProtocolError") + Exception.__init__(self, msg) + self.error_status_hint = error_status_hint + + +# Strategy: there are a number of public APIs where a LocalProtocolError can +# be raised (send(), all the different event constructors, ...), and only one +# public API where RemoteProtocolError can be raised +# (receive_data()). Therefore we always raise LocalProtocolError internally, +# and then receive_data will translate this into a RemoteProtocolError. +# +# Internally: +# LocalProtocolError is the generic "ProtocolError". +# Externally: +# LocalProtocolError is for local errors and RemoteProtocolError is for +# remote errors. +class LocalProtocolError(ProtocolError): + def _reraise_as_remote_protocol_error(self) -> NoReturn: + # After catching a LocalProtocolError, use this method to re-raise it + # as a RemoteProtocolError. This method must be called from inside an + # except: block. + # + # An easy way to get an equivalent RemoteProtocolError is just to + # modify 'self' in place. + self.__class__ = RemoteProtocolError # type: ignore + # But the re-raising is somewhat non-trivial -- you might think that + # now that we've modified the in-flight exception object, that just + # doing 'raise' to re-raise it would be enough. But it turns out that + # this doesn't work, because Python tracks the exception type + # (exc_info[0]) separately from the exception object (exc_info[1]), + # and we only modified the latter. So we really do need to re-raise + # the new type explicitly. 
+ # On py3, the traceback is part of the exception object, so our + # in-place modification preserved it and we can just re-raise: + raise self + + +class RemoteProtocolError(ProtocolError): + pass + + +def validate( + regex: Pattern[bytes], data: bytes, msg: str = "malformed data", *format_args: Any +) -> Dict[str, bytes]: + match = regex.fullmatch(data) + if not match: + if format_args: + msg = msg.format(*format_args) + raise LocalProtocolError(msg) + return match.groupdict() + + +# Sentinel values +# +# - Inherit identity-based comparison and hashing from object +# - Have a nice repr +# - Have a *bonus property*: type(sentinel) is sentinel +# +# The bonus property is useful if you want to take the return value from +# next_event() and do some sort of dispatch based on type(event). + +_T_Sentinel = TypeVar("_T_Sentinel", bound="Sentinel") + + +class Sentinel(type): + def __new__( + cls: Type[_T_Sentinel], + name: str, + bases: Tuple[type, ...], + namespace: Dict[str, Any], + **kwds: Any + ) -> _T_Sentinel: + assert bases == (Sentinel,) + v = super().__new__(cls, name, bases, namespace, **kwds) + v.__class__ = v # type: ignore + return v + + def __repr__(self) -> str: + return self.__name__ + + +# Used for methods, request targets, HTTP versions, header names, and header +# values. Accepts ascii-strings, or bytes/bytearray/memoryview/..., and always +# returns bytes. +def bytesify(s: Union[bytes, bytearray, memoryview, int, str]) -> bytes: + # Fast-path: + if type(s) is bytes: + return s + if isinstance(s, str): + s = s.encode("ascii") + if isinstance(s, int): + raise TypeError("expected bytes-like object, not int") + return bytes(s) diff --git a/venv/lib/python3.10/site-packages/h11/_version.py b/venv/lib/python3.10/site-packages/h11/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..76e7327b8617c9d12236f511414d5eb58e98a44b --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/_version.py @@ -0,0 +1,16 @@ +# This file must be kept very simple, because it is consumed from several +# places -- it is imported by h11/__init__.py, execfile'd by setup.py, etc. + +# We use a simple scheme: +# 1.0.0 -> 1.0.0+dev -> 1.1.0 -> 1.1.0+dev +# where the +dev versions are never released into the wild, they're just what +# we stick into the VCS in between releases. +# +# This is compatible with PEP 440: +# http://legacy.python.org/dev/peps/pep-0440/ +# via the use of the "local suffix" "+dev", which is disallowed on index +# servers and causes 1.0.0+dev to sort after plain 1.0.0, which is what we +# want. (Contrast with the special suffix 1.0.0.dev, which sorts *before* +# 1.0.0.) + +__version__ = "0.16.0" diff --git a/venv/lib/python3.10/site-packages/h11/_writers.py b/venv/lib/python3.10/site-packages/h11/_writers.py new file mode 100644 index 0000000000000000000000000000000000000000..939cdb912a9debaea07fbf3a9ac04549c44d077c --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/_writers.py @@ -0,0 +1,145 @@ +# Code to read HTTP data +# +# Strategy: each writer takes an event + a write-some-bytes function, which is +# calls. +# +# WRITERS is a dict describing how to pick a reader. 
It maps states to either: +# - a writer +# - or, for body writers, a dict of framin-dependent writer factories + +from typing import Any, Callable, Dict, List, Tuple, Type, Union + +from ._events import Data, EndOfMessage, Event, InformationalResponse, Request, Response +from ._headers import Headers +from ._state import CLIENT, IDLE, SEND_BODY, SEND_RESPONSE, SERVER +from ._util import LocalProtocolError, Sentinel + +__all__ = ["WRITERS"] + +Writer = Callable[[bytes], Any] + + +def write_headers(headers: Headers, write: Writer) -> None: + # "Since the Host field-value is critical information for handling a + # request, a user agent SHOULD generate Host as the first header field + # following the request-line." - RFC 7230 + raw_items = headers._full_items + for raw_name, name, value in raw_items: + if name == b"host": + write(b"%s: %s\r\n" % (raw_name, value)) + for raw_name, name, value in raw_items: + if name != b"host": + write(b"%s: %s\r\n" % (raw_name, value)) + write(b"\r\n") + + +def write_request(request: Request, write: Writer) -> None: + if request.http_version != b"1.1": + raise LocalProtocolError("I only send HTTP/1.1") + write(b"%s %s HTTP/1.1\r\n" % (request.method, request.target)) + write_headers(request.headers, write) + + +# Shared between InformationalResponse and Response +def write_any_response( + response: Union[InformationalResponse, Response], write: Writer +) -> None: + if response.http_version != b"1.1": + raise LocalProtocolError("I only send HTTP/1.1") + status_bytes = str(response.status_code).encode("ascii") + # We don't bother sending ascii status messages like "OK"; they're + # optional and ignored by the protocol. (But the space after the numeric + # status code is mandatory.) + # + # XX FIXME: could at least make an effort to pull out the status message + # from stdlib's http.HTTPStatus table. Or maybe just steal their enums + # (either by import or copy/paste). We already accept them as status codes + # since they're of type IntEnum < int. + write(b"HTTP/1.1 %s %s\r\n" % (status_bytes, response.reason)) + write_headers(response.headers, write) + + +class BodyWriter: + def __call__(self, event: Event, write: Writer) -> None: + if type(event) is Data: + self.send_data(event.data, write) + elif type(event) is EndOfMessage: + self.send_eom(event.headers, write) + else: # pragma: no cover + assert False + + def send_data(self, data: bytes, write: Writer) -> None: + pass + + def send_eom(self, headers: Headers, write: Writer) -> None: + pass + + +# +# These are all careful not to do anything to 'data' except call len(data) and +# write(data). This allows us to transparently pass-through funny objects, +# like placeholder objects referring to files on disk that will be sent via +# sendfile(2). +# +class ContentLengthWriter(BodyWriter): + def __init__(self, length: int) -> None: + self._length = length + + def send_data(self, data: bytes, write: Writer) -> None: + self._length -= len(data) + if self._length < 0: + raise LocalProtocolError("Too much data for declared Content-Length") + write(data) + + def send_eom(self, headers: Headers, write: Writer) -> None: + if self._length != 0: + raise LocalProtocolError("Too little data for declared Content-Length") + if headers: + raise LocalProtocolError("Content-Length and trailers don't mix") + + +class ChunkedWriter(BodyWriter): + def send_data(self, data: bytes, write: Writer) -> None: + # if we encoded 0-length data in the naive way, it would look like an + # end-of-message. 
+ if not data: + return + write(b"%x\r\n" % len(data)) + write(data) + write(b"\r\n") + + def send_eom(self, headers: Headers, write: Writer) -> None: + write(b"0\r\n") + write_headers(headers, write) + + +class Http10Writer(BodyWriter): + def send_data(self, data: bytes, write: Writer) -> None: + write(data) + + def send_eom(self, headers: Headers, write: Writer) -> None: + if headers: + raise LocalProtocolError("can't send trailers to HTTP/1.0 client") + # no need to close the socket ourselves, that will be taken care of by + # Connection: close machinery + + +WritersType = Dict[ + Union[Tuple[Type[Sentinel], Type[Sentinel]], Type[Sentinel]], + Union[ + Dict[str, Type[BodyWriter]], + Callable[[Union[InformationalResponse, Response], Writer], None], + Callable[[Request, Writer], None], + ], +] + +WRITERS: WritersType = { + (CLIENT, IDLE): write_request, + (SERVER, IDLE): write_any_response, + (SERVER, SEND_RESPONSE): write_any_response, + SEND_BODY: { + "chunked": ChunkedWriter, + "content-length": ContentLengthWriter, + "http/1.0": Http10Writer, + }, +} diff --git a/venv/lib/python3.10/site-packages/h11/py.typed b/venv/lib/python3.10/site-packages/h11/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..f5642f79f21d872f010979dcf6f0c4a415acc19d --- /dev/null +++ b/venv/lib/python3.10/site-packages/h11/py.typed @@ -0,0 +1 @@ +Marker diff --git a/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/INSTALLER b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/METADATA b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..092b5b25e2baf8f2c506ffef7e2378e72ef6ccb1 --- /dev/null +++ b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/METADATA @@ -0,0 +1,87 @@ +Metadata-Version: 2.4 +Name: hf-xet +Version: 1.3.1 +Classifier: Development Status :: 5 - Production/Stable +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Rust +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: 3.14 +Classifier: Programming Language :: Python :: Free Threading +Classifier: Programming Language :: Python :: Free Threading :: 2 - Beta +Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence +Requires-Dist: pytest ; extra == 'tests' +Provides-Extra: tests +License-File: LICENSE +Summary: Fast transfer of large files with the Hugging Face Hub. 
+Maintainer-email: Rajat Arya , Jared Sulzdorf , Di Xiao , Assaf Vayner , Hoyt Koepke +License-Expression: Apache-2.0 +Requires-Python: >=3.8 +Description-Content-Type: text/markdown; charset=UTF-8; variant=GFM +Project-URL: Documentation, https://huggingface.co/docs/hub/xet/index +Project-URL: Homepage, https://github.com/huggingface/xet-core +Project-URL: Issues, https://github.com/huggingface/xet-core/issues +Project-URL: Repository, https://github.com/huggingface/xet-core.git + + +

+[badges: License, GitHub release, Contributor Covenant]
+
+# 🤗 hf-xet - xet client tech, used in huggingface_hub
+
+## Welcome
+
+`hf-xet` enables `huggingface_hub` to utilize xet storage for uploading to and downloading from the HF Hub. Xet storage provides chunk-based deduplication, efficient storage/retrieval with local disk caching, and backwards compatibility with Git LFS. This library is not meant to be used directly, and is instead intended to be used from [huggingface_hub](https://pypi.org/project/huggingface-hub).
+
+## Key features
+
+♻ **chunk-based deduplication implementation**: avoid transferring and storing chunks that are shared across binary files (models, datasets, etc.).
+
+🤗 **Python bindings**: bindings for the [huggingface_hub](https://github.com/huggingface/huggingface_hub/) package.
+
+↔ **network communications**: concurrent communication to HF Hub Xet backend services (CAS).
+
+🔖 **local disk caching**: chunk-based cache that sits alongside the existing [huggingface_hub disk cache](https://huggingface.co/docs/huggingface_hub/guides/manage-cache).
+
+## Installation
+
+Install the `hf_xet` package with [pip](https://pypi.org/project/hf-xet/):
+
+```bash
+pip install hf_xet
+```
+
+## Quick Start
+
+`hf_xet` is not intended to be run independently; it is expected to be used from `huggingface_hub`. To get started, check out the `huggingface_hub` documentation [here](https://hf.co/docs/huggingface_hub), and see the sketch at the end of this README.
+
+## Contributions (feature requests, bugs, etc.) are encouraged & appreciated 💙💚💛💜🧡❤️
+
+Please join us in making hf-xet better. We value everyone's contributions. Code is not the only way to help. Answering questions, helping each other, improving documentation, and filing issues all help immensely. If you are interested in contributing (please do!), check out the [contribution guide](https://github.com/huggingface/xet-core/blob/main/CONTRIBUTING.md) for this repository.
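+## How it's used (sketch)
+
+`hf_xet` exposes no public API of its own, so the only way to exercise it is through `huggingface_hub`. The following is a minimal sketch, assuming a recent `huggingface_hub` release (which picks up `hf_xet` automatically when installed); the repo id below is a public model chosen purely for illustration:
+
+```python
+from huggingface_hub import hf_hub_download
+
+# When the target repo is Xet-enabled, recent huggingface_hub versions route
+# this download through hf_xet (chunk-deduplicated transfer plus the local
+# chunk cache); otherwise it falls back to the regular HTTP/LFS path. No
+# hf_xet-specific calls are needed.
+path = hf_hub_download(repo_id="gpt2", filename="config.json")
+print(path)
+```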
diff --git a/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/RECORD b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..142b0a6908cb8680aa913f10c1c493ed5e3d0fd8 --- /dev/null +++ b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/RECORD @@ -0,0 +1,9 @@ +hf_xet-1.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +hf_xet-1.3.1.dist-info/METADATA,sha256=c8EMP8rcGk7-_0Pyg8YiB_La4qawIVQTc8NKjf9Z6Cw,4882 +hf_xet-1.3.1.dist-info/RECORD,, +hf_xet-1.3.1.dist-info/WHEEL,sha256=ycZRBBQNh-_vBSCR_X0ck7NlL8NDEI7G-tWH-Cjb8uw,143 +hf_xet-1.3.1.dist-info/licenses/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357 +hf_xet-1.3.1.dist-info/sboms/hf_xet.cyclonedx.json,sha256=lvKUn39ZTtvl0XcofThR8x3TLhsIqk0bst_qM1-uTJ0,394826 +hf_xet/__init__.py,sha256=E8UDdyQ8glZ_nve9hHEf22bPang8-RKx4VuApXYeQUo,107 +hf_xet/__pycache__/__init__.cpython-310.pyc,, +hf_xet/hf_xet.abi3.so,sha256=rNWSxzRlW0YzjOwRfMAgzAE80HXbAW3_HR8nas-2WOg,10589672 diff --git a/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/WHEEL b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..e3c7eedd8e94fbe7277127c82058423fa8d80b38 --- /dev/null +++ b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: maturin (1.12.4) +Root-Is-Purelib: false +Tag: cp37-abi3-manylinux_2_17_x86_64 +Tag: cp37-abi3-manylinux2014_x86_64 diff --git a/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/licenses/LICENSE b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..261eeb9e9f8b2b4b0d119366dda99c6fd7d35c64 --- /dev/null +++ b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/licenses/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/sboms/hf_xet.cyclonedx.json b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/sboms/hf_xet.cyclonedx.json new file mode 100644 index 0000000000000000000000000000000000000000..d4a65a054762366d7ea12295ca099b7e2ac3f296 --- /dev/null +++ b/venv/lib/python3.10/site-packages/hf_xet-1.3.1.dist-info/sboms/hf_xet.cyclonedx.json @@ -0,0 +1,12019 @@ +{ + "bomFormat": "CycloneDX", + "specVersion": "1.5", + "version": 1, + "serialNumber": "urn:uuid:ede838f0-3b72-4297-b920-6a643907fcf7", + "metadata": { + "timestamp": "2026-02-25T00:48:53.825889783Z", + "tools": [ + { + "vendor": "CycloneDX", + "name": "cargo-cyclonedx", + "version": "0.5.7" + } + ], + "component": { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/hf_xet#1.3.1", + "name": "hf_xet", + "version": "1.3.1", + "scope": "required", + "licenses": [ + { + "expression": "Apache-2.0" + } + ], + "purl": "pkg:cargo/hf_xet@1.3.1?download_url=file://.", + "components": [ + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/hf_xet#1.3.1 bin-target-0", + "name": "hf_xet", + "version": "1.3.1", + "purl": "pkg:cargo/hf_xet@1.3.1?download_url=file://.#src/lib.rs" + } + ] + }, + "properties": [ + { + "name": "cdx:rustc:sbom:target:all_targets", + "value": "true" + } + ] + }, + "components": [ + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/cas_client#0.14.5", + "name": "cas_client", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/cas_client@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/cas_client" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/cas_object#0.1.0", + "name": "cas_object", + "version": "0.1.0", + "scope": "required", + "purl": "pkg:cargo/cas_object@0.1.0?download_url=file:///home/runner/work/xet-core/xet-core/cas_object" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/cas_types#0.1.0", + "name": "cas_types", + "version": "0.1.0", + "scope": "required", + "purl": "pkg:cargo/cas_types@0.1.0?download_url=file:///home/runner/work/xet-core/xet-core/cas_types" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/data#0.14.5", + "name": "data", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/data@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/data" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/deduplication#0.14.5", + "name": "deduplication", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/deduplication@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/deduplication" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/error_printer#0.14.5", + "name": "error_printer", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/error_printer@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/error_printer" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/file_reconstruction#0.14.5", + "name": "file_reconstruction", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/file_reconstruction@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/file_reconstruction" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/file_utils#0.14.2", + 
"name": "file_utils", + "version": "0.14.2", + "scope": "required", + "purl": "pkg:cargo/file_utils@0.14.2?download_url=file:///home/runner/work/xet-core/xet-core/file_utils" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/hub_client#0.1.0", + "name": "hub_client", + "version": "0.1.0", + "scope": "required", + "purl": "pkg:cargo/hub_client@0.1.0?download_url=file:///home/runner/work/xet-core/xet-core/hub_client" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/mdb_shard#0.14.5", + "name": "mdb_shard", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/mdb_shard@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/mdb_shard" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "name": "merklehash", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/merklehash@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/merklehash" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/progress_tracking#0.1.0", + "name": "progress_tracking", + "version": "0.1.0", + "scope": "required", + "purl": "pkg:cargo/progress_tracking@0.1.0?download_url=file:///home/runner/work/xet-core/xet-core/progress_tracking" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "name": "utils", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/utils@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/utils" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/xet_config#0.14.5", + "name": "xet_config", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/xet_config@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/xet_config" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/xet_logging#0.14.5", + "name": "xet_logging", + "version": "0.14.5", + "scope": "required", + "purl": "pkg:cargo/xet_logging@0.14.5?download_url=file:///home/runner/work/xet-core/xet-core/xet_logging" + }, + { + "type": "library", + "bom-ref": "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0", + "name": "xet_runtime", + "version": "0.1.0", + "scope": "required", + "purl": "pkg:cargo/xet_runtime@0.1.0?download_url=file:///home/runner/work/xet-core/xet-core/xet_runtime" + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#aho-corasick@1.1.4", + "author": "Andrew Gallant ", + "name": "aho-corasick", + "version": "1.1.4", + "description": "Fast multiple substring searching.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" + } + ], + "licenses": [ + { + "expression": "Unlicense OR MIT" + } + ], + "purl": "pkg:cargo/aho-corasick@1.1.4", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/BurntSushi/aho-corasick" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/aho-corasick" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#anstream@0.6.21", + "name": "anstream", + "version": "0.6.21", + "description": "IO stream adapters for writing colored text that will gracefully degrade according to your terminal's capabilities.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + 
"content": "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/anstream@0.6.21", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-cli/anstyle.git" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#anstyle-parse@0.2.7", + "name": "anstyle-parse", + "version": "0.2.7", + "description": "Parse ANSI Style Escapes", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "4e7644824f0aa2c7b9384579234ef10eb7efb6a0deb83f9630a49594dd9c15c2" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/anstyle-parse@0.2.7", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-cli/anstyle.git" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#anstyle-query@1.1.5", + "name": "anstyle-query", + "version": "1.1.5", + "description": "Look up colored console capabilities", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/anstyle-query@1.1.5", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-cli/anstyle.git" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#anstyle@1.0.13", + "name": "anstyle", + "version": "1.0.13", + "description": "ANSI text styling", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/anstyle@1.0.13", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-cli/anstyle.git" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "author": "David Tolnay ", + "name": "anyhow", + "version": "1.0.101", + "description": "Flexible concrete Error type built on std::error::Error", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/anyhow@1.0.101", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/anyhow" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/anyhow" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#approx@0.5.1", + "author": "Brendan Zabarauskas ", + "name": "approx", + "version": "0.5.1", + "description": "Approximate floating point equality comparisons and assertions.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "cab112f0a86d568ea0e627cc1d6be74a1e9cd55214684db5561995f6dad897c6" + } + ], + "licenses": [ + { + "expression": "Apache-2.0" + } + ], + "purl": "pkg:cargo/approx@0.5.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/approx" + }, + { + "type": "website", + "url": "https://github.com/brendanzab/approx" + }, + { + "type": "vcs", + "url": "https://github.com/brendanzab/approx" + } + ] + }, + { + "type": "library", + "bom-ref": 
"registry+https://github.com/rust-lang/crates.io-index#arrayref@0.3.9", + "author": "David Roundy ", + "name": "arrayref", + "version": "0.3.9", + "description": "Macros to take array references of slices", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "76a2e8124351fda1ef8aaaa3bbd7ebbcb486bbcd4225aca0aa0d84bb2db8fecb" + } + ], + "licenses": [ + { + "expression": "BSD-2-Clause" + } + ], + "purl": "pkg:cargo/arrayref@0.3.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/arrayref" + }, + { + "type": "vcs", + "url": "https://github.com/droundy/arrayref" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#arrayvec@0.7.6", + "author": "bluss", + "name": "arrayvec", + "version": "0.7.6", + "description": "A vector with fixed capacity, backed by an array (it can be stored on the stack too). Implements fixed capacity ArrayVec and ArrayString.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/arrayvec@0.7.6", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/arrayvec/" + }, + { + "type": "vcs", + "url": "https://github.com/bluss/arrayvec" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "author": "David Tolnay ", + "name": "async-trait", + "version": "0.1.89", + "description": "Type erasure for async trait methods", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/async-trait@0.1.89", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/async-trait" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/async-trait" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#atomic-waker@1.1.2", + "author": "Stjepan Glavina , Contributors to futures-rs", + "name": "atomic-waker", + "version": "1.1.2", + "description": "A synchronization primitive for task wakeup", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/atomic-waker@1.1.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/smol-rs/atomic-waker" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#autocfg@1.5.0", + "author": "Josh Stone ", + "name": "autocfg", + "version": "1.5.0", + "description": "Automatic cfg for Rust compiler features", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/autocfg@1.5.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/autocfg/" + }, + { + "type": "vcs", + "url": "https://github.com/cuviper/autocfg" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#aws-lc-rs@1.15.4", + "author": "AWS-LibCrypto", + 
"name": "aws-lc-rs", + "version": "1.15.4", + "description": "aws-lc-rs is a cryptographic library using AWS-LC for its cryptographic operations. This library strives to be API-compatible with the popular Rust library named ring.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" + } + ], + "licenses": [ + { + "expression": "ISC AND (Apache-2.0 OR ISC)" + } + ], + "purl": "pkg:cargo/aws-lc-rs@1.15.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/crate/aws-lc-rs" + }, + { + "type": "website", + "url": "https://github.com/aws/aws-lc-rs" + }, + { + "type": "other", + "url": "aws_lc_rs_1_15_4_sys" + }, + { + "type": "vcs", + "url": "https://github.com/aws/aws-lc-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#aws-lc-sys@0.37.0", + "author": "AWS-LC", + "name": "aws-lc-sys", + "version": "0.37.0", + "description": "AWS-LC is a general-purpose cryptographic library maintained by the AWS Cryptography team for AWS and their customers. It іs based on code from the Google BoringSSL project and the OpenSSL project.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a" + } + ], + "licenses": [ + { + "expression": "ISC AND (Apache-2.0 OR ISC) AND OpenSSL" + } + ], + "purl": "pkg:cargo/aws-lc-sys@0.37.0", + "externalReferences": [ + { + "type": "other", + "url": "aws_lc_0_37_0" + }, + { + "type": "vcs", + "url": "https://github.com/aws/aws-lc-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#axum-core@0.5.6", + "name": "axum-core", + "version": "0.5.6", + "description": "Core types and traits for axum", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/axum-core@0.5.6", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/tokio-rs/axum" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/axum" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#axum@0.8.8", + "name": "axum", + "version": "0.8.8", + "description": "Web framework that focuses on ergonomics and modularity", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/axum@0.8.8", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/tokio-rs/axum" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/axum" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "author": "Marshall Pierce ", + "name": "base64", + "version": "0.22.1", + "description": "encodes and decodes base64 as bytes or utf8", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/base64@0.22.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/base64" + }, + { + "type": "vcs", + "url": 
"https://github.com/marshallpierce/rust-base64" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#bincode@1.3.3", + "author": "Ty Overby , Francesco Mazzoli , David Tolnay , Zoey Riordan ", + "name": "bincode", + "version": "1.3.3", + "description": "A binary serialization / deserialization strategy that uses Serde for transforming structs into bytes and vice versa!", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/bincode@1.3.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/bincode" + }, + { + "type": "vcs", + "url": "https://github.com/servo/bincode" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.10.0", + "author": "The Rust Project Developers", + "name": "bitflags", + "version": "2.10.0", + "description": "A macro to generate structures which behave like bitflags. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/bitflags@2.10.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/bitflags" + }, + { + "type": "website", + "url": "https://github.com/bitflags/bitflags" + }, + { + "type": "vcs", + "url": "https://github.com/bitflags/bitflags" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "author": "Jack O'Connor , Samuel Neves", + "name": "blake3", + "version": "1.8.3", + "description": "the BLAKE3 hash function", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d" + } + ], + "licenses": [ + { + "expression": "CC0-1.0 OR Apache-2.0 OR Apache-2.0 WITH LLVM-exception" + } + ], + "purl": "pkg:cargo/blake3@1.8.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/blake3" + }, + { + "type": "vcs", + "url": "https://github.com/BLAKE3-team/BLAKE3" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#block-buffer@0.10.4", + "author": "RustCrypto Developers", + "name": "block-buffer", + "version": "0.10.4", + "description": "Buffer type for block processing of data", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3078c7629b62d3f0439517fa394996acacc5cbc91c5a20d8c658e77abd503a71" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/block-buffer@0.10.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/block-buffer" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/utils" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#bstr@1.12.1", + "author": "Andrew Gallant ", + "name": "bstr", + "version": "1.12.1", + "description": "A string type that is not required to be valid UTF-8.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/bstr@1.12.1", + 
"externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/bstr" + }, + { + "type": "website", + "url": "https://github.com/BurntSushi/bstr" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/bstr" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#bytemuck@1.25.0", + "author": "Lokathor ", + "name": "bytemuck", + "version": "1.25.0", + "description": "A crate for mucking around with piles of bytes.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "c8efb64bd706a16a1bdde310ae86b351e4d21550d98d056f22f8a7f7a2183fec" + } + ], + "licenses": [ + { + "expression": "Zlib OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/bytemuck@1.25.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Lokathor/bytemuck" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#byteorder@1.5.0", + "author": "Andrew Gallant ", + "name": "byteorder", + "version": "1.5.0", + "description": "Library for reading/writing numbers in big-endian and little-endian.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" + } + ], + "licenses": [ + { + "expression": "Unlicense OR MIT" + } + ], + "purl": "pkg:cargo/byteorder@1.5.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/byteorder" + }, + { + "type": "website", + "url": "https://github.com/BurntSushi/byteorder" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/byteorder" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "author": "Carl Lerche , Sean McArthur ", + "name": "bytes", + "version": "1.11.1", + "description": "Types and traits for working with bytes", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1e748733b7cbc798e1434b6ac524f0c1ff2ab456fe201501e6497c8417a4fc33" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/bytes@1.11.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/tokio-rs/bytes" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.55", + "author": "Alex Crichton ", + "name": "cc", + "version": "1.2.55", + "description": "A build-time dependency for Cargo build scripts to assist in invoking the native C compiler to compile native C code into a static archive to be linked into Rust code. ", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/cc@1.2.55", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/cc" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/cc-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/cc-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cfg-if@0.1.10", + "author": "Alex Crichton ", + "name": "cfg-if", + "version": "0.1.10", + "description": "A macro to ergonomically define an item depending on a large number of #[cfg] parameters. Structured like an if-else chain, the first matching branch is the item that gets emitted. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/cfg-if@0.1.10", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/cfg-if" + }, + { + "type": "website", + "url": "https://github.com/alexcrichton/cfg-if" + }, + { + "type": "vcs", + "url": "https://github.com/alexcrichton/cfg-if" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "author": "Alex Crichton ", + "name": "cfg-if", + "version": "1.0.4", + "description": "A macro to ergonomically define an item depending on a large number of #[cfg] parameters. Structured like an if-else chain, the first matching branch is the item that gets emitted. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/cfg-if@1.0.4", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-lang/cfg-if" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cfg_aliases@0.2.1", + "author": "Zicklag ", + "name": "cfg_aliases", + "version": "0.2.1", + "description": "A tiny utility to help save you a lot of effort with long winded `#[cfg()]` checks.", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "613afe47fcd5fac7ccf1db93babcb082c5994d996f20b8b159f2ad1658eb5724" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/cfg_aliases@0.2.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/cfg_aliases" + }, + { + "type": "website", + "url": "https://github.com/katharostech/cfg_aliases" + }, + { + "type": "vcs", + "url": "https://github.com/katharostech/cfg_aliases" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "name": "chrono", + "version": "0.4.43", + "description": "Date and time library for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/chrono@0.4.43", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/chrono/" + }, + { + "type": "website", + "url": "https://github.com/chronotope/chrono" + }, + { + "type": "vcs", + "url": "https://github.com/chronotope/chrono" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#clap@4.5.57", + "name": "clap", + "version": "4.5.57", + "description": "A simple to use, efficient, and full-featured Command Line Argument Parser", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6899ea499e3fb9305a65d5ebf6e3d2248c5fab291f300ad0a704fbe142eae31a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/clap@4.5.57", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/clap-rs/clap" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#clap_builder@4.5.57", + "name": "clap_builder", + "version": "4.5.57", + "description": "A 
simple to use, efficient, and full-featured Command Line Argument Parser", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7b12c8b680195a62a8364d16b8447b01b6c2c8f9aaf68bee653be34d4245e238" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/clap_builder@4.5.57", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/clap-rs/clap" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#clap_derive@4.5.55", + "name": "clap_derive", + "version": "4.5.55", + "description": "Parse command line argument by defining a struct, derive crate.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/clap_derive@4.5.55", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/clap-rs/clap" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#clap_lex@0.7.7", + "name": "clap_lex", + "version": "0.7.7", + "description": "Minimal, flexible command line parser", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "c3e64b0cc0439b12df2fa678eae89a1c56a529fd067a9115f7827f1fffd22b32" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/clap_lex@0.7.7", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/clap-rs/clap" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cmake@0.1.57", + "author": "Alex Crichton ", + "name": "cmake", + "version": "0.1.57", + "description": "A build dependency for running `cmake` to build a native library ", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "75443c44cd6b379beb8c5b45d85d0773baf31cce901fe7bb252f4eff3008ef7d" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/cmake@0.1.57", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/cmake" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/cmake-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/cmake-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#colorchoice@1.0.4", + "name": "colorchoice", + "version": "1.0.4", + "description": "Global override of color control", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/colorchoice@1.0.4", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-cli/anstyle.git" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#colored@3.1.1", + "author": "Thomas Wickham ", + "name": "colored", + "version": "3.1.1", + "description": "The most simple way to add colors in your terminal", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "faf9468729b8cbcea668e36183cb69d317348c2e08e994829fb56ebfdfbaac34" + } + ], + "licenses": [ + { + "expression": "MPL-2.0" + } + ], + "purl": "pkg:cargo/colored@3.1.1", + "externalReferences": [ + { + "type": "website", + "url": 
"https://github.com/mackwic/colored" + }, + { + "type": "vcs", + "url": "https://github.com/mackwic/colored" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#const-str@1.1.0", + "author": "Nugine ", + "name": "const-str", + "version": "1.1.0", + "description": "compile-time string operations", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "18f12cc9948ed9604230cdddc7c86e270f9401ccbe3c2e98a4378c5e7632212f" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/const-str@1.1.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Nugine/const-str" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#const_panic@0.2.15", + "author": "rodrimati1992 ", + "name": "const_panic", + "version": "0.2.15", + "description": "const panic with formatting", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e262cdaac42494e3ae34c43969f9cdeb7da178bdb4b66fa6a1ea2edb4c8ae652" + } + ], + "licenses": [ + { + "expression": "Zlib" + } + ], + "purl": "pkg:cargo/const_panic@0.2.15", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rodrimati1992/const_panic/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#constant_time_eq@0.4.2", + "author": "Cesar Eduardo Barros ", + "name": "constant_time_eq", + "version": "0.4.2", + "description": "Compares two equal-sized byte strings in constant time.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" + } + ], + "licenses": [ + { + "expression": "CC0-1.0 OR MIT-0 OR Apache-2.0" + } + ], + "purl": "pkg:cargo/constant_time_eq@0.4.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/constant_time_eq" + }, + { + "type": "vcs", + "url": "https://github.com/cesarb/constant_time_eq" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#countio@0.3.0", + "author": "Oleh Martsokha ", + "name": "countio", + "version": "0.3.0", + "description": "Byte counting for std::io::{Read, Write, Seek} and its async variants from futures and tokio. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b9702aee5d1d744c01d82f6915644f950f898e014903385464c773b96fefdecb" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/countio@0.3.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/countio" + }, + { + "type": "website", + "url": "https://github.com/spire-rs/countio" + }, + { + "type": "vcs", + "url": "https://github.com/spire-rs/countio" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#cpufeatures@0.2.17", + "author": "RustCrypto Developers", + "name": "cpufeatures", + "version": "0.2.17", + "description": "Lightweight runtime CPU feature detection for aarch64, loongarch64, and x86/x86_64 targets, with no_std support and support for mobile targets including Android and iOS ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "59ed5838eebb26a2bb2e58f6d5b5316989ae9d08bab10e0e6d103e656d1b0280" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/cpufeatures@0.2.17", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/cpufeatures" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/utils" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#crossbeam-channel@0.5.15", + "name": "crossbeam-channel", + "version": "0.5.15", + "description": "Multi-producer multi-consumer channels for message passing", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "82b8f8f868b36967f9606790d1903570de9ceaf870a7bf9fbbd3016d636a2cb2" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/crossbeam-channel@0.5.15", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-channel" + }, + { + "type": "vcs", + "url": "https://github.com/crossbeam-rs/crossbeam" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#crossbeam-queue@0.3.12", + "name": "crossbeam-queue", + "version": "0.3.12", + "description": "Concurrent queues", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "0f58bbc28f91df819d0aa2a2c00cd19754769c2fad90579b3592b1c9ba7a3115" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/crossbeam-queue@0.3.12", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-queue" + }, + { + "type": "vcs", + "url": "https://github.com/crossbeam-rs/crossbeam" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#crossbeam-utils@0.8.21", + "name": "crossbeam-utils", + "version": "0.8.21", + "description": "Utilities for concurrent programming", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/crossbeam-utils@0.8.21", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/crossbeam-rs/crossbeam/tree/master/crossbeam-utils" + }, + { + "type": "vcs", + "url": "https://github.com/crossbeam-rs/crossbeam" + } + ] + }, + { + "type": "library", + "bom-ref": 
"registry+https://github.com/rust-lang/crates.io-index#crypto-common@0.1.7", + "author": "RustCrypto Developers", + "name": "crypto-common", + "version": "0.1.7", + "description": "Common cryptographic traits", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/crypto-common@0.1.7", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/crypto-common" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/traits" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#csv-core@0.1.13", + "author": "Andrew Gallant ", + "name": "csv-core", + "version": "0.1.13", + "description": "Bare bones CSV parsing with no_std support.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "704a3c26996a80471189265814dbc2c257598b96b8a7feae2d31ace646bb9782" + } + ], + "licenses": [ + { + "expression": "Unlicense OR MIT" + } + ], + "purl": "pkg:cargo/csv-core@0.1.13", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/csv-core" + }, + { + "type": "website", + "url": "https://github.com/BurntSushi/rust-csv" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/rust-csv" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#csv@1.4.0", + "author": "Andrew Gallant ", + "name": "csv", + "version": "1.4.0", + "description": "Fast CSV parsing with support for serde.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "52cd9d68cf7efc6ddfaaee42e7288d3a99d613d4b50f76ce9827ae0c6e14f938" + } + ], + "licenses": [ + { + "expression": "Unlicense OR MIT" + } + ], + "purl": "pkg:cargo/csv@1.4.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/csv" + }, + { + "type": "website", + "url": "https://github.com/BurntSushi/rust-csv" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/rust-csv" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ctor-proc-macro@0.0.7", + "author": "Matt Mastracci ", + "name": "ctor-proc-macro", + "version": "0.0.7", + "description": "proc-macro support for the ctor crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "52560adf09603e58c9a7ee1fe1dcb95a16927b17c127f0ac02d6e768a0e25bc1" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/ctor-proc-macro@0.0.7", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/mmastrac/rust-ctor" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ctor@0.6.3", + "author": "Matt Mastracci ", + "name": "ctor", + "version": "0.6.3", + "description": "__attribute__((constructor)) for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "424e0138278faeb2b401f174ad17e715c829512d74f3d1e81eb43365c2e0590e" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/ctor@0.6.3", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/mmastrac/rust-ctor" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#deranged@0.5.5", + "author": "Jacob Pratt ", + "name": "deranged", + "version": 
"0.5.5", + "description": "Ranged integers", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/deranged@0.5.5", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/jhpratt/deranged" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#derivative@2.2.0", + "author": "mcarton ", + "name": "derivative", + "version": "2.2.0", + "description": "A set of alternative `derive` attributes for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "fcc3dd5e9e9c0b295d6e1e4d811fb6f157d5ffd784b8d202fc62eac8035a770b" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/derivative@2.2.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://mcarton.github.io/rust-derivative/" + }, + { + "type": "vcs", + "url": "https://github.com/mcarton/rust-derivative" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#digest@0.10.7", + "author": "RustCrypto Developers", + "name": "digest", + "version": "0.10.7", + "description": "Traits for cryptographic hash functions and message authentication codes", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/digest@0.10.7", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/digest" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/traits" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#dirs-sys@0.5.0", + "author": "Simon Ochsenreither ", + "name": "dirs-sys", + "version": "0.5.0", + "description": "System-level helper functions for the dirs and directories crates.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e01a3366d27ee9890022452ee61b2b63a67e6f13f58900b651ff5665f0bb1fab" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/dirs-sys@0.5.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/dirs-dev/dirs-sys-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#dirs@6.0.0", + "author": "Simon Ochsenreither ", + "name": "dirs", + "version": "6.0.0", + "description": "A tiny low-level library that provides platform-specific standard locations of directories for config, cache and other data on Linux, Windows, macOS and Redox by leveraging the mechanisms defined by the XDG base/user directory specifications on Linux, the Known Folder API on Windows, and the Standard Directory guidelines on macOS.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "c3e8aa94d75141228480295a7d0e7feb620b1a5ad9f12bc40be62411e38cce4e" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/dirs@6.0.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/soc/dirs-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#displaydoc@0.2.5", + "author": "Jane Lusby ", + "name": "displaydoc", + "version": "0.2.5", + 
"description": "A derive macro for implementing the display Trait via a doc comment and string interpolation ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/displaydoc@0.2.5", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/displaydoc" + }, + { + "type": "website", + "url": "https://github.com/yaahc/displaydoc" + }, + { + "type": "vcs", + "url": "https://github.com/yaahc/displaydoc" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#doxygen-rs@0.4.2", + "name": "doxygen-rs", + "version": "0.4.2", + "description": "Transform Doxygen to Rustdoc", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "415b6ec780d34dcf624666747194393603d0373b7141eef01d12ee58881507d9" + } + ], + "licenses": [ + { + "expression": "BSD-3-Clause" + } + ], + "purl": "pkg:cargo/doxygen-rs@0.4.2", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/Techie-Pi/doxygen-rs/" + }, + { + "type": "vcs", + "url": "https://github.com/Techie-Pi/doxygen-rs/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#dtor-proc-macro@0.0.6", + "author": "Matt Mastracci ", + "name": "dtor-proc-macro", + "version": "0.0.6", + "description": "proc-macro support for the dtor crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f678cf4a922c215c63e0de95eb1ff08a958a81d47e485cf9da1e27bf6305cfa5" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/dtor-proc-macro@0.0.6", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/mmastrac/rust-ctor" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#dtor@0.1.1", + "author": "Matt Mastracci ", + "name": "dtor", + "version": "0.1.1", + "description": "__attribute__((destructor)) for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "404d02eeb088a82cfd873006cb713fe411306c7d182c344905e101fb1167d301" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/dtor@0.1.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/mmastrac/rust-ctor" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#dunce@1.0.5", + "author": "Kornel ", + "name": "dunce", + "version": "1.0.5", + "description": "Normalize Windows paths to the most compatible format, avoiding UNC where possible", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813" + } + ], + "licenses": [ + { + "expression": "CC0-1.0 OR MIT-0 OR Apache-2.0" + } + ], + "purl": "pkg:cargo/dunce@1.0.5", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/dunce" + }, + { + "type": "website", + "url": "https://lib.rs/crates/dunce" + }, + { + "type": "vcs", + "url": "https://gitlab.com/kornelski/dunce" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#duration-str@0.19.0", + "author": "baoyachi ", + "name": "duration-str", + "version": "0.19.0", + "description": "duration string parser", + "scope": "required", + "hashes": [ + { + 
"alg": "SHA-256", + "content": "12494809f9915b6132014cc259c4e204ab53ab6c6dd2225672703b5359267d82" + } + ], + "licenses": [ + { + "expression": "Apache-2.0" + } + ], + "purl": "pkg:cargo/duration-str@0.19.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/baoyachi/duration-str" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#either@1.15.0", + "author": "bluss", + "name": "either", + "version": "1.15.0", + "description": "The enum `Either` with variants `Left` and `Right` is a general purpose sum type with two cases. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/either@1.15.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/either/1/" + }, + { + "type": "vcs", + "url": "https://github.com/rayon-rs/either" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#equivalent@1.0.2", + "name": "equivalent", + "version": "1.0.2", + "description": "Traits for key comparison in maps.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/equivalent@1.0.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/indexmap-rs/equivalent" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#errno@0.3.14", + "author": "Chris Wong , Dan Gohman ", + "name": "errno", + "version": "0.3.14", + "description": "Cross-platform interface to the `errno` variable.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/errno@0.3.14", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/errno" + }, + { + "type": "vcs", + "url": "https://github.com/lambda-fairy/rust-errno" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#fastrand@2.3.0", + "author": "Stjepan Glavina ", + "name": "fastrand", + "version": "2.3.0", + "description": "A simple and fast random number generator", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "37909eebbb50d72f9059c3b6d82c0463f2ff062c9e95845c43a6c9c0355411be" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/fastrand@2.3.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/smol-rs/fastrand" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#find-msvc-tools@0.1.9", + "name": "find-msvc-tools", + "version": "0.1.9", + "description": "Find windows-specific tools, read MSVC versions from the registry and from COM interfaces", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/find-msvc-tools@0.1.9", + "externalReferences": [ + { + "type": "documentation", + "url": 
"https://docs.rs/find-msvc-tools" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/cc-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#fnv@1.0.7", + "author": "Alex Crichton ", + "name": "fnv", + "version": "1.0.7", + "description": "Fowler–Noll–Vo hash function", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/fnv@1.0.7", + "externalReferences": [ + { + "type": "documentation", + "url": "https://doc.servo.org/fnv/" + }, + { + "type": "vcs", + "url": "https://github.com/servo/rust-fnv" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#form_urlencoded@1.2.2", + "author": "The rust-url developers", + "name": "form_urlencoded", + "version": "1.2.2", + "description": "Parser and serializer for the application/x-www-form-urlencoded syntax, as used by HTML forms.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/form_urlencoded@1.2.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/servo/rust-url" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#fs_extra@1.3.0", + "author": "Denis Kurilenko ", + "name": "fs_extra", + "version": "1.3.0", + "description": "Expanding std::fs and std::io. Recursively copy folders with information about process and much more.", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "42703706b716c37f96a77aea830392ad231f44c9e9a67872fa5548707e11b11c" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/fs_extra@1.3.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/fs_extra" + }, + { + "type": "website", + "url": "https://github.com/webdesus/fs_extra" + }, + { + "type": "vcs", + "url": "https://github.com/webdesus/fs_extra" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures-channel@0.3.31", + "name": "futures-channel", + "version": "0.3.31", + "description": "Channels for asynchronous communication using futures-rs. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures-channel@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "name": "futures-core", + "version": "0.3.31", + "description": "The core traits and types in for the `futures` library. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures-core@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures-executor@0.3.31", + "name": "futures-executor", + "version": "0.3.31", + "description": "Executors for asynchronous tasks based on the futures-rs library. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures-executor@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures-io@0.3.31", + "name": "futures-io", + "version": "0.3.31", + "description": "The `AsyncRead`, `AsyncWrite`, `AsyncSeek`, and `AsyncBufRead` traits for the futures-rs library. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures-io@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures-macro@0.3.31", + "name": "futures-macro", + "version": "0.3.31", + "description": "The futures-rs procedural macro implementations. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures-macro@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures-sink@0.3.31", + "name": "futures-sink", + "version": "0.3.31", + "description": "The asynchronous `Sink` trait for the futures-rs library. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures-sink@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures-task@0.3.31", + "name": "futures-task", + "version": "0.3.31", + "description": "Tools for working with tasks. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures-task@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "name": "futures-util", + "version": "0.3.31", + "description": "Common utilities and extension traits for the futures-rs library. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures-util@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#futures@0.3.31", + "name": "futures", + "version": "0.3.31", + "description": "An implementation of futures and streams featuring zero allocations, composability, and iterator-like interfaces. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/futures@0.3.31", + "externalReferences": [ + { + "type": "website", + "url": "https://rust-lang.github.io/futures-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/futures-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#gearhash@0.1.3", + "author": "Sam Rijs ", + "name": "gearhash", + "version": "0.1.3", + "description": "Fast, SIMD-accelerated hash function for content-defined chunking", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "c8cf82cf76cd16485e56295a1377c775ce708c9f1a0be6b029076d60a245d213" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/gearhash@0.1.3", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/srijs/rust-gearhash" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#generic-array@0.14.7", + "author": "Bartłomiej Kamiński , Aaron Trent ", + "name": "generic-array", + "version": "0.14.7", + "description": "Generic types implementing functionality of arrays", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/generic-array@0.14.7", + "externalReferences": [ + { + "type": "documentation", + "url": "http://fizyk20.github.io/generic-array/generic_array/" + }, + { + "type": "vcs", + "url": "https://github.com/fizyk20/generic-array.git" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.2.17", + "author": "The Rand Project Developers", + "name": "getrandom", + "version": "0.2.17", + "description": "A small cross-platform library for retrieving random data from system 
source", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/getrandom@0.2.17", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/getrandom" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/getrandom" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.3.4", + "author": "The Rand Project Developers", + "name": "getrandom", + "version": "0.3.4", + "description": "A small cross-platform library for retrieving random data from system source", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "899def5c37c4fd7b2664648c28120ecec138e4d395b459e5ca34f9cce2dd77fd" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/getrandom@0.3.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/getrandom" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/getrandom" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.4.1", + "author": "The Rand Project Developers", + "name": "getrandom", + "version": "0.4.1", + "description": "A small cross-platform library for retrieving random data from system source", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/getrandom@0.4.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/getrandom" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/getrandom" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#git-version-macro@0.3.9", + "author": "David Roundy , Maarten de Vries , Mara Bos ", + "name": "git-version-macro", + "version": "0.3.9", + "description": "Internal macro crate for git-version.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "53010ccb100b96a67bc32c0175f0ed1426b31b655d562898e57325f81c023ac0" + } + ], + "licenses": [ + { + "expression": "BSD-2-Clause" + } + ], + "purl": "pkg:cargo/git-version-macro@0.3.9", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/fusion-engineering/rust-git-version" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#git-version@0.3.9", + "author": "Mara Bos , Maarten de Vries , David Roundy ", + "name": "git-version", + "version": "0.3.9", + "description": "Compile the git version (tag name, or hash otherwise) and dirty state into your program.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1ad568aa3db0fcbc81f2f116137f263d7304f512a1209b35b85150d3ef88ad19" + } + ], + "licenses": [ + { + "expression": "BSD-2-Clause" + } + ], + "purl": "pkg:cargo/git-version@0.3.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/git-version/" + }, + { + "type": "vcs", + "url": "https://github.com/fusion-engineering/rust-git-version" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#h2@0.4.13", + "author": "Carl Lerche , Sean McArthur ", + "name": "h2", + "version": 
"0.4.13", + "description": "An HTTP/2 client and server", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/h2@0.4.13", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/h2" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/h2" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#half@2.7.1", + "author": "Kathryn Long ", + "name": "half", + "version": "2.7.1", + "description": "Half-precision floating point f16 and bf16 types for Rust implementing the IEEE 754-2008 standard binary16 and bfloat16 types.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6ea2d84b969582b4b1864a92dc5d27cd2b77b622a8d79306834f1be5ba20d84b" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/half@2.7.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/VoidStarKat/half-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#hashbrown@0.16.1", + "author": "Amanieu d'Antras ", + "name": "hashbrown", + "version": "0.16.1", + "description": "A Rust port of Google's SwissTable hash map", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/hashbrown@0.16.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-lang/hashbrown" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#headers-core@0.3.0", + "author": "Sean McArthur ", + "name": "headers-core", + "version": "0.3.0", + "description": "typed HTTP headers core trait", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/headers-core@0.3.0", + "externalReferences": [ + { + "type": "website", + "url": "https://hyper.rs" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/headers" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#headers@0.4.1", + "author": "Sean McArthur ", + "name": "headers", + "version": "0.4.1", + "description": "typed HTTP headers", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/headers@0.4.1", + "externalReferences": [ + { + "type": "website", + "url": "https://hyper.rs" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/headers" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#heapify@0.2.0", + "name": "heapify", + "version": "0.2.0", + "description": "Convenience functions to turn slices into max-heaps.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "0049b265b7f201ca9ab25475b22b47fe444060126a51abe00f77d986fc5cc52e" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/heapify@0.2.0", + "externalReferences": [ + { + "type": 
"vcs", + "url": "https://github.com/ethereal-sheep/heapify" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#heck@0.5.0", + "name": "heck", + "version": "0.5.0", + "description": "heck is a case conversion library.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/heck@0.5.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/withoutboats/heck" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#heed-traits@0.20.0", + "author": "Kerollmops ", + "name": "heed-traits", + "version": "0.20.0", + "description": "The traits used inside of the fully typed LMDB wrapper, heed", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "eb3130048d404c57ce5a1ac61a903696e8fcde7e8c2991e9fcfc1f27c3ef74ff" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/heed-traits@0.20.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Kerollmops/heed" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#heed-types@0.21.0", + "author": "Kerollmops ", + "name": "heed-types", + "version": "0.21.0", + "description": "The types used with the fully typed LMDB wrapper, heed", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "13c255bdf46e07fb840d120a36dcc81f385140d7191c76a7391672675c01a55d" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/heed-types@0.21.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Kerollmops/heed" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#heed@0.22.0", + "author": "Kerollmops ", + "name": "heed", + "version": "0.22.0", + "description": "A fully typed LMDB (mdb.master) wrapper with minimum overhead", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6a56c94661ddfb51aa9cdfbf102cfcc340aa69267f95ebccc4af08d7c530d393" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/heed@0.22.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Kerollmops/heed" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#http-body-util@0.1.3", + "author": "Carl Lerche , Lucio Franco , Sean McArthur ", + "name": "http-body-util", + "version": "0.1.3", + "description": "Combinators and adapters for HTTP request or response bodies. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b021d93e26becf5dc7e1b75b1bed1fd93124b374ceb73f43d4d4eafec896a64a" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/http-body-util@0.1.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/http-body-util" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/http-body" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "author": "Carl Lerche , Lucio Franco , Sean McArthur ", + "name": "http-body", + "version": "1.0.1", + "description": "Trait representing an asynchronous, streaming, HTTP request or response body. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1efedce1fb8e6913f23e0c92de8e62cd5b772a67e7b3946df930a62566c93184" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/http-body@1.0.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/http-body" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/http-body" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "author": "Alex Crichton , Carl Lerche , Sean McArthur ", + "name": "http", + "version": "1.4.0", + "description": "A set of types for representing HTTP requests and responses. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/http@1.4.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/http" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/http" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#httparse@1.10.1", + "author": "Sean McArthur ", + "name": "httparse", + "version": "1.10.1", + "description": "A tiny, safe, speedy, zero-copy HTTP/1.x parser.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6dbf3de79e51f3d586ab4cb9d5c3e2c14aa28ed23d180cf89b4df0454a69cc87" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/httparse@1.10.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/httparse" + }, + { + "type": "vcs", + "url": "https://github.com/seanmonstar/httparse" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#httpdate@1.0.3", + "author": "Pyfisch ", + "name": "httpdate", + "version": "1.0.3", + "description": "HTTP date parsing and formatting", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/httpdate@1.0.3", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/pyfisch/httpdate" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#hyper-rustls@0.27.7", + "name": "hyper-rustls", + "version": "0.27.7", + "description": "Rustls+hyper integration for pure rust HTTPS", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR ISC OR MIT" + } + ], + "purl": "pkg:cargo/hyper-rustls@0.27.7", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/hyper-rustls/" + }, + { + "type": "website", + "url": "https://github.com/rustls/hyper-rustls" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/hyper-rustls" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#hyper-util@0.1.20", + "author": "Sean McArthur ", + "name": "hyper-util", + "version": "0.1.20", + "description": "hyper utilities", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "96547c2556ec9d12fb1578c4eaf448b04993e7fb79cbaad930a656880a6bdfa0" + } + ], 
+ "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/hyper-util@0.1.20", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/hyper-util" + }, + { + "type": "website", + "url": "https://hyper.rs" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/hyper-util" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + "author": "Sean McArthur ", + "name": "hyper", + "version": "1.8.1", + "description": "A protective and efficient HTTP library for all.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/hyper@1.8.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/hyper" + }, + { + "type": "website", + "url": "https://hyper.rs" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/hyper" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#iana-time-zone@0.1.65", + "author": "Andrew Straw , René Kijewski , Ryan Lopopolo ", + "name": "iana-time-zone", + "version": "0.1.65", + "description": "get the IANA time zone for the current system", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e31bc9ad994ba00e440a8aa5c9ef0ec67d5cb5e5cb0cc7f8b744a35b389cc470" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/iana-time-zone@0.1.65", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/strawlab/iana-time-zone" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#icu_collections@2.1.1", + "author": "The ICU4X Project Developers", + "name": "icu_collections", + "version": "2.1.1", + "description": "Collection of API for use in ICU libraries.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "4c6b649701667bbe825c3b7e6388cb521c23d88644678e83c0c4d0a621a34b43" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/icu_collections@2.1.1", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#icu_locale_core@2.1.1", + "author": "The ICU4X Project Developers", + "name": "icu_locale_core", + "version": "2.1.1", + "description": "API for managing Unicode Language and Locale Identifiers", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "edba7861004dd3714265b4db54a3c390e880ab658fec5f7db895fae2046b5bb6" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/icu_locale_core@2.1.1", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#icu_normalizer@2.1.1", + "author": "The ICU4X Project Developers", + "name": "icu_normalizer", + "version": "2.1.1", + "description": "API for normalizing text into Unicode Normalization Forms", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": 
"5f6c8828b67bf8908d82127b2054ea1b4427ff0230ee9141c54251934ab1b599" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/icu_normalizer@2.1.1", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#icu_normalizer_data@2.1.1", + "author": "The ICU4X Project Developers", + "name": "icu_normalizer_data", + "version": "2.1.1", + "description": "Data for the icu_normalizer crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7aedcccd01fc5fe81e6b489c15b247b8b0690feb23304303a9e560f37efc560a" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/icu_normalizer_data@2.1.1", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#icu_properties@2.1.2", + "author": "The ICU4X Project Developers", + "name": "icu_properties", + "version": "2.1.2", + "description": "Definitions for Unicode properties", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "020bfc02fe870ec3a66d93e677ccca0562506e5872c650f893269e08615d74ec" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/icu_properties@2.1.2", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#icu_properties_data@2.1.2", + "author": "The ICU4X Project Developers", + "name": "icu_properties_data", + "version": "2.1.2", + "description": "Data for the icu_properties crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "616c294cf8d725c6afcd8f55abc17c56464ef6211f9ed59cccffe534129c77af" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/icu_properties_data@2.1.2", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#icu_provider@2.1.1", + "author": "The ICU4X Project Developers", + "name": "icu_provider", + "version": "2.1.1", + "description": "Trait and struct definitions for the ICU data provider", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "85962cf0ce02e1e0a629cc34e7ca3e373ce20dda4c4d7294bbd0bf1fdb59e614" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/icu_provider@2.1.1", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#idna@1.1.0", + "author": "The rust-url developers", + "name": "idna", + "version": "1.1.0", + "description": "IDNA (Internationalizing Domain Names in Applications) and Punycode.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": 
"3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/idna@1.1.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/servo/rust-url/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#idna_adapter@1.2.1", + "author": "The rust-url developers", + "name": "idna_adapter", + "version": "1.2.1", + "description": "Back end adapter for idna", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/idna_adapter@1.2.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/idna_adapter/latest/idna_adapter/" + }, + { + "type": "website", + "url": "https://docs.rs/crate/idna_adapter/latest" + }, + { + "type": "vcs", + "url": "https://github.com/hsivonen/idna_adapter" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#indexmap@2.13.0", + "name": "indexmap", + "version": "2.13.0", + "description": "A hash table with consistent order and fast iteration.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/indexmap@2.13.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/indexmap/" + }, + { + "type": "vcs", + "url": "https://github.com/indexmap-rs/indexmap" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#indoc@2.0.7", + "author": "David Tolnay ", + "name": "indoc", + "version": "2.0.7", + "description": "Indented document literals", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "79cf5c93f93228cf8efb3ba362535fb11199ac548a09ce117c9b1adc3030d706" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/indoc@2.0.7", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/indoc" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/indoc" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ipnet@2.11.0", + "author": "Kris Price ", + "name": "ipnet", + "version": "2.11.0", + "description": "Provides types and useful methods for working with IPv4 and IPv6 network addresses, commonly called IP prefixes. The new `IpNet`, `Ipv4Net`, and `Ipv6Net` types build on the existing `IpAddr`, `Ipv4Addr`, and `Ipv6Addr` types already provided in Rust's standard library and align to their design to stay consistent. The module also provides useful traits that extend `Ipv4Addr` and `Ipv6Addr` with methods for `Add`, `Sub`, `BitAnd`, and `BitOr` operations. 
The module only uses stable feature so it is guaranteed to compile using the stable toolchain.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/ipnet@2.11.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/ipnet" + }, + { + "type": "vcs", + "url": "https://github.com/krisprice/ipnet" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#iri-string@0.7.10", + "author": "YOSHIOKA Takuma ", + "name": "iri-string", + "version": "0.7.10", + "description": "IRI as string types", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/iri-string@0.7.10", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/lo48576/iri-string" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#is_terminal_polyfill@1.70.2", + "name": "is_terminal_polyfill", + "version": "1.70.2", + "description": "Polyfill for `is_terminal` stdlib feature for use with older MSRVs", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/is_terminal_polyfill@1.70.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/polyfill-rs/is_terminal_polyfill" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#itertools@0.14.0", + "author": "bluss", + "name": "itertools", + "version": "0.14.0", + "description": "Extra iterator adaptors, iterator methods, free functions, and macros.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/itertools@0.14.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/itertools/" + }, + { + "type": "vcs", + "url": "https://github.com/rust-itertools/itertools" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "author": "David Tolnay ", + "name": "itoa", + "version": "1.0.17", + "description": "Fast integer primitive to string conversion", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/itoa@1.0.17", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/itoa" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/itoa" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#jobserver@0.1.34", + "author": "Alex Crichton ", + "name": "jobserver", + "version": "0.1.34", + "description": "An implementation of the GNU Make jobserver for Rust. 
", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/jobserver@0.1.34", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/jobserver" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/jobserver-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/jobserver-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#konst@0.4.3", + "author": "rodrimati1992 ", + "name": "konst", + "version": "0.4.3", + "description": "Const equivalents of std features: comparison, destructuring, iteration, and parsing", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f660d5f887e3562f9ab6f4a14988795b694099d66b4f5dedc02d197ba9becb1d" + } + ], + "licenses": [ + { + "expression": "Zlib" + } + ], + "purl": "pkg:cargo/konst@0.4.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/konst/" + }, + { + "type": "vcs", + "url": "https://github.com/rodrimati1992/konst/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#konst_proc_macros@0.4.1", + "author": "rodrimati1992 ", + "name": "konst_proc_macros", + "version": "0.4.1", + "description": "Implementation detail of the `konst` crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e037a2e1d8d5fdbd49b16a4ea09d5d6401c1f29eca5ff29d03d3824dba16256a" + } + ], + "licenses": [ + { + "expression": "Zlib" + } + ], + "purl": "pkg:cargo/konst_proc_macros@0.4.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/konst/" + }, + { + "type": "vcs", + "url": "https://github.com/rodrimati1992/konst/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "author": "Marvin Löbel ", + "name": "lazy_static", + "version": "1.5.0", + "description": "A macro for declaring lazily evaluated statics in Rust.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/lazy_static@1.5.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/lazy_static" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang-nursery/lazy-static.rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "author": "The Rust Project Developers", + "name": "libc", + "version": "0.2.181", + "description": "Raw FFI bindings to platform libraries like libc.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/libc@0.2.181", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-lang/libc" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#libm@0.2.16", + "author": "Alex Crichton , Amanieu d'Antras , Jorge Aparicio , Trevor Gross ", + "name": "libm", + "version": "0.2.16", + "description": "libm in pure Rust", + "scope": "required", + 
"hashes": [ + { + "alg": "SHA-256", + "content": "b6d2cec3eae94f9f509c767b45932f1ada8350c4bdb85af2fcab4a3c14807981" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/libm@0.2.16", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-lang/compiler-builtins" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#linux-raw-sys@0.11.0", + "author": "Dan Gohman ", + "name": "linux-raw-sys", + "version": "0.11.0", + "description": "Generated bindings for Linux's userspace API", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/linux-raw-sys@0.11.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/linux-raw-sys" + }, + { + "type": "vcs", + "url": "https://github.com/sunfishcode/linux-raw-sys" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#litemap@0.8.1", + "author": "The ICU4X Project Developers", + "name": "litemap", + "version": "0.8.1", + "description": "A key-value Map implementation based on a flat, sorted Vec.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/litemap@0.8.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/litemap" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#lmdb-master-sys@0.2.5", + "author": "Kerollmops , Dan Burkert , Victor Porof ", + "name": "lmdb-master-sys", + "version": "0.2.5", + "description": "Rust bindings for liblmdb on the mdb.master branch.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "864808e0b19fb6dd3b70ba94ee671b82fce17554cf80aeb0a155c65bb08027df" + } + ], + "licenses": [ + { + "expression": "Apache-2.0" + } + ], + "purl": "pkg:cargo/lmdb-master-sys@0.2.5", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/lmdb-master-sys" + }, + { + "type": "vcs", + "url": "https://github.com/meilisearch/heed/tree/main/lmdb-master-sys" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#lock_api@0.4.14", + "author": "Amanieu d'Antras ", + "name": "lock_api", + "version": "0.4.14", + "description": "Wrappers to create fully-featured Mutex and RwLock types. 
Compatible with no_std.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "224399e74b87b5f3557511d98dff8b14089b3dadafcab6bb93eab67d3aace965" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/lock_api@0.4.14", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Amanieu/parking_lot" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "author": "The Rust Project Developers", + "name": "log", + "version": "0.4.29", + "description": "A lightweight logging facade for Rust ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/log@0.4.29", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/log" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/log" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#lru-slab@0.1.2", + "author": "Benjamin Saunders ", + "name": "lru-slab", + "version": "0.1.2", + "description": "Pre-allocated storage with constant-time LRU tracking", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "112b39cec0b298b6c1999fee3e31427f74f676e4cb9879ed1a121b43661a4154" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0 OR Zlib" + } + ], + "purl": "pkg:cargo/lru-slab@0.1.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Ralith/lru-slab" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#lz4_flex@0.12.0", + "author": "Pascal Seitz , Arthur Silva , ticki ", + "name": "lz4_flex", + "version": "0.12.0", + "description": "Fastest LZ4 implementation in Rust, no unsafe by default.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ab6473172471198271ff72e9379150e9dfd70d8e533e0752a27e515b48dd375e" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/lz4_flex@0.12.0", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/pseitz/lz4_flex" + }, + { + "type": "vcs", + "url": "https://github.com/pseitz/lz4_flex" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#matchers@0.2.0", + "author": "Eliza Weisman ", + "name": "matchers", + "version": "0.2.0", + "description": "Regex matching on character and byte streams. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/matchers@0.2.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/matchers/" + }, + { + "type": "website", + "url": "https://github.com/hawkw/matchers" + }, + { + "type": "vcs", + "url": "https://github.com/hawkw/matchers" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#matchit@0.8.4", + "author": "Ibraheem Ahmed ", + "name": "matchit", + "version": "0.8.4", + "description": "A high performance, zero-copy URL router.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" + } + ], + "licenses": [ + { + "expression": "MIT AND BSD-3-Clause" + } + ], + "purl": "pkg:cargo/matchit@0.8.4", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/ibraheemdev/matchit" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#matrixmultiply@0.3.10", + "author": "bluss, R. Janis Goldschmidt", + "name": "matrixmultiply", + "version": "0.3.10", + "description": "General matrix multiplication for f32 and f64 matrices. Operates on matrices with general layout (they can use arbitrary row and column stride). Detects and uses AVX or SSE2 on x86 platforms transparently for higher performance. Uses a microkernel strategy, so that the implementation is easy to parallelize and optimize. Supports multithreading.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a06de3016e9fae57a36fd14dba131fccf49f74b40b7fbdb472f96e361ec71a08" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/matrixmultiply@0.3.10", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/matrixmultiply/" + }, + { + "type": "vcs", + "url": "https://github.com/bluss/matrixmultiply/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "author": "Andrew Gallant , bluss", + "name": "memchr", + "version": "2.8.0", + "description": "Provides extremely fast (uses SIMD on x86_64, aarch64 and wasm32) routines for 1, 2 or 3 byte search and single substring search. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" + } + ], + "licenses": [ + { + "expression": "Unlicense OR MIT" + } + ], + "purl": "pkg:cargo/memchr@2.8.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/memchr/" + }, + { + "type": "website", + "url": "https://github.com/BurntSushi/memchr" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/memchr" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#memoffset@0.9.1", + "author": "Gilad Naaman ", + "name": "memoffset", + "version": "0.9.1", + "description": "offset_of functionality for Rust structs.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "488016bfae457b036d996092f6cb448677611ce4449e970ceaf42695203f218a" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/memoffset@0.9.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Gilnaa/memoffset" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#mime@0.3.17", + "author": "Sean McArthur ", + "name": "mime", + "version": "0.3.17", + "description": "Strongly Typed Mimes", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6877bb514081ee2a7ff5ef9de3281f14a4dd4bceac4c09388074a6b5df8a139a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/mime@0.3.17", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/mime" + }, + { + "type": "vcs", + "url": "https://github.com/hyperium/mime" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#mime_guess@2.0.5", + "author": "Austin Bonander ", + "name": "mime_guess", + "version": "2.0.5", + "description": "A simple crate for detection of a file's MIME type by its extension.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f7c44f8e672c00fe5308fa235f821cb4198414e1c77935c1ab6948d3fd78550e" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/mime_guess@2.0.5", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/mime_guess/" + }, + { + "type": "vcs", + "url": "https://github.com/abonander/mime_guess" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#mio@1.1.1", + "author": "Carl Lerche , Thomas de Zeeuw , Tokio Contributors ", + "name": "mio", + "version": "1.1.1", + "description": "Lightweight non-blocking I/O.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/mio@1.1.1", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/tokio-rs/mio" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/mio" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "author": "Thom Chiovoloni ", + "name": "more-asserts", + "version": "0.3.1", + "description": "Small library providing additional assert_* and debug_assert_* macros.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" + } + 
], + "licenses": [ + { + "expression": "Unlicense OR MIT OR Apache-2.0 OR CC0-1.0" + } + ], + "purl": "pkg:cargo/more-asserts@0.3.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/more-asserts" + }, + { + "type": "website", + "url": "https://github.com/thomcc/rust-more-asserts" + }, + { + "type": "vcs", + "url": "https://github.com/thomcc/rust-more-asserts" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#nalgebra@0.33.2", + "author": "Sébastien Crozet ", + "name": "nalgebra", + "version": "0.33.2", + "description": "General-purpose linear algebra library with transformations and statically-sized or dynamically-sized matrices.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "26aecdf64b707efd1310e3544d709c5c0ac61c13756046aaaba41be5c4f66a3b" + } + ], + "licenses": [ + { + "expression": "Apache-2.0" + } + ], + "purl": "pkg:cargo/nalgebra@0.33.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://www.nalgebra.org/docs" + }, + { + "type": "website", + "url": "https://nalgebra.org" + }, + { + "type": "vcs", + "url": "https://github.com/dimforge/nalgebra" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#nu-ansi-term@0.50.3", + "author": "ogham@bsago.me, Ryan Scheel (Havvy) , Josh Triplett , The Nushell Project Developers", + "name": "nu-ansi-term", + "version": "0.50.3", + "description": "Library for ANSI terminal colors and styles (bold, underline)", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/nu-ansi-term@0.50.3", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/nushell/nu-ansi-term" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#num-bigint@0.4.6", + "author": "The Rust Project Developers", + "name": "num-bigint", + "version": "0.4.6", + "description": "Big integer implementation for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a5e44f723f1133c9deac646763579fdb3ac745e418f2a7af9cd0c431da1f20b9" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/num-bigint@0.4.6", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/num-bigint" + }, + { + "type": "website", + "url": "https://github.com/rust-num/num-bigint" + }, + { + "type": "vcs", + "url": "https://github.com/rust-num/num-bigint" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#num-complex@0.4.6", + "author": "The Rust Project Developers", + "name": "num-complex", + "version": "0.4.6", + "description": "Complex numbers implementation for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "73f88a1307638156682bada9d7604135552957b7818057dcef22705b4d509495" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/num-complex@0.4.6", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/num-complex" + }, + { + "type": "website", + "url": "https://github.com/rust-num/num-complex" + }, + { + "type": "vcs", + "url": "https://github.com/rust-num/num-complex" + } + ] + }, + { + "type": "library", + "bom-ref": 
"registry+https://github.com/rust-lang/crates.io-index#num-conv@0.2.0", + "author": "Jacob Pratt ", + "name": "num-conv", + "version": "0.2.0", + "description": "`num_conv` is a crate to convert between integer types without using `as` casts. This provides better certainty when refactoring, makes the exact behavior of code more explicit, and allows using turbofish syntax. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "cf97ec579c3c42f953ef76dbf8d55ac91fb219dde70e49aa4a6b7d74e9919050" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/num-conv@0.2.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/jhpratt/num-conv" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#num-integer@0.1.46", + "author": "The Rust Project Developers", + "name": "num-integer", + "version": "0.1.46", + "description": "Integer traits and functions", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7969661fd2958a5cb096e56c8e1ad0444ac2bbcd0061bd28660485a44879858f" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/num-integer@0.1.46", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/num-integer" + }, + { + "type": "website", + "url": "https://github.com/rust-num/num-integer" + }, + { + "type": "vcs", + "url": "https://github.com/rust-num/num-integer" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#num-rational@0.4.2", + "author": "The Rust Project Developers", + "name": "num-rational", + "version": "0.4.2", + "description": "Rational numbers implementation for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f83d14da390562dca69fc84082e73e548e1ad308d24accdedd2720017cb37824" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/num-rational@0.4.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/num-rational" + }, + { + "type": "website", + "url": "https://github.com/rust-num/num-rational" + }, + { + "type": "vcs", + "url": "https://github.com/rust-num/num-rational" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "author": "The Rust Project Developers", + "name": "num-traits", + "version": "0.2.19", + "description": "Numeric traits for generic mathematics", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "071dfc062690e90b734c0b2273ce72ad0ffa95f0c74596bc250dcfd960262841" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/num-traits@0.2.19", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/num-traits" + }, + { + "type": "website", + "url": "https://github.com/rust-num/num-traits" + }, + { + "type": "vcs", + "url": "https://github.com/rust-num/num-traits" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "author": "Aleksey Kladov ", + "name": "once_cell", + "version": "1.21.3", + "description": "Single assignment cells and lazy values.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "42f5e15c9953c5e4ccceeb2e7382a716482c34515315f7b03532b8b4e8393d2d" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + 
], + "purl": "pkg:cargo/once_cell@1.21.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/once_cell" + }, + { + "type": "vcs", + "url": "https://github.com/matklad/once_cell" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#oneshot@0.1.13", + "author": "Linus Färnstrand ", + "name": "oneshot", + "version": "0.1.13", + "description": "Oneshot spsc channel with (potentially) lock-free non-blocking send, and a receiver supporting both thread blocking receive operations as well as Future based async polling. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "269bca4c2591a28585d6bf10d9ed0332b7d76900a1b02bec41bdc3a2cdcda107" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/oneshot@0.1.13", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/faern/oneshot" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#openssl-probe@0.2.1", + "author": "Alex Crichton ", + "name": "openssl-probe", + "version": "0.2.1", + "description": "A library for helping to find system-wide trust anchor (\"root\") certificate locations based on paths typically used by `openssl`. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7c87def4c32ab89d880effc9e097653c8da5d6ef28e6b539d313baaacfbafcbe" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/openssl-probe@0.2.1", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/rustls/openssl-probe" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/openssl-probe" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#option-ext@0.2.0", + "author": "Simon Ochsenreither ", + "name": "option-ext", + "version": "0.2.0", + "description": "Extends `Option` with additional operations", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "04744f49eae99ab78e0d5c0b603ab218f515ea8cfe5a456d7629ad883a3b6e7d" + } + ], + "licenses": [ + { + "expression": "MPL-2.0" + } + ], + "purl": "pkg:cargo/option-ext@0.2.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/option-ext/" + }, + { + "type": "website", + "url": "https://github.com/soc/option-ext" + }, + { + "type": "vcs", + "url": "https://github.com/soc/option-ext.git" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#os_str_bytes@6.6.1", + "author": "dylni", + "name": "os_str_bytes", + "version": "6.6.1", + "description": "Convert between byte sequences and platform-native strings ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e2355d85b9a3786f481747ced0e0ff2ba35213a1f9bd406ed906554d7af805a1" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/os_str_bytes@6.6.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/dylni/os_str_bytes" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#page_size@0.6.0", + "author": "Philip Woods ", + "name": "page_size", + "version": "0.6.0", + "description": "Provides an easy, fast, cross-platform way to retrieve the memory page size", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": 
"30d5b2194ed13191c1999ae0704b7839fb18384fa22e49b57eeaa97d79ce40da" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/page_size@0.6.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/page_size/" + }, + { + "type": "website", + "url": "https://github.com/Elzair/page_size_rs" + }, + { + "type": "vcs", + "url": "https://github.com/Elzair/page_size_rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#parking_lot@0.12.5", + "author": "Amanieu d'Antras ", + "name": "parking_lot", + "version": "0.12.5", + "description": "More compact and efficient implementations of the standard synchronization primitives.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "93857453250e3077bd71ff98b6a65ea6621a19bb0f559a85248955ac12c45a1a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/parking_lot@0.12.5", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Amanieu/parking_lot" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#parking_lot_core@0.9.12", + "author": "Amanieu d'Antras ", + "name": "parking_lot_core", + "version": "0.9.12", + "description": "An advanced API for creating custom synchronization primitives.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2621685985a2ebf1c516881c026032ac7deafcda1a2c9b7850dc81e3dfcb64c1" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/parking_lot_core@0.9.12", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Amanieu/parking_lot" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#paste@1.0.15", + "author": "David Tolnay ", + "name": "paste", + "version": "1.0.15", + "description": "Macros for all your token pasting needs", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/paste@1.0.15", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/paste" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/paste" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "author": "The rust-url developers", + "name": "percent-encoding", + "version": "2.3.2", + "description": "Percent encoding and decoding", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/percent-encoding@2.3.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/servo/rust-url/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#phf@0.11.3", + "author": "Steven Fackler ", + "name": "phf", + "version": "0.11.3", + "description": "Runtime support for perfect hash function data structures", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/phf@0.11.3", + 
"externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-phf/rust-phf" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#phf_generator@0.11.3", + "author": "Steven Fackler ", + "name": "phf_generator", + "version": "0.11.3", + "description": "PHF generation logic", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "3c80231409c20246a13fddb31776fb942c38553c51e871f8cbd687a4cfb5843d" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/phf_generator@0.11.3", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-phf/rust-phf" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#phf_macros@0.11.3", + "author": "Steven Fackler ", + "name": "phf_macros", + "version": "0.11.3", + "description": "Macros to generate types in the phf crate", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "f84ac04429c13a7ff43785d75ad27569f2951ce0ffd30a3321230db2fc727216" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/phf_macros@0.11.3", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-phf/rust-phf" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#phf_shared@0.11.3", + "author": "Steven Fackler ", + "name": "phf_shared", + "version": "0.11.3", + "description": "Support code shared by PHF libraries", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "67eabc2ef2a60eb7faa00097bd1ffdb5bd28e62bf39990626a582201b7a754e5" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/phf_shared@0.11.3", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-phf/rust-phf" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pin-project-internal@1.1.10", + "name": "pin-project-internal", + "version": "1.1.10", + "description": "Implementation detail of the `pin-project` crate. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/pin-project-internal@1.1.10", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/taiki-e/pin-project" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "name": "pin-project-lite", + "version": "0.2.16", + "description": "A lightweight version of pin-project written with declarative macros. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3b3cff922bd51709b605d9ead9aa71031d81447142d828eb4a6eba76fe619f9b" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/pin-project-lite@0.2.16", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/taiki-e/pin-project-lite" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pin-project@1.1.10", + "name": "pin-project", + "version": "1.1.10", + "description": "A crate for safe and ergonomic pin-projection. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "677f1add503faace112b9f1373e43e9e054bfdd22ff1a63c1bc485eaec6a6a8a" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/pin-project@1.1.10", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/taiki-e/pin-project" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pin-utils@0.1.0", + "author": "Josef Brandl ", + "name": "pin-utils", + "version": "0.1.0", + "description": "Utilities for pinning ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/pin-utils@0.1.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/pin-utils" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang-nursery/pin-utils" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#potential_utf@0.1.4", + "author": "The ICU4X Project Developers", + "name": "potential_utf", + "version": "0.1.4", + "description": "Unvalidated string and character types", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b73949432f5e2a09657003c25bca5e19a0e9c84f8058ca374f49e0ebe605af77" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/potential_utf@0.1.4", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#powerfmt@0.2.0", + "author": "Jacob Pratt ", + "name": "powerfmt", + "version": "0.2.0", + "description": " `powerfmt` is a library that provides utilities for formatting values. This crate makes it significantly easier to support filling to a minimum width with alignment, avoid heap allocation, and avoid repetitive calculations. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/powerfmt@0.2.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/jhpratt/powerfmt" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ppv-lite86@0.2.21", + "author": "The CryptoCorrosion Contributors", + "name": "ppv-lite86", + "version": "0.2.21", + "description": "Cross-platform cryptography-oriented low-level SIMD library.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/ppv-lite86@0.2.21", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/cryptocorrosion/cryptocorrosion" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "author": "David Tolnay , Alex Crichton ", + "name": "proc-macro2", + "version": "1.0.106", + "description": "A substitute implementation of the compiler's `proc_macro` API to decouple token-based libraries from the procedural macro use case.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/proc-macro2@1.0.106", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/proc-macro2" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/proc-macro2" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#prometheus@0.14.0", + "author": "overvenus@gmail.com, siddontang@gmail.com, vistaswx@gmail.com", + "name": "prometheus", + "version": "0.14.0", + "description": "Prometheus instrumentation library for Rust applications.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3ca5326d8d0b950a9acd87e6a3f94745394f62e4dae1b1ee22b2bc0c394af43a" + } + ], + "licenses": [ + { + "expression": "Apache-2.0" + } + ], + "purl": "pkg:cargo/prometheus@0.14.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/prometheus" + }, + { + "type": "website", + "url": "https://github.com/tikv/rust-prometheus" + }, + { + "type": "vcs", + "url": "https://github.com/tikv/rust-prometheus" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#protobuf-support@3.7.2", + "author": "Stepan Koltsov ", + "name": "protobuf-support", + "version": "3.7.2", + "description": "Code supporting protobuf implementation. None of code in this crate is public API. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3e36c2f31e0a47f9280fb347ef5e461ffcd2c52dd520d8e216b52f93b0b0d7d6" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/protobuf-support@3.7.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://github.com/stepancheg/rust-protobuf/blob/master/README.md" + }, + { + "type": "website", + "url": "https://github.com/stepancheg/rust-protobuf/" + }, + { + "type": "vcs", + "url": "https://github.com/stepancheg/rust-protobuf/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#protobuf@3.7.2", + "author": "Stepan Koltsov ", + "name": "protobuf", + "version": "3.7.2", + "description": "Rust implementation of Google protocol buffers ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d65a1d4ddae7d8b5de68153b48f6aa3bba8cb002b243dbdbc55a5afbc98f99f4" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/protobuf@3.7.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://github.com/stepancheg/rust-protobuf/blob/master/README.md" + }, + { + "type": "website", + "url": "https://github.com/stepancheg/rust-protobuf/" + }, + { + "type": "vcs", + "url": "https://github.com/stepancheg/rust-protobuf/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3-build-config@0.26.0", + "author": "PyO3 Project and Contributors ", + "name": "pyo3-build-config", + "version": "0.26.0", + "description": "Build configuration for the PyO3 ecosystem", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "4fc6ddaf24947d12a9aa31ac65431fb1b851b8f4365426e182901eabfb87df5f" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/pyo3-build-config@0.26.0", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/pyo3/pyo3" + }, + { + "type": "vcs", + "url": "https://github.com/pyo3/pyo3" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3-ffi@0.26.0", + "author": "PyO3 Project and Contributors ", + "name": "pyo3-ffi", + "version": "0.26.0", + "description": "Python-API bindings for the PyO3 ecosystem", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "025474d3928738efb38ac36d4744a74a400c901c7596199e20e45d98eb194105" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/pyo3-ffi@0.26.0", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/pyo3/pyo3" + }, + { + "type": "other", + "url": "python" + }, + { + "type": "vcs", + "url": "https://github.com/pyo3/pyo3" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3-macros-backend@0.26.0", + "author": "PyO3 Project and Contributors ", + "name": "pyo3-macros-backend", + "version": "0.26.0", + "description": "Code generation for PyO3 package", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "100246c0ecf400b475341b8455a9213344569af29a3c841d29270e53102e0fcf" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/pyo3-macros-backend@0.26.0", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/pyo3/pyo3" + }, + { + "type": "vcs", + "url": "https://github.com/pyo3/pyo3" + } + ] + }, + { + "type": 
"library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3-macros@0.26.0", + "author": "PyO3 Project and Contributors ", + "name": "pyo3-macros", + "version": "0.26.0", + "description": "Proc macros for PyO3 package", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2e64eb489f22fe1c95911b77c44cc41e7c19f3082fc81cce90f657cdc42ffded" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/pyo3-macros@0.26.0", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/pyo3/pyo3" + }, + { + "type": "vcs", + "url": "https://github.com/pyo3/pyo3" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3@0.26.0", + "author": "PyO3 Project and Contributors ", + "name": "pyo3", + "version": "0.26.0", + "description": "Bindings to Python interpreter", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7ba0117f4212101ee6544044dae45abe1083d30ce7b29c4b5cbdfa2354e07383" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/pyo3@0.26.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/crate/pyo3/" + }, + { + "type": "website", + "url": "https://github.com/pyo3/pyo3" + }, + { + "type": "vcs", + "url": "https://github.com/pyo3/pyo3" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#quinn-proto@0.11.13", + "name": "quinn-proto", + "version": "0.11.13", + "description": "State machine for the QUIC transport protocol", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f1906b49b0c3bc04b5fe5d86a77925ae6524a19b816ae38ce1e426255f1d8a31" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/quinn-proto@0.11.13", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/quinn-rs/quinn" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#quinn-udp@0.5.14", + "name": "quinn-udp", + "version": "0.5.14", + "description": "UDP sockets with ECN information for the QUIC transport protocol", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "addec6a0dcad8a8d96a771f815f0eaf55f9d1805756410b39f5fa81332574cbd" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/quinn-udp@0.5.14", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/quinn-rs/quinn" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#quinn@0.11.9", + "name": "quinn", + "version": "0.11.9", + "description": "Versatile QUIC transport protocol implementation", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b9e20a958963c291dc322d98411f541009df2ced7b5a4f2bd52337638cfccf20" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/quinn@0.11.9", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/quinn-rs/quinn" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "author": "David Tolnay ", + "name": "quote", + "version": "1.0.44", + "description": "Quasi-quoting macro quote!(...)", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" + 
} + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/quote@1.0.44", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/quote/" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/quote" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rand@0.8.5", + "author": "The Rand Project Developers, The Rust Project Developers", + "name": "rand", + "version": "0.8.5", + "description": "Random number generators and other randomness functionality. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rand@0.8.5", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rand" + }, + { + "type": "website", + "url": "https://rust-random.github.io/book" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/rand" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "author": "The Rand Project Developers, The Rust Project Developers", + "name": "rand", + "version": "0.9.2", + "description": "Random number generators and other randomness functionality. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rand@0.9.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rand" + }, + { + "type": "website", + "url": "https://rust-random.github.io/book" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/rand" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rand_chacha@0.3.1", + "author": "The Rand Project Developers, The Rust Project Developers, The CryptoCorrosion Contributors", + "name": "rand_chacha", + "version": "0.3.1", + "description": "ChaCha random number generator ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rand_chacha@0.3.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rand_chacha" + }, + { + "type": "website", + "url": "https://rust-random.github.io/book" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/rand" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rand_chacha@0.9.0", + "author": "The Rand Project Developers, The Rust Project Developers, The CryptoCorrosion Contributors", + "name": "rand_chacha", + "version": "0.9.0", + "description": "ChaCha random number generator ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d3022b5f1df60f26e1ffddd6c66e8aa15de382ae63b3a0c1bfc0e4d3e3f325cb" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rand_chacha@0.9.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rand_chacha" + }, + { + "type": "website", + "url": "https://rust-random.github.io/book" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/rand" + } + ] + }, + { + 
"type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rand_core@0.6.4", + "author": "The Rand Project Developers, The Rust Project Developers", + "name": "rand_core", + "version": "0.6.4", + "description": "Core random number generator traits and tools for implementation. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rand_core@0.6.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rand_core" + }, + { + "type": "website", + "url": "https://rust-random.github.io/book" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/rand" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rand_core@0.9.5", + "author": "The Rand Project Developers, The Rust Project Developers", + "name": "rand_core", + "version": "0.9.5", + "description": "Core random number generator traits and tools for implementation. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "76afc826de14238e6e8c374ddcc1fa19e374fd8dd986b0d2af0d02377261d83c" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rand_core@0.9.5", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rand_core" + }, + { + "type": "website", + "url": "https://rust-random.github.io/book" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/rand" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rand_distr@0.4.3", + "author": "The Rand Project Developers", + "name": "rand_distr", + "version": "0.4.3", + "description": "Sampling from random number distributions ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "32cb0b9bc82b0a0876c2dd994a7e7a2683d3e7390ca40e6886785ef0c7e3ee31" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rand_distr@0.4.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rand_distr" + }, + { + "type": "website", + "url": "https://rust-random.github.io/book" + }, + { + "type": "vcs", + "url": "https://github.com/rust-random/rand" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rawpointer@0.2.1", + "author": "bluss", + "name": "rawpointer", + "version": "0.2.1", + "description": "Extra methods for raw pointers and `NonNull`. For example `.post_inc()` and `.pre_dec()` (c.f. `ptr++` and `--ptr`), `offset` and `add` for `NonNull`, and the function `ptrdistance`. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "60a357793950651c4ed0f3f52338f53b2f809f32d83a07f72909fa13e4c6c1e3" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rawpointer@0.2.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rawpointer/" + }, + { + "type": "vcs", + "url": "https://github.com/bluss/rawpointer/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#regex-automata@0.4.14", + "author": "The Rust Project Developers, Andrew Gallant ", + "name": "regex-automata", + "version": "0.4.14", + "description": "Automata construction and matching using regular expressions.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/regex-automata@0.4.14", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/regex-automata" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/regex/tree/master/regex-automata" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/regex" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#regex-syntax@0.8.9", + "author": "The Rust Project Developers, Andrew Gallant ", + "name": "regex-syntax", + "version": "0.8.9", + "description": "A regular expression parser.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/regex-syntax@0.8.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/regex-syntax" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/regex/tree/master/regex-syntax" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/regex" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#regex@1.12.3", + "author": "The Rust Project Developers, Andrew Gallant ", + "name": "regex", + "version": "1.12.3", + "description": "An implementation of regular expressions for Rust. This implementation uses finite automata and guarantees linear time matching on all inputs. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/regex@1.12.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/regex" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/regex" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/regex" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#reqwest-middleware@0.5.1", + "author": "Rodrigo Gryzinski ", + "name": "reqwest-middleware", + "version": "0.5.1", + "description": "Wrapper around reqwest to allow for client middleware chains.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "199dda04a536b532d0cc04d7979e39b1c763ea749bf91507017069c00b96056f" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/reqwest-middleware@0.5.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/TrueLayer/reqwest-middleware" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#reqwest-retry@0.9.1", + "author": "Rodrigo Gryzinski ", + "name": "reqwest-retry", + "version": "0.9.1", + "description": "Retry middleware for reqwest.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "fe2412db2af7d2268e7a5406be0431f37d9eb67ff390f35b395716f5f06c2eaa" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/reqwest-retry@0.9.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/TrueLayer/reqwest-middleware" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#reqwest@0.13.2", + "author": "Sean McArthur ", + "name": "reqwest", + "version": "0.13.2", + "description": "higher level HTTP client library", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ab3f43e3283ab1488b624b44b0e988d0acea0b3214e694730a055cb6b2efa801" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/reqwest@0.13.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/reqwest" + }, + { + "type": "vcs", + "url": "https://github.com/seanmonstar/reqwest" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#retry-policies@0.5.1", + "author": "Luca Palmieri ", + "name": "retry-policies", + "version": "0.5.1", + "description": "A collection of plug-and-play retry policies for Rust projects.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "46a4bd6027df676bcb752d3724db0ea3c0c5fc1dd0376fec51ac7dcaf9cc69be" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/retry-policies@0.5.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/TrueLayer/retry-policies" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14", + "name": "ring", + "version": "0.17.14", + "description": "An experiment.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 AND ISC" + } + ], + "purl": 
"pkg:cargo/ring@0.17.14", + "externalReferences": [ + { + "type": "other", + "url": "ring_core_0_17_14_" + }, + { + "type": "vcs", + "url": "https://github.com/briansmith/ring" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rust_decimal@1.40.0", + "author": "Paul Mason ", + "name": "rust_decimal", + "version": "1.40.0", + "description": "Decimal number implementation written in pure Rust suitable for financial and fixed-precision calculations.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "61f703d19852dbf87cbc513643fa81428361eb6940f1ac14fd58155d295a3eb0" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/rust_decimal@1.40.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rust_decimal/" + }, + { + "type": "vcs", + "url": "https://github.com/paupino/rust-decimal" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustc-hash@2.1.1", + "author": "The Rust Project Developers", + "name": "rustc-hash", + "version": "2.1.1", + "description": "A speedy, non-cryptographic hashing algorithm used by rustc", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/rustc-hash@2.1.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rust-lang/rustc-hash" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustix@1.1.3", + "author": "Dan Gohman , Jakub Konka ", + "name": "rustix", + "version": "1.1.3", + "description": "Safe Rust bindings to POSIX/Unix/Linux/Winsock-like syscalls", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/rustix@1.1.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rustix" + }, + { + "type": "vcs", + "url": "https://github.com/bytecodealliance/rustix" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-native-certs@0.8.3", + "name": "rustls-native-certs", + "version": "0.8.3", + "description": "rustls-native-certs allows rustls to use the platform native certificate store", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR ISC OR MIT" + } + ], + "purl": "pkg:cargo/rustls-native-certs@0.8.3", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/rustls/rustls-native-certs" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/rustls-native-certs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "name": "rustls-pki-types", + "version": "1.14.0", + "description": "Shared types for the rustls PKI ecosystem", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "be040f8b0a225e40375822a563fa9524378b9d63112f53e19ffff34df5d33fdd" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": 
"pkg:cargo/rustls-pki-types@1.14.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rustls-pki-types" + }, + { + "type": "website", + "url": "https://github.com/rustls/pki-types" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/pki-types" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-platform-verifier@0.6.2", + "name": "rustls-platform-verifier", + "version": "0.6.2", + "description": "rustls-platform-verifier supports verifying TLS certificates in rustls with the operating system verifier", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1d99feebc72bae7ab76ba994bb5e121b8d83d910ca40b36e0921f53becc41784" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rustls-platform-verifier@0.6.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rustls/rustls-platform-verifier" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-webpki@0.103.9", + "name": "rustls-webpki", + "version": "0.103.9", + "description": "Web PKI X.509 Certificate Verification.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d7df23109aa6c1567d1c575b9952556388da57401e4ace1d15f79eedad0d8f53" + } + ], + "licenses": [ + { + "expression": "ISC" + } + ], + "purl": "pkg:cargo/rustls-webpki@0.103.9", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/rustls/webpki" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "name": "rustls", + "version": "0.23.36", + "description": "Rustls is a modern TLS library written in Rust.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR ISC OR MIT" + } + ], + "purl": "pkg:cargo/rustls@0.23.36", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/rustls/rustls" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/rustls" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#rustversion@1.0.22", + "author": "David Tolnay ", + "name": "rustversion", + "version": "1.0.22", + "description": "Conditional compilation according to rustc compiler version", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/rustversion@1.0.22", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/rustversion" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/rustversion" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ryu@1.0.23", + "author": "David Tolnay ", + "name": "ryu", + "version": "1.0.23", + "description": "Fast floating point to string conversion", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR BSL-1.0" + } + ], + "purl": "pkg:cargo/ryu@1.0.23", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/ryu" + }, + { + "type": 
"vcs", + "url": "https://github.com/dtolnay/ryu" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#safe-transmute@0.11.3", + "author": "наб , Eduardo Pinho , Lukas Kalbertodt , Philipp Tessenow , Marijn Suijten ", + "name": "safe-transmute", + "version": "0.11.3", + "description": "A safeguarded transmute() for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3944826ff8fa8093089aba3acb4ef44b9446a99a16f3bf4e74af3f77d340ab7d" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/safe-transmute@0.11.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://rawcdn.githack.com/nabijaczleweli/safe-transmute-rs/doc/safe_transmute/index.html" + }, + { + "type": "vcs", + "url": "https://github.com/nabijaczleweli/safe-transmute-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#safe_arch@0.7.4", + "author": "Lokathor ", + "name": "safe_arch", + "version": "0.7.4", + "description": "Crate that exposes `core::arch` safely via `#[cfg()]`.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "96b02de82ddbe1b636e6170c21be622223aea188ef2e139be0a5b219ec215323" + } + ], + "licenses": [ + { + "expression": "Zlib OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/safe_arch@0.7.4", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Lokathor/safe_arch" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#same-file@1.0.6", + "author": "Andrew Gallant ", + "name": "same-file", + "version": "1.0.6", + "description": "A simple crate for determining whether two file paths point to the same file. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "93fc1dc3aaa9bfed95e02e6eadabb4baf7e3078b0bd1b4d7b6b0b68378900502" + } + ], + "licenses": [ + { + "expression": "Unlicense OR MIT" + } + ], + "purl": "pkg:cargo/same-file@1.0.6", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/same-file" + }, + { + "type": "website", + "url": "https://github.com/BurntSushi/same-file" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/same-file" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#scoped-tls@1.0.1", + "author": "Alex Crichton ", + "name": "scoped-tls", + "version": "1.0.1", + "description": "Library implementation of the standard library's old `scoped_thread_local!` macro for providing scoped access to thread local storage (TLS) so any type can be stored into TLS. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/scoped-tls@1.0.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/scoped-tls" + }, + { + "type": "website", + "url": "https://github.com/alexcrichton/scoped-tls" + }, + { + "type": "vcs", + "url": "https://github.com/alexcrichton/scoped-tls" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#scopeguard@1.2.0", + "author": "bluss", + "name": "scopeguard", + "version": "1.2.0", + "description": "A RAII scope guard that will run a given closure when it goes out of scope, even if the code between panics (assuming unwinding panic). Defines the macros `defer!`, `defer_on_unwind!`, `defer_on_success!` as shorthands for guards with one of the implemented strategies. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/scopeguard@1.2.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/scopeguard/" + }, + { + "type": "vcs", + "url": "https://github.com/bluss/scopeguard" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "author": "Erick Tryzelaar , David Tolnay ", + "name": "serde", + "version": "1.0.228", + "description": "A generic serialization/deserialization framework", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde@1.0.228", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde" + }, + { + "type": "website", + "url": "https://serde.rs" + }, + { + "type": "vcs", + "url": "https://github.com/serde-rs/serde" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "author": "Erick Tryzelaar , David Tolnay ", + "name": "serde_core", + "version": "1.0.228", + "description": "Serde traits only, with no support for derive -- use the `serde` crate instead", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_core@1.0.228", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde_core" + }, + { + "type": "website", + "url": "https://serde.rs" + }, + { + "type": "vcs", + "url": "https://github.com/serde-rs/serde" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_derive@1.0.228", + "author": "Erick Tryzelaar , David Tolnay ", + "name": "serde_derive", + "version": "1.0.228", + "description": "Macros 1.1 implementation of #[derive(Serialize, Deserialize)]", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_derive@1.0.228", + 
"externalReferences": [ + { + "type": "documentation", + "url": "https://serde.rs/derive.html" + }, + { + "type": "website", + "url": "https://serde.rs" + }, + { + "type": "vcs", + "url": "https://github.com/serde-rs/serde" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "author": "Erick Tryzelaar , David Tolnay ", + "name": "serde_json", + "version": "1.0.149", + "description": "A JSON serialization file format", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_json@1.0.149", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde_json" + }, + { + "type": "vcs", + "url": "https://github.com/serde-rs/json" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_path_to_error@0.1.20", + "author": "David Tolnay ", + "name": "serde_path_to_error", + "version": "0.1.20", + "description": "Path to the element that failed to deserialize", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_path_to_error@0.1.20", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde_path_to_error" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/path-to-error" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_repr@0.1.20", + "author": "David Tolnay ", + "name": "serde_repr", + "version": "0.1.20", + "description": "Derive Serialize and Deserialize that delegates to the underlying repr of a C-like enum.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_repr@0.1.20", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde_repr" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/serde-repr" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#serde_urlencoded@0.7.1", + "author": "Anthony Ramine ", + "name": "serde_urlencoded", + "version": "0.7.1", + "description": "`x-www-form-urlencoded` meets Serde", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/serde_urlencoded@0.7.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/serde_urlencoded/0.7.1/serde_urlencoded/" + }, + { + "type": "vcs", + "url": "https://github.com/nox/serde_urlencoded" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#sha1@0.10.6", + "author": "RustCrypto Developers", + "name": "sha1", + "version": "0.10.6", + "description": "SHA-1 hash function", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e3bf829a2d51ab4a5ddf1352d8470c140cadc8301b2ae1789db023f01cedd6ba" + } + ], + "licenses": [ + { + "expression": "MIT 
OR Apache-2.0" + } + ], + "purl": "pkg:cargo/sha1@0.10.6", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/sha1" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/hashes" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#sha2-asm@0.6.4", + "author": "RustCrypto Developers", + "name": "sha2-asm", + "version": "0.6.4", + "description": "Assembly implementation of SHA-2 compression functions", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b845214d6175804686b2bd482bcffe96651bb2d1200742b712003504a2dac1ab" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/sha2-asm@0.6.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/sha2-asm" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/asm-hashes" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#sha2@0.10.9", + "author": "RustCrypto Developers", + "name": "sha2", + "version": "0.10.9", + "description": "Pure Rust implementation of the SHA-2 hash function family including SHA-224, SHA-256, SHA-384, and SHA-512. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/sha2@0.10.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/sha2" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/hashes" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#sharded-slab@0.1.7", + "author": "Eliza Weisman ", + "name": "sharded-slab", + "version": "0.1.7", + "description": "A lock-free concurrent slab. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/sharded-slab@0.1.7", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/sharded-slab/" + }, + { + "type": "website", + "url": "https://github.com/hawkw/sharded-slab" + }, + { + "type": "vcs", + "url": "https://github.com/hawkw/sharded-slab" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#shellexpand@3.1.1", + "author": "Vladimir Matveev , Ian Jackson ", + "name": "shellexpand", + "version": "3.1.1", + "description": "Shell-like expansions in strings", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "8b1fdf65dd6331831494dd616b30351c38e96e45921a27745cf98490458b90bb" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/shellexpand@3.1.1", + "externalReferences": [ + { + "type": "documentation", + "url": "http://docs.rs/shellexpand/" + }, + { + "type": "vcs", + "url": "https://gitlab.com/ijackson/rust-shellexpand" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#shlex@1.3.0", + "author": "comex , Fenhl , Adrian Taylor , Alex Touchet , Daniel Parks , Garrett Berg ", + "name": "shlex", + "version": "1.3.0", + "description": "Split a string into shell words, like Python's shlex.", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/shlex@1.3.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/comex/rust-shlex" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#signal-hook-registry@1.4.8", + "author": "Michal 'vorner' Vaner , Masaki Hara ", + "name": "signal-hook-registry", + "version": "1.4.8", + "description": "Backend crate for signal-hook", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/signal-hook-registry@1.4.8", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/signal-hook-registry" + }, + { + "type": "vcs", + "url": "https://github.com/vorner/signal-hook" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#signal-hook@0.3.18", + "author": "Michal 'vorner' Vaner , Thomas Himmelstoss ", + "name": "signal-hook", + "version": "0.3.18", + "description": "Unix signal handling", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/signal-hook@0.3.18", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/signal-hook" + }, + { + "type": "vcs", + "url": "https://github.com/vorner/signal-hook" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#simba@0.9.1", + "author": "sebcrozet ", + "name": "simba", + "version": "0.9.1", + "description": "SIMD algebra for Rust", + 
"scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "c99284beb21666094ba2b75bbceda012e610f5479dfcc2d6e2426f53197ffd95" + } + ], + "licenses": [ + { + "expression": "Apache-2.0" + } + ], + "purl": "pkg:cargo/simba@0.9.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/simba" + }, + { + "type": "vcs", + "url": "https://github.com/dimforge/simba" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#siphasher@1.0.2", + "author": "Frank Denis ", + "name": "siphasher", + "version": "1.0.2", + "description": "SipHash-2-4, SipHash-1-3 and 128-bit variants in pure Rust", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "b2aa850e253778c88a04c3d7323b043aeda9d3e30d5971937c1855769763678e" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/siphasher@1.0.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/siphasher" + }, + { + "type": "website", + "url": "https://docs.rs/siphasher" + }, + { + "type": "vcs", + "url": "https://github.com/jedisct1/rust-siphash" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#slab@0.4.12", + "author": "Carl Lerche ", + "name": "slab", + "version": "0.4.12", + "description": "Pre-allocated storage for a uniform data type", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "0c790de23124f9ab44544d7ac05d60440adc586479ce501c1d6d7da3cd8c9cf5" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/slab@0.4.12", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/tokio-rs/slab" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#smallvec@1.15.1", + "author": "The Servo Project Developers", + "name": "smallvec", + "version": "1.15.1", + "description": "'Small vector' optimization: store up to a small number of items on the stack", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/smallvec@1.15.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/smallvec/" + }, + { + "type": "vcs", + "url": "https://github.com/servo/rust-smallvec" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#socket2@0.6.2", + "author": "Alex Crichton , Thomas de Zeeuw ", + "name": "socket2", + "version": "0.6.2", + "description": "Utilities for handling networking sockets with a maximal amount of configuration possible intended. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "86f4aa3ad99f2088c990dfa82d367e19cb29268ed67c574d10d0a4bfe71f07e0" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/socket2@0.6.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/socket2" + }, + { + "type": "website", + "url": "https://github.com/rust-lang/socket2" + }, + { + "type": "vcs", + "url": "https://github.com/rust-lang/socket2" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#stable_deref_trait@1.2.1", + "author": "Robert Grosse ", + "name": "stable_deref_trait", + "version": "1.2.1", + "description": "An unsafe marker trait for types like Box and Rc that dereference to a stable address even when moved, and hence can be used with libraries such as owning_ref and rental. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6ce2be8dc25455e1f91df71bfa12ad37d7af1092ae736f3a6cd0e37bc7810596" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/stable_deref_trait@1.2.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/stable_deref_trait/1.2.1/stable_deref_trait" + }, + { + "type": "vcs", + "url": "https://github.com/storyyeller/stable_deref_trait" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#static_assertions@1.1.0", + "author": "Nikolai Vazquez", + "name": "static_assertions", + "version": "1.1.0", + "description": "Compile-time assertions to ensure that invariants are met.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/static_assertions@1.1.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/static_assertions/" + }, + { + "type": "website", + "url": "https://github.com/nvzqz/static-assertions-rs" + }, + { + "type": "vcs", + "url": "https://github.com/nvzqz/static-assertions-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#statrs@0.18.0", + "author": "Michael Ma", + "name": "statrs", + "version": "0.18.0", + "description": "Statistical computing library for Rust", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2a3fe7c28c6512e766b0874335db33c94ad7b8f9054228ae1c2abd47ce7d335e" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/statrs@0.18.0", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/statrs-dev/statrs" + }, + { + "type": "vcs", + "url": "https://github.com/statrs-dev/statrs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#strsim@0.11.1", + "author": "Danny Guo , maxbachmann ", + "name": "strsim", + "version": "0.11.1", + "description": "Implementations of string similarity metrics. Includes Hamming, Levenshtein, OSA, Damerau-Levenshtein, Jaro, Jaro-Winkler, and Sørensen-Dice. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/strsim@0.11.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/strsim/" + }, + { + "type": "website", + "url": "https://github.com/rapidfuzz/strsim-rs" + }, + { + "type": "vcs", + "url": "https://github.com/rapidfuzz/strsim-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#subtle@2.6.1", + "author": "Isis Lovecruft , Henry de Valence ", + "name": "subtle", + "version": "2.6.1", + "description": "Pure-Rust traits and utilities for constant-time cryptographic implementations.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" + } + ], + "licenses": [ + { + "expression": "BSD-3-Clause" + } + ], + "purl": "pkg:cargo/subtle@2.6.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/subtle" + }, + { + "type": "website", + "url": "https://dalek.rs/" + }, + { + "type": "vcs", + "url": "https://github.com/dalek-cryptography/subtle" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#syn@1.0.109", + "author": "David Tolnay ", + "name": "syn", + "version": "1.0.109", + "description": "Parser for Rust source code", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "72b64191b275b66ffe2469e8af2c1cfe3bafa67b529ead792a6d0160888b4237" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/syn@1.0.109", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/syn" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/syn" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114", + "author": "David Tolnay ", + "name": "syn", + "version": "2.0.114", + "description": "Parser for Rust source code", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/syn@2.0.114", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/syn" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/syn" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#sync_wrapper@1.0.2", + "author": "Actyx AG ", + "name": "sync_wrapper", + "version": "1.0.2", + "description": "A tool for enlisting the compiler's help in proving the absence of concurrency", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + } + ], + "licenses": [ + { + "expression": "Apache-2.0" + } + ], + "purl": "pkg:cargo/sync_wrapper@1.0.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/sync_wrapper" + }, + { + "type": "website", + "url": "https://docs.rs/sync_wrapper" + }, + { + "type": "vcs", + "url": "https://github.com/Actyx/sync_wrapper" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#synchronoise@1.0.1", + "author": "QuietMisdreavus ", + "name": "synchronoise", + 
"version": "1.0.1", + "description": "Synchronization primitives that build upon the standard library", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/synchronoise@1.0.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/synchronoise/" + }, + { + "type": "vcs", + "url": "https://github.com/QuietMisdreavus/synchronoise" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#synstructure@0.13.2", + "author": "Nika Layzell ", + "name": "synstructure", + "version": "0.13.2", + "description": "Helper methods and macros for custom derives", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/synstructure@0.13.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/synstructure" + }, + { + "type": "vcs", + "url": "https://github.com/mystor/synstructure" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#sysinfo@0.38.1", + "author": "Guillaume Gomez ", + "name": "sysinfo", + "version": "0.38.1", + "description": "Library to get system information such as processes, CPUs, disks, components and networks", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "5792d209c2eac902426c0c4a166c9f72147db453af548cf9bf3242644c4d4fe3" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/sysinfo@0.38.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/GuillaumeGomez/sysinfo" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#target-lexicon@0.13.4", + "author": "Dan Gohman ", + "name": "target-lexicon", + "version": "0.13.4", + "description": "LLVM target triple types", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b1dd07eb858a2067e2f3c7155d54e929265c264e6f37efe3ee7a8d1b5a1dd0ba" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 WITH LLVM-exception" + } + ], + "purl": "pkg:cargo/target-lexicon@0.13.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/target-lexicon/" + }, + { + "type": "vcs", + "url": "https://github.com/bytecodealliance/target-lexicon" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "author": "Steven Allen , The Rust Project Developers, Ashley Mannix , Jason White ", + "name": "tempfile", + "version": "3.25.0", + "description": "A library for managing temporary files and directories.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/tempfile@3.25.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/tempfile" + }, + { + "type": "website", + "url": "https://stebalien.com/projects/tempfile-rs/" + }, + { + "type": "vcs", + "url": "https://github.com/Stebalien/tempfile" + } + ] + }, + { + "type": "library", + "bom-ref": 
"registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@1.0.69", + "author": "David Tolnay ", + "name": "thiserror-impl", + "version": "1.0.69", + "description": "Implementation detail of the `thiserror` crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/thiserror-impl@1.0.69", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/dtolnay/thiserror" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@2.0.18", + "author": "David Tolnay ", + "name": "thiserror-impl", + "version": "2.0.18", + "description": "Implementation detail of the `thiserror` crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/thiserror-impl@2.0.18", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/dtolnay/thiserror" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror@1.0.69", + "author": "David Tolnay ", + "name": "thiserror", + "version": "1.0.69", + "description": "derive(Error)", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/thiserror@1.0.69", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/thiserror" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/thiserror" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "author": "David Tolnay ", + "name": "thiserror", + "version": "2.0.18", + "description": "derive(Error)", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/thiserror@2.0.18", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/thiserror" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/thiserror" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#thread_local@1.1.9", + "author": "Amanieu d'Antras ", + "name": "thread_local", + "version": "1.1.9", + "description": "Per-object thread-local storage", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f60246a4944f24f6e018aa17cdeffb7818b76356965d03b07d6a9886e8962185" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/thread_local@1.1.9", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/thread_local/" + }, + { + "type": "vcs", + "url": "https://github.com/Amanieu/thread_local-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#time-core@0.1.8", + "author": "Jacob Pratt , Time contributors", + "name": "time-core", + "version": "0.1.8", + "description": "This crate is an implementation detail and should not be relied upon directly.", + "scope": 
"required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7694e1cfe791f8d31026952abf09c69ca6f6fa4e1a1229e18988f06a04a12dca" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/time-core@0.1.8", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/time-rs/time" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#time-macros@0.2.27", + "author": "Jacob Pratt , Time contributors", + "name": "time-macros", + "version": "0.2.27", + "description": " Procedural macros for the time crate. This crate is an implementation detail and should not be relied upon directly. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2e70e4c5a0e0a8a4823ad65dfe1a6930e4f4d756dcd9dd7939022b5e8c501215" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/time-macros@0.2.27", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/time-rs/time" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#time@0.3.47", + "author": "Jacob Pratt , Time contributors", + "name": "time", + "version": "0.3.47", + "description": "Date and time library. Fully interoperable with the standard library. Mostly compatible with #![no_std].", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "743bd48c283afc0388f9b8827b976905fb217ad9e647fae3a379a9283c4def2c" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/time@0.3.47", + "externalReferences": [ + { + "type": "website", + "url": "https://time-rs.github.io" + }, + { + "type": "vcs", + "url": "https://github.com/time-rs/time" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tinystr@0.8.2", + "author": "The ICU4X Project Developers", + "name": "tinystr", + "version": "0.8.2", + "description": "A small ASCII-only bounded length string representation.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "42d3e9c45c09de15d06dd8acf5f4e0e399e85927b7f00711024eb7ae10fa4869" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/tinystr@0.8.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tinyvec@1.10.0", + "author": "Lokathor ", + "name": "tinyvec", + "version": "1.10.0", + "description": "`tinyvec` provides 100% safe vec-like data structures.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" + } + ], + "licenses": [ + { + "expression": "Zlib OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/tinyvec@1.10.0", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Lokathor/tinyvec" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tinyvec_macros@0.1.1", + "author": "Soveu ", + "name": "tinyvec_macros", + "version": "0.1.1", + "description": "Some macros for tiny containers", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0 OR Zlib" + } + ], + "purl": "pkg:cargo/tinyvec_macros@0.1.1", + 
"externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Soveu/tinyvec_macros" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tokio-macros@2.6.0", + "author": "Tokio Contributors ", + "name": "tokio-macros", + "version": "2.6.0", + "description": "Tokio's proc macros. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tokio-macros@2.6.0", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tokio" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tokio-retry@0.3.0", + "author": "Sam Rijs ", + "name": "tokio-retry", + "version": "0.3.0", + "description": "Extensible, asynchronous retry behaviours for futures/tokio", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tokio-retry@0.3.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/tokio-retry" + }, + { + "type": "vcs", + "url": "https://github.com/srijs/rust-tokio-retry" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tokio-rustls@0.26.4", + "name": "tokio-rustls", + "version": "0.26.4", + "description": "Asynchronous TLS/SSL streams for Tokio using Rustls.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/tokio-rustls@0.26.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/tokio-rustls" + }, + { + "type": "website", + "url": "https://github.com/rustls/tokio-rustls" + }, + { + "type": "vcs", + "url": "https://github.com/rustls/tokio-rustls" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tokio-util@0.7.18", + "author": "Tokio Contributors ", + "name": "tokio-util", + "version": "0.7.18", + "description": "Additional utilities for working with Tokio. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9ae9cec805b01e8fc3fd2fe289f89149a9b66dd16786abd8b19cfa7b48cb0098" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tokio-util@0.7.18", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tokio" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "author": "Tokio Contributors ", + "name": "tokio", + "version": "1.49.0", + "description": "An event-driven, non-blocking I/O platform for writing asynchronous I/O backed applications. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "72a2903cd7736441aac9df9d7688bd0ce48edccaadf181c3b90be801e81d3d86" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tokio@1.49.0", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tokio" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tower-http@0.6.8", + "author": "Tower Maintainers ", + "name": "tower-http", + "version": "0.6.8", + "description": "Tower middleware and utilities for HTTP clients and servers", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tower-http@0.6.8", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/tower-rs/tower-http" + }, + { + "type": "vcs", + "url": "https://github.com/tower-rs/tower-http" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tower-layer@0.3.3", + "author": "Tower Maintainers ", + "name": "tower-layer", + "version": "0.3.3", + "description": "Decorates a `Service` to allow easy composition between `Service`s. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "121c2a6cda46980bb0fcd1647ffaf6cd3fc79a013de288782836f6df9c48780e" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tower-layer@0.3.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/tower-layer/0.3.3" + }, + { + "type": "website", + "url": "https://github.com/tower-rs/tower" + }, + { + "type": "vcs", + "url": "https://github.com/tower-rs/tower" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3", + "author": "Tower Maintainers ", + "name": "tower-service", + "version": "0.3.3", + "description": "Trait representing an asynchronous, request / response based, client or server. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tower-service@0.3.3", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/tower-service/0.3.3" + }, + { + "type": "website", + "url": "https://github.com/tower-rs/tower" + }, + { + "type": "vcs", + "url": "https://github.com/tower-rs/tower" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tower@0.5.3", + "author": "Tower Maintainers ", + "name": "tower", + "version": "0.5.3", + "description": "Tower is a library of modular and reusable components for building robust clients and servers. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ebe5ef63511595f1344e2d5cfa636d973292adc0eec1f0ad45fae9f0851ab1d4" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tower@0.5.3", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/tower-rs/tower" + }, + { + "type": "vcs", + "url": "https://github.com/tower-rs/tower" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-appender@0.2.4", + "author": "Zeki Sherif , Tokio Contributors ", + "name": "tracing-appender", + "version": "0.2.4", + "description": "Provides utilities for file appenders and making non-blocking writers. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "786d480bce6247ab75f005b14ae1624ad978d3029d9113f0a22fa1ac773faeaf" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing-appender@0.2.4", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-attributes@0.1.31", + "author": "Tokio Contributors , Eliza Weisman , David Barsky ", + "name": "tracing-attributes", + "version": "0.1.31", + "description": "Procedural macro attributes for automatically instrumenting functions. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing-attributes@0.1.31", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36", + "author": "Tokio Contributors ", + "name": "tracing-core", + "version": "0.1.36", + "description": "Core primitives for application-level tracing. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "db97caf9d906fbde555dd62fa95ddba9eecfd14cb388e4f491a66d74cd5fb79a" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing-core@0.1.36", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-log@0.2.0", + "author": "Tokio Contributors ", + "name": "tracing-log", + "version": "0.2.0", + "description": "Provides compatibility between `tracing` and the `log` crate. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing-log@0.2.0", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-serde@0.2.0", + "author": "Tokio Contributors ", + "name": "tracing-serde", + "version": "0.2.0", + "description": "A compatibility layer for serializing trace data with `serde` ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "704b1aeb7be0d0a84fc9828cae51dab5970fee5088f83d1dd7ee6f6246fc6ff1" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing-serde@0.2.0", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-subscriber@0.3.22", + "author": "Eliza Weisman , David Barsky , Tokio Contributors ", + "name": "tracing-subscriber", + "version": "0.3.22", + "description": "Utilities for implementing and composing `tracing` subscribers. ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing-subscriber@0.3.22", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "author": "Eliza Weisman , Tokio Contributors ", + "name": "tracing", + "version": "0.1.44", + "description": "Application-level tracing for Rust. 
", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "63e71662fa4b2a2c3a26f570f037eb95bb1f85397f3cd8076caed2f026a6d100" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/tracing@0.1.44", + "externalReferences": [ + { + "type": "website", + "url": "https://tokio.rs" + }, + { + "type": "vcs", + "url": "https://github.com/tokio-rs/tracing" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#try-lock@0.2.5", + "author": "Sean McArthur ", + "name": "try-lock", + "version": "0.2.5", + "description": "A lightweight atomic lock.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "e421abadd41a4225275504ea4d6566923418b7f05506fbc9c0fe86ba7396114b" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/try-lock@0.2.5", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/try-lock" + }, + { + "type": "website", + "url": "https://github.com/seanmonstar/try-lock" + }, + { + "type": "vcs", + "url": "https://github.com/seanmonstar/try-lock" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#twox-hash@2.1.2", + "author": "Jake Goulding ", + "name": "twox-hash", + "version": "2.1.2", + "description": "A Rust implementation of the XXHash and XXH3 algorithms", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9ea3136b675547379c4bd395ca6b938e5ad3c3d20fad76e7fe85f9e0d011419c" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/twox-hash@2.1.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/twox-hash/" + }, + { + "type": "vcs", + "url": "https://github.com/shepmaster/twox-hash" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#typenum@1.19.0", + "author": "Paho Lurie-Gregg , Andre Bogus ", + "name": "typenum", + "version": "1.19.0", + "description": "Typenum is a Rust library for type-level numbers evaluated at compile time. It currently supports bits, unsigned integers, and signed integers. 
It also provides a type-level array of type-level numbers, but its implementation is incomplete.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "562d481066bde0658276a35467c4af00bdc6ee726305698a55b86e61d7ad82bb" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/typenum@1.19.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/typenum" + }, + { + "type": "vcs", + "url": "https://github.com/paholg/typenum" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#typewit@1.14.2", + "author": "rodrimati1992 ", + "name": "typewit", + "version": "1.14.2", + "description": "type-witness-based abstractions, mostly for emulating polymorphism in const fns", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "f8c1ae7cc0fdb8b842d65d127cb981574b0d2b249b74d1c7a2986863dc134f71" + } + ], + "licenses": [ + { + "expression": "Zlib" + } + ], + "purl": "pkg:cargo/typewit@1.14.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/typewit/" + }, + { + "type": "vcs", + "url": "https://github.com/rodrimati1992/typewit/" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#ulid@1.2.1", + "author": "dylanhart ", + "name": "ulid", + "version": "1.2.1", + "description": "a Universally Unique Lexicographically Sortable Identifier implementation", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "470dbf6591da1b39d43c14523b2b469c86879a53e8b758c8e090a470fe7b1fbe" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/ulid@1.2.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/dylanhart/ulid-rs" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#unicase@2.9.0", + "author": "Sean McArthur ", + "name": "unicase", + "version": "2.9.0", + "description": "A case-insensitive wrapper around strings.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/unicase@2.9.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/unicase" + }, + { + "type": "vcs", + "url": "https://github.com/seanmonstar/unicase" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.23", + "author": "David Tolnay ", + "name": "unicode-ident", + "version": "1.0.23", + "description": "Determine whether characters have the XID_Start or XID_Continue properties according to Unicode Standard Annex #31", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" + } + ], + "licenses": [ + { + "expression": "(MIT OR Apache-2.0) AND Unicode-3.0" + } + ], + "purl": "pkg:cargo/unicode-ident@1.0.23", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/unicode-ident" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/unicode-ident" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#unindent@0.2.4", + "author": "David Tolnay ", + "name": "unindent", + "version": "0.2.4", + "description": "Remove a column of leading 
whitespace from a string", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "7264e107f553ccae879d21fbea1d6724ac785e8c3bfc762137959b5802826ef3" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/unindent@0.2.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/unindent" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/indoc" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#untrusted@0.9.0", + "author": "Brian Smith ", + "name": "untrusted", + "version": "0.9.0", + "description": "Safe, fast, zero-panic, zero-crashing, zero-allocation parsing of untrusted inputs in Rust.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" + } + ], + "licenses": [ + { + "expression": "ISC" + } + ], + "purl": "pkg:cargo/untrusted@0.9.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://briansmith.org/rustdoc/untrusted/" + }, + { + "type": "vcs", + "url": "https://github.com/briansmith/untrusted" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#url@2.5.8", + "author": "The rust-url developers", + "name": "url", + "version": "2.5.8", + "description": "URL library for Rust, based on the WHATWG URL Standard", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/url@2.5.8", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/url" + }, + { + "type": "vcs", + "url": "https://github.com/servo/rust-url" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#urlencoding@2.1.3", + "author": "Kornel , Bertram Truong ", + "name": "urlencoding", + "version": "2.1.3", + "description": "A Rust library for doing URL percentage encoding.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/urlencoding@2.1.3", + "externalReferences": [ + { + "type": "website", + "url": "https://lib.rs/urlencoding" + }, + { + "type": "vcs", + "url": "https://github.com/kornelski/rust_urlencoding" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#utf8_iter@1.0.4", + "author": "Henri Sivonen ", + "name": "utf8_iter", + "version": "1.0.4", + "description": "Iterator by char over potentially-invalid UTF-8 in &[u8]", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/utf8_iter@1.0.4", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/utf8_iter/" + }, + { + "type": "website", + "url": "https://docs.rs/utf8_iter/" + }, + { + "type": "vcs", + "url": "https://github.com/hsivonen/utf8_iter" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#utf8parse@0.2.2", + "author": "Joe Wilm , Christian Duerr ", + "name": "utf8parse", + "version": "0.2.2", + "description": 
"Table-driven UTF-8 parser", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/utf8parse@0.2.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/utf8parse/" + }, + { + "type": "vcs", + "url": "https://github.com/alacritty/vte" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#uuid@1.20.0", + "author": "Ashley Mannix, Dylan DPC, Hunar Roop Kahlon", + "name": "uuid", + "version": "1.20.0", + "description": "A library to generate and parse UUIDs.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/uuid@1.20.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/uuid" + }, + { + "type": "website", + "url": "https://github.com/uuid-rs/uuid" + }, + { + "type": "vcs", + "url": "https://github.com/uuid-rs/uuid" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#version_check@0.9.5", + "author": "Sergio Benitez ", + "name": "version_check", + "version": "0.9.5", + "description": "Tiny crate to check the version of the installed/running rustc.", + "scope": "excluded", + "hashes": [ + { + "alg": "SHA-256", + "content": "0b928f33d975fc6ad9f86c8f283853ad26bdd5b10b7f1542aa2fa15e2289105a" + } + ], + "licenses": [ + { + "expression": "MIT OR Apache-2.0" + } + ], + "purl": "pkg:cargo/version_check@0.9.5", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/version_check/" + }, + { + "type": "vcs", + "url": "https://github.com/SergioBenitez/version_check" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#walkdir@2.5.0", + "author": "Andrew Gallant ", + "name": "walkdir", + "version": "2.5.0", + "description": "Recursively walk a directory.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "29790946404f91d9c5d06f9874efddea1dc06c5efe94541a7d6863108e3a5e4b" + } + ], + "licenses": [ + { + "expression": "Unlicense OR MIT" + } + ], + "purl": "pkg:cargo/walkdir@2.5.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/walkdir/" + }, + { + "type": "website", + "url": "https://github.com/BurntSushi/walkdir" + }, + { + "type": "vcs", + "url": "https://github.com/BurntSushi/walkdir" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#want@0.3.1", + "author": "Sean McArthur ", + "name": "want", + "version": "0.3.1", + "description": "Detect when another Future wants a result.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "bfa7760aed19e106de2c7c0b581b509f2f25d3dacaf737cb82ac61bc6d760b0e" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/want@0.3.1", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/want" + }, + { + "type": "vcs", + "url": "https://github.com/seanmonstar/want" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#warp@0.4.2", + "author": "Sean McArthur ", + "name": "warp", + "version": "0.4.2", + "description": "serve the web 
at warp speeds", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "51d06d9202adc1f15d709c4f4a2069be5428aa912cc025d6f268ac441ab066b0" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/warp@0.4.2", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/warp" + }, + { + "type": "vcs", + "url": "https://github.com/seanmonstar/warp" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#whoami@2.1.0", + "name": "whoami", + "version": "2.1.0", + "description": "Rust library for getting information about the current user and environment", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "8fae98cf96deed1b7572272dfc777713c249ae40aa1cf8862e091e8b745f5361" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR BSL-1.0 OR MIT" + } + ], + "purl": "pkg:cargo/whoami@2.1.0", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/whoami" + }, + { + "type": "website", + "url": "https://github.com/ardaku/whoami/releases" + }, + { + "type": "vcs", + "url": "https://github.com/ardaku/whoami" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#wide@0.7.33", + "author": "Lokathor ", + "name": "wide", + "version": "0.7.33", + "description": "A crate to help you go wide.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "0ce5da8ecb62bcd8ec8b7ea19f69a51275e91299be594ea5cc6ef7819e16cd03" + } + ], + "licenses": [ + { + "expression": "Zlib OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/wide@0.7.33", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/Lokathor/wide" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#winnow@0.7.14", + "name": "winnow", + "version": "0.7.14", + "description": "A byte-oriented, zero-copy, parser combinators library", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/winnow@0.7.14", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/winnow-rs/winnow" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#writeable@0.6.2", + "author": "The ICU4X Project Developers", + "name": "writeable", + "version": "0.6.2", + "description": "A more efficient alternative to fmt::Display", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "9edde0db4769d2dc68579893f2306b26c6ecfbe0ef499b013d731b7b9247e0b9" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/writeable@0.6.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#yoke-derive@0.8.1", + "author": "Manish Goregaokar ", + "name": "yoke-derive", + "version": "0.8.1", + "description": "Custom derive for the yoke crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/yoke-derive@0.8.1", + "externalReferences": [ + { + "type": "vcs", + "url": 
"https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#yoke@0.8.1", + "author": "Manish Goregaokar ", + "name": "yoke", + "version": "0.8.1", + "description": "Abstraction allowing borrowed data to be carried along with the backing data it borrows from", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "72d6e5c6afb84d73944e5cedb052c4680d5657337201555f9f2a16b7406d4954" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/yoke@0.8.1", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zerocopy-derive@0.8.39", + "author": "Joshua Liebow-Feeser , Jack Wrenn ", + "name": "zerocopy-derive", + "version": "0.8.39", + "description": "Custom derive for traits from the zerocopy crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" + } + ], + "licenses": [ + { + "expression": "BSD-2-Clause OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/zerocopy-derive@0.8.39", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/google/zerocopy" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zerocopy@0.8.39", + "author": "Joshua Liebow-Feeser , Jack Wrenn ", + "name": "zerocopy", + "version": "0.8.39", + "description": "Zerocopy makes zero-cost memory manipulation effortless. We write \"unsafe\" so you don't have to.", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" + } + ], + "licenses": [ + { + "expression": "BSD-2-Clause OR Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/zerocopy@0.8.39", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/google/zerocopy" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zerofrom-derive@0.1.6", + "author": "Manish Goregaokar ", + "name": "zerofrom-derive", + "version": "0.1.6", + "description": "Custom derive for the zerofrom crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/zerofrom-derive@0.1.6", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zerofrom@0.1.6", + "author": "Manish Goregaokar ", + "name": "zerofrom", + "version": "0.1.6", + "description": "ZeroFrom trait for constructing", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "50cc42e0333e05660c3587f3bf9d0478688e15d870fab3346451ce7f8c9fbea5" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/zerofrom@0.1.6", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2", + "author": "The RustCrypto Project Developers", + "name": "zeroize", + "version": "1.8.2", + "description": "Securely clear secrets from memory with a simple 
trait built on stable Rust primitives which guarantee memory is zeroed using an operation will not be 'optimized away' by the compiler. Uses a portable pure Rust implementation that works everywhere, even WASM! ", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "b97154e67e32c85465826e8bcc1c59429aaaf107c1e4a9e53c8d8ccd5eff88d0" + } + ], + "licenses": [ + { + "expression": "Apache-2.0 OR MIT" + } + ], + "purl": "pkg:cargo/zeroize@1.8.2", + "externalReferences": [ + { + "type": "website", + "url": "https://github.com/RustCrypto/utils/tree/master/zeroize" + }, + { + "type": "vcs", + "url": "https://github.com/RustCrypto/utils" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zerotrie@0.2.3", + "author": "The ICU4X Project Developers", + "name": "zerotrie", + "version": "0.2.3", + "description": "A data structure that efficiently maps strings to integers", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "2a59c17a5562d507e4b54960e8569ebee33bee890c70aa3fe7b97e85a9fd7851" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/zerotrie@0.2.3", + "externalReferences": [ + { + "type": "website", + "url": "https://icu4x.unicode.org" + }, + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zerovec-derive@0.11.2", + "author": "Manish Goregaokar ", + "name": "zerovec-derive", + "version": "0.11.2", + "description": "Custom derive for the zerovec crate", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/zerovec-derive@0.11.2", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5", + "author": "The ICU4X Project Developers", + "name": "zerovec", + "version": "0.11.5", + "description": "Zero-copy vector backed by a byte array", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "6c28719294829477f525be0186d13efa9a3c602f7ec202ca9e353d310fb9a002" + } + ], + "licenses": [ + { + "expression": "Unicode-3.0" + } + ], + "purl": "pkg:cargo/zerovec@0.11.5", + "externalReferences": [ + { + "type": "vcs", + "url": "https://github.com/unicode-org/icu4x" + } + ] + }, + { + "type": "library", + "bom-ref": "registry+https://github.com/rust-lang/crates.io-index#zmij@1.0.20", + "author": "David Tolnay ", + "name": "zmij", + "version": "1.0.20", + "description": "A double-to-string conversion algorithm based on Schubfach and yy", + "scope": "required", + "hashes": [ + { + "alg": "SHA-256", + "content": "4de98dfa5d5b7fef4ee834d0073d560c9ca7b6c46a71d058c48db7960f8cfaf7" + } + ], + "licenses": [ + { + "expression": "MIT" + } + ], + "purl": "pkg:cargo/zmij@1.0.20", + "externalReferences": [ + { + "type": "documentation", + "url": "https://docs.rs/zmij" + }, + { + "type": "vcs", + "url": "https://github.com/dtolnay/zmij" + } + ] + } + ], + "dependencies": [ + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/cas_client#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + 
"registry+https://github.com/rust-lang/crates.io-index#axum@0.8.8", + "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "path+file:///home/runner/work/xet-core/xet-core/cas_object#0.1.0", + "path+file:///home/runner/work/xet-core/xet-core/cas_types#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "registry+https://github.com/rust-lang/crates.io-index#clap@4.5.57", + "path+file:///home/runner/work/xet-core/xet-core/deduplication#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/error_printer#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/file_utils#0.14.2", + "registry+https://github.com/rust-lang/crates.io-index#futures@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#heed@0.22.0", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "path+file:///home/runner/work/xet-core/xet-core/mdb_shard#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#reqwest@0.13.2", + "registry+https://github.com/rust-lang/crates.io-index#reqwest-middleware@0.5.1", + "registry+https://github.com/rust-lang/crates.io-index#reqwest-retry@0.9.1", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#statrs@0.18.0", + "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tokio-retry@0.3.0", + "registry+https://github.com/rust-lang/crates.io-index#tower-http@0.6.8", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "registry+https://github.com/rust-lang/crates.io-index#tracing-log@0.2.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing-subscriber@0.3.22", + "registry+https://github.com/rust-lang/crates.io-index#url@2.5.8", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#warp@0.4.2", + "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/cas_object#0.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#clap@4.5.57", + "registry+https://github.com/rust-lang/crates.io-index#countio@0.3.0", + "registry+https://github.com/rust-lang/crates.io-index#csv@1.4.0", + "path+file:///home/runner/work/xet-core/xet-core/deduplication#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#futures@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#half@2.7.1", + 
"registry+https://github.com/rust-lang/crates.io-index#lz4_flex@0.12.0", + "path+file:///home/runner/work/xet-core/xet-core/mdb_shard#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/cas_types#0.1.0", + "dependsOn": [ + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_repr@0.1.20", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/data#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "path+file:///home/runner/work/xet-core/xet-core/cas_client#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/cas_object#0.1.0", + "path+file:///home/runner/work/xet-core/xet-core/cas_types#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "registry+https://github.com/rust-lang/crates.io-index#clap@4.5.57", + "path+file:///home/runner/work/xet-core/xet-core/deduplication#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/error_printer#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/file_reconstruction#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "path+file:///home/runner/work/xet-core/xet-core/hub_client#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "path+file:///home/runner/work/xet-core/xet-core/mdb_shard#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "path+file:///home/runner/work/xet-core/xet-core/progress_tracking#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#prometheus@0.14.0", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#regex@1.12.3", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#sha2@0.10.9", + "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "registry+https://github.com/rust-lang/crates.io-index#ulid@1.2.1", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + 
"registry+https://github.com/rust-lang/crates.io-index#walkdir@2.5.0", + "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/deduplication#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#gearhash@0.1.3", + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "path+file:///home/runner/work/xet-core/xet-core/mdb_shard#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "path+file:///home/runner/work/xet-core/xet-core/progress_tracking#0.1.0", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/error_printer#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/file_reconstruction#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "path+file:///home/runner/work/xet-core/xet-core/cas_client#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/cas_types#0.1.0", + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "path+file:///home/runner/work/xet-core/xet-core/progress_tracking#0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_config#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/file_utils#0.14.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#colored@3.1.1", + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "registry+https://github.com/rust-lang/crates.io-index#whoami@2.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/hf_xet#1.3.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "path+file:///home/runner/work/xet-core/xet-core/cas_client#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "path+file:///home/runner/work/xet-core/xet-core/data#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/error_printer#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#itertools@0.14.0", + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "path+file:///home/runner/work/xet-core/xet-core/progress_tracking#0.1.0", + 
"registry+https://github.com/rust-lang/crates.io-index#pyo3@0.26.0", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#signal-hook@0.3.18", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_config#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_logging#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/hub_client#0.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "path+file:///home/runner/work/xet-core/xet-core/cas_client#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#reqwest@0.13.2", + "registry+https://github.com/rust-lang/crates.io-index#reqwest-middleware@0.5.1", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#urlencoding@2.1.3" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/mdb_shard#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#clap@4.5.57", + "registry+https://github.com/rust-lang/crates.io-index#futures@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#heapify@0.2.0", + "registry+https://github.com/rust-lang/crates.io-index#itertools@0.14.0", + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#regex@1.12.3", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#static_assertions@1.1.0", + "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#uuid@1.20.0", + "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "registry+https://github.com/rust-lang/crates.io-index#bytemuck@1.25.0", + 
"registry+https://github.com/rust-lang/crates.io-index#heed@0.22.0", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#safe-transmute@0.11.3", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/progress_tracking#0.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "registry+https://github.com/rust-lang/crates.io-index#bincode@1.3.3", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "registry+https://github.com/rust-lang/crates.io-index#ctor@0.6.3", + "registry+https://github.com/rust-lang/crates.io-index#derivative@2.2.0", + "registry+https://github.com/rust-lang/crates.io-index#duration-str@0.19.0", + "path+file:///home/runner/work/xet-core/xet-core/error_printer#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#futures@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "path+file:///home/runner/work/xet-core/xet-core/merklehash#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#pin-project@1.1.10", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#shellexpand@3.1.1", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tokio-util@0.7.18", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/xet_config#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#const-str@1.1.0", + "registry+https://github.com/rust-lang/crates.io-index#konst@0.4.3", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/xet_logging#0.14.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "path+file:///home/runner/work/xet-core/xet-core/error_printer#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#git-version@0.3.9", + "registry+https://github.com/rust-lang/crates.io-index#sysinfo@0.38.1", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "registry+https://github.com/rust-lang/crates.io-index#tracing-appender@0.2.4", + "registry+https://github.com/rust-lang/crates.io-index#tracing-subscriber@0.3.22", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0" + ] + }, + { + "ref": "path+file:///home/runner/work/xet-core/xet-core/xet_runtime#0.1.0", + "dependsOn": [ + 
"registry+https://github.com/rust-lang/crates.io-index#dirs@6.0.0", + "path+file:///home/runner/work/xet-core/xet-core/error_printer#0.14.5", + "registry+https://github.com/rust-lang/crates.io-index#oneshot@0.1.13", + "registry+https://github.com/rust-lang/crates.io-index#reqwest@0.13.2", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "path+file:///home/runner/work/xet-core/xet-core/utils#0.14.5", + "path+file:///home/runner/work/xet-core/xet-core/xet_config#0.14.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#aho-corasick@1.1.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#anstream@0.6.21", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anstyle@1.0.13", + "registry+https://github.com/rust-lang/crates.io-index#anstyle-parse@0.2.7", + "registry+https://github.com/rust-lang/crates.io-index#anstyle-query@1.1.5", + "registry+https://github.com/rust-lang/crates.io-index#colorchoice@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#is_terminal_polyfill@1.70.2", + "registry+https://github.com/rust-lang/crates.io-index#utf8parse@0.2.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#anstyle-parse@0.2.7", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#utf8parse@0.2.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#anstyle-query@1.1.5", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#anstyle@1.0.13", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#approx@0.5.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#arrayref@0.3.9", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#arrayvec@0.7.6", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#atomic-waker@1.1.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#autocfg@1.5.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#aws-lc-rs@1.15.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#aws-lc-sys@0.37.0", + "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#aws-lc-sys@0.37.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.55", + "registry+https://github.com/rust-lang/crates.io-index#cmake@0.1.57", + "registry+https://github.com/rust-lang/crates.io-index#dunce@1.0.5", + "registry+https://github.com/rust-lang/crates.io-index#fs_extra@1.3.0" + 
] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#axum-core@0.5.6", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#http-body-util@0.1.3", + "registry+https://github.com/rust-lang/crates.io-index#mime@0.3.17", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#sync_wrapper@1.0.2", + "registry+https://github.com/rust-lang/crates.io-index#tower-layer@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#axum@0.8.8", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#axum-core@0.5.6", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#form_urlencoded@1.2.2", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#http-body-util@0.1.3", + "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + "registry+https://github.com/rust-lang/crates.io-index#hyper-util@0.1.20", + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "registry+https://github.com/rust-lang/crates.io-index#matchit@0.8.4", + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#mime@0.3.17", + "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#serde_path_to_error@0.1.20", + "registry+https://github.com/rust-lang/crates.io-index#serde_urlencoded@0.7.1", + "registry+https://github.com/rust-lang/crates.io-index#sync_wrapper@1.0.2", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tower@0.5.3", + "registry+https://github.com/rust-lang/crates.io-index#tower-layer@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#bincode@1.3.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.10.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#blake3@1.8.3", + "dependsOn": [ + 
"registry+https://github.com/rust-lang/crates.io-index#arrayref@0.3.9", + "registry+https://github.com/rust-lang/crates.io-index#arrayvec@0.7.6", + "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.55", + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#constant_time_eq@0.4.2", + "registry+https://github.com/rust-lang/crates.io-index#cpufeatures@0.2.17" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#block-buffer@0.10.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#generic-array@0.14.7" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#bstr@1.12.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#regex-automata@0.4.14", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#bytemuck@1.25.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#byteorder@1.5.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.55", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#find-msvc-tools@0.1.9", + "registry+https://github.com/rust-lang/crates.io-index#jobserver@0.1.34", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#shlex@1.3.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cfg-if@0.1.10", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cfg_aliases@0.2.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#iana-time-zone@0.1.65", + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#clap@4.5.57", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#clap_builder@4.5.57", + "registry+https://github.com/rust-lang/crates.io-index#clap_derive@4.5.55" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#clap_builder@4.5.57", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anstream@0.6.21", + "registry+https://github.com/rust-lang/crates.io-index#anstyle@1.0.13", + "registry+https://github.com/rust-lang/crates.io-index#clap_lex@0.7.7", + "registry+https://github.com/rust-lang/crates.io-index#strsim@0.11.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#clap_derive@4.5.55", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#heck@0.5.0", + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#clap_lex@0.7.7", + "dependsOn": [] + }, + { + "ref": 
"registry+https://github.com/rust-lang/crates.io-index#cmake@0.1.57", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.55" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#colorchoice@1.0.4", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#colored@3.1.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#const-str@1.1.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#const_panic@0.2.15", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#typewit@1.14.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#constant_time_eq@0.4.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#countio@0.3.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#futures-io@0.3.31" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#cpufeatures@0.2.17", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#crossbeam-channel@0.5.15", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#crossbeam-utils@0.8.21" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#crossbeam-queue@0.3.12", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#crossbeam-utils@0.8.21" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#crossbeam-utils@0.8.21", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#crypto-common@0.1.7", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#generic-array@0.14.7", + "registry+https://github.com/rust-lang/crates.io-index#typenum@1.19.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#csv-core@0.1.13", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#csv@1.4.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#csv-core@0.1.13", + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "registry+https://github.com/rust-lang/crates.io-index#ryu@1.0.23", + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ctor-proc-macro@0.0.7", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ctor@0.6.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#ctor-proc-macro@0.0.7", + "registry+https://github.com/rust-lang/crates.io-index#dtor@0.1.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#deranged@0.5.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#powerfmt@0.2.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#derivative@2.2.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@1.0.109" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#digest@0.10.7", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#block-buffer@0.10.4", + 
"registry+https://github.com/rust-lang/crates.io-index#crypto-common@0.1.7" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#dirs-sys@0.5.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#option-ext@0.2.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#dirs@6.0.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#dirs-sys@0.5.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#displaydoc@0.2.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#doxygen-rs@0.4.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#phf@0.11.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#dtor-proc-macro@0.0.6", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#dtor@0.1.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#dtor-proc-macro@0.0.6" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#dunce@1.0.5", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#duration-str@0.19.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#chrono@0.4.43", + "registry+https://github.com/rust-lang/crates.io-index#rust_decimal@1.40.0", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#time@0.3.47", + "registry+https://github.com/rust-lang/crates.io-index#winnow@0.7.14" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#either@1.15.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#equivalent@1.0.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#errno@0.3.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#fastrand@2.3.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#find-msvc-tools@0.1.9", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#fnv@1.0.7", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#form_urlencoded@1.2.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#fs_extra@1.3.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures-channel@0.3.31", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-sink@0.3.31" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures-executor@0.3.31", + "dependsOn": [ + 
"registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-task@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures-io@0.3.31", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures-macro@0.3.31", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures-sink@0.3.31", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures-task@0.3.31", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#futures-channel@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-io@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-macro@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-sink@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-task@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#pin-utils@0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#slab@0.4.12" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#futures@0.3.31", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#futures-channel@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-executor@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-io@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-sink@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-task@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#gearhash@0.1.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@0.1.10" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#generic-array@0.14.7", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#typenum@1.19.0", + "registry+https://github.com/rust-lang/crates.io-index#version_check@0.9.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.2.17", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.3.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.4.1", + "dependsOn": [ + 
"registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#git-version-macro@0.3.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#git-version@0.3.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#git-version-macro@0.3.9" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#h2@0.4.13", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#atomic-waker@1.1.2", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#fnv@1.0.7", + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-sink@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#indexmap@2.13.0", + "registry+https://github.com/rust-lang/crates.io-index#slab@0.4.12", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tokio-util@0.7.18", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#half@2.7.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#zerocopy@0.8.39" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#hashbrown@0.16.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#headers-core@0.3.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#headers@0.4.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#headers-core@0.3.0", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#httpdate@1.0.3", + "registry+https://github.com/rust-lang/crates.io-index#mime@0.3.17", + "registry+https://github.com/rust-lang/crates.io-index#sha1@0.10.6" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#heapify@0.2.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#heck@0.5.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#heed-traits@0.20.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#heed-types@0.21.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bincode@1.3.3", + "registry+https://github.com/rust-lang/crates.io-index#byteorder@1.5.0", + "registry+https://github.com/rust-lang/crates.io-index#heed-traits@0.20.0", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + 
"registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#heed@0.22.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.10.0", + "registry+https://github.com/rust-lang/crates.io-index#byteorder@1.5.0", + "registry+https://github.com/rust-lang/crates.io-index#heed-traits@0.20.0", + "registry+https://github.com/rust-lang/crates.io-index#heed-types@0.21.0", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#lmdb-master-sys@0.2.5", + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#page_size@0.6.0", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#synchronoise@1.0.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#http-body-util@0.1.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#httparse@1.10.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#httpdate@1.0.3", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#hyper-rustls@0.27.7", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + "registry+https://github.com/rust-lang/crates.io-index#hyper-util@0.1.20", + "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tokio-rustls@0.26.4", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#hyper-util@0.1.20", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#futures-channel@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + 
"registry+https://github.com/rust-lang/crates.io-index#ipnet@2.11.0", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#socket2@0.6.2", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#atomic-waker@1.1.2", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#futures-channel@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#h2@0.4.13", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#httparse@1.10.1", + "registry+https://github.com/rust-lang/crates.io-index#httpdate@1.0.3", + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#pin-utils@0.1.0", + "registry+https://github.com/rust-lang/crates.io-index#smallvec@1.15.1", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#want@0.3.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#iana-time-zone@0.1.65", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#icu_collections@2.1.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#displaydoc@0.2.5", + "registry+https://github.com/rust-lang/crates.io-index#potential_utf@0.1.4", + "registry+https://github.com/rust-lang/crates.io-index#yoke@0.8.1", + "registry+https://github.com/rust-lang/crates.io-index#zerofrom@0.1.6", + "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#icu_locale_core@2.1.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#displaydoc@0.2.5", + "registry+https://github.com/rust-lang/crates.io-index#litemap@0.8.1", + "registry+https://github.com/rust-lang/crates.io-index#tinystr@0.8.2", + "registry+https://github.com/rust-lang/crates.io-index#writeable@0.6.2", + "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#icu_normalizer@2.1.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#icu_collections@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#icu_normalizer_data@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#icu_properties@2.1.2", + "registry+https://github.com/rust-lang/crates.io-index#icu_provider@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#smallvec@1.15.1", + "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5" + ] + }, + { + "ref": 
"registry+https://github.com/rust-lang/crates.io-index#icu_normalizer_data@2.1.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#icu_properties@2.1.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#icu_collections@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#icu_locale_core@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#icu_properties_data@2.1.2", + "registry+https://github.com/rust-lang/crates.io-index#icu_provider@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#zerotrie@0.2.3", + "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#icu_properties_data@2.1.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#icu_provider@2.1.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#displaydoc@0.2.5", + "registry+https://github.com/rust-lang/crates.io-index#icu_locale_core@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#writeable@0.6.2", + "registry+https://github.com/rust-lang/crates.io-index#yoke@0.8.1", + "registry+https://github.com/rust-lang/crates.io-index#zerofrom@0.1.6", + "registry+https://github.com/rust-lang/crates.io-index#zerotrie@0.2.3", + "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#idna@1.1.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#idna_adapter@1.2.1", + "registry+https://github.com/rust-lang/crates.io-index#smallvec@1.15.1", + "registry+https://github.com/rust-lang/crates.io-index#utf8_iter@1.0.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#idna_adapter@1.2.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#icu_normalizer@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#icu_properties@2.1.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#indexmap@2.13.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#equivalent@1.0.2", + "registry+https://github.com/rust-lang/crates.io-index#hashbrown@0.16.1", + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#indoc@2.0.7", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#rustversion@1.0.22" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ipnet@2.11.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#iri-string@0.7.10", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#is_terminal_polyfill@1.70.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#itertools@0.14.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#either@1.15.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#jobserver@0.1.34", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + 
"ref": "registry+https://github.com/rust-lang/crates.io-index#konst@0.4.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#const_panic@0.2.15", + "registry+https://github.com/rust-lang/crates.io-index#konst_proc_macros@0.4.1", + "registry+https://github.com/rust-lang/crates.io-index#typewit@1.14.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#konst_proc_macros@0.4.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#libm@0.2.16", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#linux-raw-sys@0.11.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#litemap@0.8.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#lmdb-master-sys@0.2.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.55", + "registry+https://github.com/rust-lang/crates.io-index#doxygen-rs@0.4.2", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#lock_api@0.4.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#scopeguard@1.2.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#lru-slab@0.1.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#lz4_flex@0.12.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#twox-hash@2.1.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#matchers@0.2.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#regex-automata@0.4.14" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#matchit@0.8.4", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#matrixmultiply@0.3.10", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#autocfg@1.5.0", + "registry+https://github.com/rust-lang/crates.io-index#rawpointer@0.2.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#memoffset@0.9.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#autocfg@1.5.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#mime@0.3.17", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#mime_guess@2.0.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#mime@0.3.17", + "registry+https://github.com/rust-lang/crates.io-index#unicase@2.9.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#mio@1.1.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#more-asserts@0.3.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#nalgebra@0.33.2", + "dependsOn": [ + 
"registry+https://github.com/rust-lang/crates.io-index#approx@0.5.1", + "registry+https://github.com/rust-lang/crates.io-index#matrixmultiply@0.3.10", + "registry+https://github.com/rust-lang/crates.io-index#num-complex@0.4.6", + "registry+https://github.com/rust-lang/crates.io-index#num-rational@0.4.2", + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.8.5", + "registry+https://github.com/rust-lang/crates.io-index#rand_distr@0.4.3", + "registry+https://github.com/rust-lang/crates.io-index#simba@0.9.1", + "registry+https://github.com/rust-lang/crates.io-index#typenum@1.19.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#nu-ansi-term@0.50.3", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#num-bigint@0.4.6", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#num-integer@0.1.46", + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#num-complex@0.4.6", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#num-conv@0.2.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#num-integer@0.1.46", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#num-rational@0.4.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#num-bigint@0.4.6", + "registry+https://github.com/rust-lang/crates.io-index#num-integer@0.1.46", + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#autocfg@1.5.0", + "registry+https://github.com/rust-lang/crates.io-index#libm@0.2.16" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#oneshot@0.1.13", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#openssl-probe@0.2.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#option-ext@0.2.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#os_str_bytes@6.6.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#page_size@0.6.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#parking_lot@0.12.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#lock_api@0.4.14", + "registry+https://github.com/rust-lang/crates.io-index#parking_lot_core@0.9.12" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#parking_lot_core@0.9.12", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + 
"registry+https://github.com/rust-lang/crates.io-index#smallvec@1.15.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#paste@1.0.15", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#phf@0.11.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#phf_macros@0.11.3", + "registry+https://github.com/rust-lang/crates.io-index#phf_shared@0.11.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#phf_generator@0.11.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#phf_shared@0.11.3", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.8.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#phf_macros@0.11.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#phf_generator@0.11.3", + "registry+https://github.com/rust-lang/crates.io-index#phf_shared@0.11.3", + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#phf_shared@0.11.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#siphasher@1.0.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pin-project-internal@1.1.10", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pin-project@1.1.10", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#pin-project-internal@1.1.10" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pin-utils@0.1.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#potential_utf@0.1.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#powerfmt@0.2.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ppv-lite86@0.2.21", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#zerocopy@0.8.39" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.23" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#prometheus@0.14.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#fnv@1.0.7", + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0", + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#parking_lot@0.12.5", + "registry+https://github.com/rust-lang/crates.io-index#protobuf@3.7.2", + 
"registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#protobuf-support@3.7.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#thiserror@1.0.69" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#protobuf@3.7.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#protobuf-support@3.7.2", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@1.0.69" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3-build-config@0.26.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#target-lexicon@0.13.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3-ffi@0.26.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#pyo3-build-config@0.26.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3-macros-backend@0.26.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#heck@0.5.0", + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#pyo3-build-config@0.26.0", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3-macros@0.26.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#pyo3-macros-backend@0.26.0", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#pyo3@0.26.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#indoc@2.0.7", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#memoffset@0.9.1", + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#pyo3-build-config@0.26.0", + "registry+https://github.com/rust-lang/crates.io-index#pyo3-ffi@0.26.0", + "registry+https://github.com/rust-lang/crates.io-index#pyo3-macros@0.26.0", + "registry+https://github.com/rust-lang/crates.io-index#unindent@0.2.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#quinn-proto@0.11.13", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#aws-lc-rs@1.15.4", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#lru-slab@0.1.2", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "registry+https://github.com/rust-lang/crates.io-index#rustc-hash@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "registry+https://github.com/rust-lang/crates.io-index#slab@0.4.12", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tinyvec@1.10.0", + 
"registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#quinn-udp@0.5.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg_aliases@0.2.1", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#socket2@0.6.2", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#quinn@0.11.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#cfg_aliases@0.2.1", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#quinn-proto@0.11.13", + "registry+https://github.com/rust-lang/crates.io-index#quinn-udp@0.5.14", + "registry+https://github.com/rust-lang/crates.io-index#rustc-hash@2.1.1", + "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "registry+https://github.com/rust-lang/crates.io-index#socket2@0.6.2", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rand@0.8.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#rand_chacha@0.3.1", + "registry+https://github.com/rust-lang/crates.io-index#rand_core@0.6.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#rand_chacha@0.9.0", + "registry+https://github.com/rust-lang/crates.io-index#rand_core@0.9.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rand_chacha@0.3.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#ppv-lite86@0.2.21", + "registry+https://github.com/rust-lang/crates.io-index#rand_core@0.6.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rand_chacha@0.9.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#ppv-lite86@0.2.21", + "registry+https://github.com/rust-lang/crates.io-index#rand_core@0.9.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rand_core@0.6.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.2.17" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rand_core@0.9.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.3.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rand_distr@0.4.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.8.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rawpointer@0.2.1", + "dependsOn": [] + }, + { + "ref": 
"registry+https://github.com/rust-lang/crates.io-index#regex-automata@0.4.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#aho-corasick@1.1.4", + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#regex-syntax@0.8.9" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#regex-syntax@0.8.9", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#regex@1.12.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#aho-corasick@1.1.4", + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#regex-automata@0.4.14", + "registry+https://github.com/rust-lang/crates.io-index#regex-syntax@0.8.9" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#reqwest-middleware@0.5.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#reqwest@0.13.2", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#reqwest-retry@0.9.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#anyhow@1.0.101", + "registry+https://github.com/rust-lang/crates.io-index#async-trait@0.1.89", + "registry+https://github.com/rust-lang/crates.io-index#futures@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + "registry+https://github.com/rust-lang/crates.io-index#reqwest@0.13.2", + "registry+https://github.com/rust-lang/crates.io-index#reqwest-middleware@0.5.1", + "registry+https://github.com/rust-lang/crates.io-index#retry-policies@0.5.1", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#reqwest@0.13.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#base64@0.22.1", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#http-body-util@0.1.3", + "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + "registry+https://github.com/rust-lang/crates.io-index#hyper-rustls@0.27.7", + "registry+https://github.com/rust-lang/crates.io-index#hyper-util@0.1.20", + "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#quinn@0.11.9", + 
"registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "registry+https://github.com/rust-lang/crates.io-index#rustls-platform-verifier@0.6.2", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#sync_wrapper@1.0.2", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tokio-rustls@0.26.4", + "registry+https://github.com/rust-lang/crates.io-index#tokio-util@0.7.18", + "registry+https://github.com/rust-lang/crates.io-index#tower@0.5.3", + "registry+https://github.com/rust-lang/crates.io-index#tower-http@0.6.8", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#url@2.5.8" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#retry-policies@0.5.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.55", + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.2.17", + "registry+https://github.com/rust-lang/crates.io-index#untrusted@0.9.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rust_decimal@1.40.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#arrayvec@0.7.6", + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustc-hash@2.1.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustix@1.1.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.10.0", + "registry+https://github.com/rust-lang/crates.io-index#errno@0.3.14", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#linux-raw-sys@0.11.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-native-certs@0.8.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#openssl-probe@0.2.1", + "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-platform-verifier@0.6.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "registry+https://github.com/rust-lang/crates.io-index#rustls-native-certs@0.8.3", + "registry+https://github.com/rust-lang/crates.io-index#rustls-webpki@0.103.9" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustls-webpki@0.103.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#aws-lc-rs@1.15.4", + "registry+https://github.com/rust-lang/crates.io-index#ring@0.17.14", + 
"registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "registry+https://github.com/rust-lang/crates.io-index#untrusted@0.9.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#aws-lc-rs@1.15.4", + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#rustls-pki-types@1.14.0", + "registry+https://github.com/rust-lang/crates.io-index#rustls-webpki@0.103.9", + "registry+https://github.com/rust-lang/crates.io-index#subtle@2.6.1", + "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#rustversion@1.0.22", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ryu@1.0.23", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#safe-transmute@0.11.3", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#safe_arch@0.7.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytemuck@1.25.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#same-file@1.0.6", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#scoped-tls@1.0.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#scopeguard@1.2.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_derive@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_derive@1.0.228", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0", + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#zmij@1.0.20" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_path_to_error@0.1.20", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_repr@0.1.20", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#serde_urlencoded@0.7.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#form_urlencoded@1.2.2", + 
"registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "registry+https://github.com/rust-lang/crates.io-index#ryu@1.0.23", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#sha1@0.10.6", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#cpufeatures@0.2.17", + "registry+https://github.com/rust-lang/crates.io-index#digest@0.10.7" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#sha2-asm@0.6.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cc@1.2.55" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#sha2@0.10.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4", + "registry+https://github.com/rust-lang/crates.io-index#cpufeatures@0.2.17", + "registry+https://github.com/rust-lang/crates.io-index#digest@0.10.7", + "registry+https://github.com/rust-lang/crates.io-index#sha2-asm@0.6.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#sharded-slab@0.1.7", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#lazy_static@1.5.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#shellexpand@3.1.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bstr@1.12.1", + "registry+https://github.com/rust-lang/crates.io-index#dirs@6.0.0", + "registry+https://github.com/rust-lang/crates.io-index#os_str_bytes@6.6.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#shlex@1.3.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#signal-hook-registry@1.4.8", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#errno@0.3.14", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#signal-hook@0.3.18", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#signal-hook-registry@1.4.8" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#simba@0.9.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#approx@0.5.1", + "registry+https://github.com/rust-lang/crates.io-index#num-complex@0.4.6", + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "registry+https://github.com/rust-lang/crates.io-index#paste@1.0.15", + "registry+https://github.com/rust-lang/crates.io-index#wide@0.7.33" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#siphasher@1.0.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#slab@0.4.12", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#smallvec@1.15.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#socket2@0.6.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#stable_deref_trait@1.2.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#static_assertions@1.1.0", + "dependsOn": [] + }, + { + "ref": 
"registry+https://github.com/rust-lang/crates.io-index#statrs@0.18.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#approx@0.5.1", + "registry+https://github.com/rust-lang/crates.io-index#nalgebra@0.33.2", + "registry+https://github.com/rust-lang/crates.io-index#num-traits@0.2.19", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.8.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#strsim@0.11.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#subtle@2.6.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#syn@1.0.109", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.23" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.23" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#sync_wrapper@1.0.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#synchronoise@1.0.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#crossbeam-queue@0.3.12" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#synstructure@0.13.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#sysinfo@0.38.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#target-lexicon@0.13.4", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tempfile@3.25.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#fastrand@2.3.0", + "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.4.1", + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#rustix@1.1.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@1.0.69", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@2.0.18", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror@1.0.69", + 
"dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@1.0.69" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#thiserror-impl@2.0.18" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#thread_local@1.1.9", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#cfg-if@1.0.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#time-core@0.1.8", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#time-macros@0.2.27", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#num-conv@0.2.0", + "registry+https://github.com/rust-lang/crates.io-index#time-core@0.1.8" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#time@0.3.47", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#deranged@0.5.5", + "registry+https://github.com/rust-lang/crates.io-index#itoa@1.0.17", + "registry+https://github.com/rust-lang/crates.io-index#num-conv@0.2.0", + "registry+https://github.com/rust-lang/crates.io-index#powerfmt@0.2.0", + "registry+https://github.com/rust-lang/crates.io-index#serde_core@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#time-core@0.1.8", + "registry+https://github.com/rust-lang/crates.io-index#time-macros@0.2.27" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tinystr@0.8.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#displaydoc@0.2.5", + "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tinyvec@1.10.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#tinyvec_macros@0.1.1" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tinyvec_macros@0.1.1", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tokio-macros@2.6.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tokio-retry@0.3.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#pin-project@1.1.10", + "registry+https://github.com/rust-lang/crates.io-index#rand@0.8.5", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tokio-rustls@0.26.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#rustls@0.23.36", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tokio-util@0.7.18", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-sink@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0" + ] + }, + { + "ref": 
"registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#libc@0.2.181", + "registry+https://github.com/rust-lang/crates.io-index#mio@1.1.1", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#socket2@0.6.2", + "registry+https://github.com/rust-lang/crates.io-index#tokio-macros@2.6.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tower-http@0.6.8", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bitflags@2.10.0", + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#iri-string@0.7.10", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#tower@0.5.3", + "registry+https://github.com/rust-lang/crates.io-index#tower-layer@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tower-layer@0.3.3", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tower@0.5.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#futures-core@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#sync_wrapper@1.0.2", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tower-layer@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-appender@0.2.4", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#crossbeam-channel@0.5.15", + "registry+https://github.com/rust-lang/crates.io-index#thiserror@2.0.18", + "registry+https://github.com/rust-lang/crates.io-index#time@0.3.47", + "registry+https://github.com/rust-lang/crates.io-index#tracing-subscriber@0.3.22" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-attributes@0.1.31", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-log@0.2.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + 
"registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-serde@0.2.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing-subscriber@0.3.22", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#matchers@0.2.0", + "registry+https://github.com/rust-lang/crates.io-index#nu-ansi-term@0.50.3", + "registry+https://github.com/rust-lang/crates.io-index#once_cell@1.21.3", + "registry+https://github.com/rust-lang/crates.io-index#regex-automata@0.4.14", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#sharded-slab@0.1.7", + "registry+https://github.com/rust-lang/crates.io-index#smallvec@1.15.1", + "registry+https://github.com/rust-lang/crates.io-index#thread_local@1.1.9", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36", + "registry+https://github.com/rust-lang/crates.io-index#tracing-log@0.2.0", + "registry+https://github.com/rust-lang/crates.io-index#tracing-serde@0.2.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "registry+https://github.com/rust-lang/crates.io-index#pin-project-lite@0.2.16", + "registry+https://github.com/rust-lang/crates.io-index#tracing-attributes@0.1.31", + "registry+https://github.com/rust-lang/crates.io-index#tracing-core@0.1.36" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#try-lock@0.2.5", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#twox-hash@2.1.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#typenum@1.19.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#typewit@1.14.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#ulid@1.2.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#rand@0.9.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#unicase@2.9.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#unicode-ident@1.0.23", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#unindent@0.2.4", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#untrusted@0.9.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#url@2.5.8", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#form_urlencoded@1.2.2", + "registry+https://github.com/rust-lang/crates.io-index#idna@1.1.0", + "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#urlencoding@2.1.3", + "dependsOn": [] + }, + { + 
"ref": "registry+https://github.com/rust-lang/crates.io-index#utf8_iter@1.0.4", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#utf8parse@0.2.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#uuid@1.20.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#getrandom@0.3.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#version_check@0.9.5", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#walkdir@2.5.0", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#same-file@1.0.6" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#want@0.3.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#try-lock@0.2.5" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#warp@0.4.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytes@1.11.1", + "registry+https://github.com/rust-lang/crates.io-index#futures-util@0.3.31", + "registry+https://github.com/rust-lang/crates.io-index#headers@0.4.1", + "registry+https://github.com/rust-lang/crates.io-index#http@1.4.0", + "registry+https://github.com/rust-lang/crates.io-index#http-body@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#http-body-util@0.1.3", + "registry+https://github.com/rust-lang/crates.io-index#hyper@1.8.1", + "registry+https://github.com/rust-lang/crates.io-index#hyper-util@0.1.20", + "registry+https://github.com/rust-lang/crates.io-index#log@0.4.29", + "registry+https://github.com/rust-lang/crates.io-index#mime@0.3.17", + "registry+https://github.com/rust-lang/crates.io-index#mime_guess@2.0.5", + "registry+https://github.com/rust-lang/crates.io-index#percent-encoding@2.3.2", + "registry+https://github.com/rust-lang/crates.io-index#pin-project@1.1.10", + "registry+https://github.com/rust-lang/crates.io-index#scoped-tls@1.0.1", + "registry+https://github.com/rust-lang/crates.io-index#serde@1.0.228", + "registry+https://github.com/rust-lang/crates.io-index#serde_json@1.0.149", + "registry+https://github.com/rust-lang/crates.io-index#serde_urlencoded@0.7.1", + "registry+https://github.com/rust-lang/crates.io-index#tokio@1.49.0", + "registry+https://github.com/rust-lang/crates.io-index#tokio-util@0.7.18", + "registry+https://github.com/rust-lang/crates.io-index#tower-service@0.3.3", + "registry+https://github.com/rust-lang/crates.io-index#tracing@0.1.44" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#whoami@2.1.0", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#wide@0.7.33", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#bytemuck@1.25.0", + "registry+https://github.com/rust-lang/crates.io-index#safe_arch@0.7.4" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#winnow@0.7.14", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#memchr@2.8.0" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#writeable@0.6.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#yoke-derive@0.8.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114", + 
"registry+https://github.com/rust-lang/crates.io-index#synstructure@0.13.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#yoke@0.8.1", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#stable_deref_trait@1.2.1", + "registry+https://github.com/rust-lang/crates.io-index#yoke-derive@0.8.1", + "registry+https://github.com/rust-lang/crates.io-index#zerofrom@0.1.6" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zerocopy-derive@0.8.39", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zerocopy@0.8.39", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#zerocopy-derive@0.8.39" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zerofrom-derive@0.1.6", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114", + "registry+https://github.com/rust-lang/crates.io-index#synstructure@0.13.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zerofrom@0.1.6", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#zerofrom-derive@0.1.6" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zeroize@1.8.2", + "dependsOn": [] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zerotrie@0.2.3", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#displaydoc@0.2.5", + "registry+https://github.com/rust-lang/crates.io-index#yoke@0.8.1", + "registry+https://github.com/rust-lang/crates.io-index#zerofrom@0.1.6" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zerovec-derive@0.11.2", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#proc-macro2@1.0.106", + "registry+https://github.com/rust-lang/crates.io-index#quote@1.0.44", + "registry+https://github.com/rust-lang/crates.io-index#syn@2.0.114" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zerovec@0.11.5", + "dependsOn": [ + "registry+https://github.com/rust-lang/crates.io-index#yoke@0.8.1", + "registry+https://github.com/rust-lang/crates.io-index#zerofrom@0.1.6", + "registry+https://github.com/rust-lang/crates.io-index#zerovec-derive@0.11.2" + ] + }, + { + "ref": "registry+https://github.com/rust-lang/crates.io-index#zmij@1.0.20", + "dependsOn": [] + } + ] +} \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/hf_xet/__init__.py b/venv/lib/python3.10/site-packages/hf_xet/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..96ed54a8a066d681e4973e5841a0f5577b619698 --- /dev/null +++ b/venv/lib/python3.10/site-packages/hf_xet/__init__.py @@ -0,0 +1,5 @@ +from .hf_xet import * + +__doc__ = hf_xet.__doc__ +if hasattr(hf_xet, "__all__"): + __all__ = hf_xet.__all__ \ No newline at end of file diff --git a/venv/lib/python3.10/site-packages/hf_xet/__pycache__/__init__.cpython-310.pyc b/venv/lib/python3.10/site-packages/hf_xet/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..97adc22a1cd34226e9ea4abba6d222c454c635e7 Binary files /dev/null and b/venv/lib/python3.10/site-packages/hf_xet/__pycache__/__init__.cpython-310.pyc differ diff --git a/venv/lib/python3.10/site-packages/httpcore/__init__.py b/venv/lib/python3.10/site-packages/httpcore/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9a92dc4a440bdf6f259ec1083c89c817eb7b631b --- /dev/null +++ b/venv/lib/python3.10/site-packages/httpcore/__init__.py @@ -0,0 +1,141 @@ +from ._api import request, stream +from ._async import ( + AsyncConnectionInterface, + AsyncConnectionPool, + AsyncHTTP2Connection, + AsyncHTTP11Connection, + AsyncHTTPConnection, + AsyncHTTPProxy, + AsyncSOCKSProxy, +) +from ._backends.base import ( + SOCKET_OPTION, + AsyncNetworkBackend, + AsyncNetworkStream, + NetworkBackend, + NetworkStream, +) +from ._backends.mock import AsyncMockBackend, AsyncMockStream, MockBackend, MockStream +from ._backends.sync import SyncBackend +from ._exceptions import ( + ConnectError, + ConnectionNotAvailable, + ConnectTimeout, + LocalProtocolError, + NetworkError, + PoolTimeout, + ProtocolError, + ProxyError, + ReadError, + ReadTimeout, + RemoteProtocolError, + TimeoutException, + UnsupportedProtocol, + WriteError, + WriteTimeout, +) +from ._models import URL, Origin, Proxy, Request, Response +from ._ssl import default_ssl_context +from ._sync import ( + ConnectionInterface, + ConnectionPool, + HTTP2Connection, + HTTP11Connection, + HTTPConnection, + HTTPProxy, + SOCKSProxy, +) + +# The 'httpcore.AnyIOBackend' class is conditional on 'anyio' being installed. +try: + from ._backends.anyio import AnyIOBackend +except ImportError: # pragma: nocover + + class AnyIOBackend: # type: ignore + def __init__(self, *args, **kwargs): # type: ignore + msg = ( + "Attempted to use 'httpcore.AnyIOBackend' but 'anyio' is not installed." + ) + raise RuntimeError(msg) + + +# The 'httpcore.TrioBackend' class is conditional on 'trio' being installed. +try: + from ._backends.trio import TrioBackend +except ImportError: # pragma: nocover + + class TrioBackend: # type: ignore + def __init__(self, *args, **kwargs): # type: ignore + msg = "Attempted to use 'httpcore.TrioBackend' but 'trio' is not installed." 
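+        # Like the 'AnyIOBackend' stub above, this placeholder lets
+        # 'import httpcore' succeed without the optional dependency
+        # installed; the error is raised only if the backend is used.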
+            raise RuntimeError(msg)
+
+
+__all__ = [
+    # top-level requests
+    "request",
+    "stream",
+    # models
+    "Origin",
+    "URL",
+    "Request",
+    "Response",
+    "Proxy",
+    # async
+    "AsyncHTTPConnection",
+    "AsyncConnectionPool",
+    "AsyncHTTPProxy",
+    "AsyncHTTP11Connection",
+    "AsyncHTTP2Connection",
+    "AsyncConnectionInterface",
+    "AsyncSOCKSProxy",
+    # sync
+    "HTTPConnection",
+    "ConnectionPool",
+    "HTTPProxy",
+    "HTTP11Connection",
+    "HTTP2Connection",
+    "ConnectionInterface",
+    "SOCKSProxy",
+    # network backends, implementations
+    "SyncBackend",
+    "AnyIOBackend",
+    "TrioBackend",
+    # network backends, mock implementations
+    "AsyncMockBackend",
+    "AsyncMockStream",
+    "MockBackend",
+    "MockStream",
+    # network backends, interface
+    "AsyncNetworkStream",
+    "AsyncNetworkBackend",
+    "NetworkStream",
+    "NetworkBackend",
+    # util
+    "default_ssl_context",
+    "SOCKET_OPTION",
+    # exceptions
+    "ConnectionNotAvailable",
+    "ProxyError",
+    "ProtocolError",
+    "LocalProtocolError",
+    "RemoteProtocolError",
+    "UnsupportedProtocol",
+    "TimeoutException",
+    "PoolTimeout",
+    "ConnectTimeout",
+    "ReadTimeout",
+    "WriteTimeout",
+    "NetworkError",
+    "ConnectError",
+    "ReadError",
+    "WriteError",
+]
+
+__version__ = "1.0.9"
+
+
+__locals = locals()
+for __name in __all__:
+    # Exclude SOCKET_OPTION, it causes AttributeError on Python 3.14
+    if not __name.startswith(("__", "SOCKET_OPTION")):
+        setattr(__locals[__name], "__module__", "httpcore")  # noqa
diff --git a/venv/lib/python3.10/site-packages/httpcore/_api.py b/venv/lib/python3.10/site-packages/httpcore/_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..38b961d10de88bebc98c758d0d1f14af1e7c0370
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/httpcore/_api.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+import contextlib
+import typing
+
+from ._models import URL, Extensions, HeaderTypes, Response
+from ._sync.connection_pool import ConnectionPool
+
+
+def request(
+    method: bytes | str,
+    url: URL | bytes | str,
+    *,
+    headers: HeaderTypes = None,
+    content: bytes | typing.Iterator[bytes] | None = None,
+    extensions: Extensions | None = None,
+) -> Response:
+    """
+    Sends an HTTP request, returning the response.
+
+    ```
+    response = httpcore.request("GET", "https://www.example.com/")
+    ```
+
+    Arguments:
+        method: The HTTP method for the request. Typically one of `"GET"`,
+            `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
+        url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
+            or as str/bytes.
+        headers: The HTTP request headers. Either as a dictionary of str/bytes,
+            or as a list of two-tuples of str/bytes.
+        content: The content of the request body. Either as bytes,
+            or as a bytes iterator.
+        extensions: A dictionary of optional extra information included on the request.
+            Possible keys include `"timeout"`.
+
+    Returns:
+        An instance of `httpcore.Response`.
+    """
+    with ConnectionPool() as pool:
+        return pool.request(
+            method=method,
+            url=url,
+            headers=headers,
+            content=content,
+            extensions=extensions,
+        )
+
+
+@contextlib.contextmanager
+def stream(
+    method: bytes | str,
+    url: URL | bytes | str,
+    *,
+    headers: HeaderTypes = None,
+    content: bytes | typing.Iterator[bytes] | None = None,
+    extensions: Extensions | None = None,
+) -> typing.Iterator[Response]:
+    """
+    Sends an HTTP request, returning the response within a context manager.
+
+    ```
+    with httpcore.stream("GET", "https://www.example.com/") as response:
+        ...
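+        # The response body has not been read at this point; it can be
+        # consumed incrementally, e.g.:
+        # for chunk in response.iter_stream():
+        #     ...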
+    ```
+
+    When using the `stream()` function, the body of the response will not be
+    automatically read. If you want to access the response body you should
+    either use `content = response.read()`, or `for chunk in response.iter_stream()`.
+
+    Arguments:
+        method: The HTTP method for the request. Typically one of `"GET"`,
+            `"OPTIONS"`, `"HEAD"`, `"POST"`, `"PUT"`, `"PATCH"`, or `"DELETE"`.
+        url: The URL of the HTTP request. Either as an instance of `httpcore.URL`,
+            or as str/bytes.
+        headers: The HTTP request headers. Either as a dictionary of str/bytes,
+            or as a list of two-tuples of str/bytes.
+        content: The content of the request body. Either as bytes,
+            or as a bytes iterator.
+        extensions: A dictionary of optional extra information included on the request.
+            Possible keys include `"timeout"`.
+
+    Returns:
+        An instance of `httpcore.Response`.
+    """
+    with ConnectionPool() as pool:
+        with pool.stream(
+            method=method,
+            url=url,
+            headers=headers,
+            content=content,
+            extensions=extensions,
+        ) as response:
+            yield response
diff --git a/venv/lib/python3.10/site-packages/httpcore/_exceptions.py b/venv/lib/python3.10/site-packages/httpcore/_exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc28d44f55bdc4b872951a74780469a3999d9ab4
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/httpcore/_exceptions.py
@@ -0,0 +1,81 @@
+import contextlib
+import typing
+
+ExceptionMapping = typing.Mapping[typing.Type[Exception], typing.Type[Exception]]
+
+
+@contextlib.contextmanager
+def map_exceptions(map: ExceptionMapping) -> typing.Iterator[None]:
+    try:
+        yield
+    except Exception as exc:  # noqa: PIE786
+        for from_exc, to_exc in map.items():
+            if isinstance(exc, from_exc):
+                raise to_exc(exc) from exc
+        raise  # pragma: nocover
+
+
+class ConnectionNotAvailable(Exception):
+    pass
+
+
+class ProxyError(Exception):
+    pass
+
+
+class UnsupportedProtocol(Exception):
+    pass
+
+
+class ProtocolError(Exception):
+    pass
+
+
+class RemoteProtocolError(ProtocolError):
+    pass
+
+
+class LocalProtocolError(ProtocolError):
+    pass
+
+
+# Timeout errors
+
+
+class TimeoutException(Exception):
+    pass
+
+
+class PoolTimeout(TimeoutException):
+    pass
+
+
+class ConnectTimeout(TimeoutException):
+    pass
+
+
+class ReadTimeout(TimeoutException):
+    pass
+
+
+class WriteTimeout(TimeoutException):
+    pass
+
+
+# Network errors
+
+
+class NetworkError(Exception):
+    pass
+
+
+class ConnectError(NetworkError):
+    pass
+
+
+class ReadError(NetworkError):
+    pass
+
+
+class WriteError(NetworkError):
+    pass
diff --git a/venv/lib/python3.10/site-packages/httpcore/_models.py b/venv/lib/python3.10/site-packages/httpcore/_models.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a65f13347d6621289a166d08123cbc8e1ad0157
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/httpcore/_models.py
@@ -0,0 +1,516 @@
+from __future__ import annotations
+
+import base64
+import ssl
+import typing
+import urllib.parse
+
+# Functions for typechecking...
+
+
+ByteOrStr = typing.Union[bytes, str]
+HeadersAsSequence = typing.Sequence[typing.Tuple[ByteOrStr, ByteOrStr]]
+HeadersAsMapping = typing.Mapping[ByteOrStr, ByteOrStr]
+HeaderTypes = typing.Union[HeadersAsSequence, HeadersAsMapping, None]
+
+Extensions = typing.MutableMapping[str, typing.Any]
+
+
+def enforce_bytes(value: bytes | str, *, name: str) -> bytes:
+    """
+    Any arguments that are ultimately represented as bytes can be specified
+    either as bytes or as strings.
+
+    However, we enforce that any string arguments must only contain characters in
+    the plain ASCII range, chr(0)...chr(127). If you need to use characters
+    outside that range then be precise, and use a byte-wise argument.
+    """
+    if isinstance(value, str):
+        try:
+            return value.encode("ascii")
+        except UnicodeEncodeError:
+            raise TypeError(f"{name} strings may not include unicode characters.")
+    elif isinstance(value, bytes):
+        return value
+
+    seen_type = type(value).__name__
+    raise TypeError(f"{name} must be bytes or str, but got {seen_type}.")
+
+
+def enforce_url(value: URL | bytes | str, *, name: str) -> URL:
+    """
+    Type check for URL parameters.
+    """
+    if isinstance(value, (bytes, str)):
+        return URL(value)
+    elif isinstance(value, URL):
+        return value
+
+    seen_type = type(value).__name__
+    raise TypeError(f"{name} must be a URL, bytes, or str, but got {seen_type}.")
+
+
+def enforce_headers(
+    value: HeadersAsMapping | HeadersAsSequence | None = None, *, name: str
+) -> list[tuple[bytes, bytes]]:
+    """
+    Convenience function that ensures all items in request or response headers
+    are either bytes or strings in the plain ASCII range.
+    """
+    if value is None:
+        return []
+    elif isinstance(value, typing.Mapping):
+        return [
+            (
+                enforce_bytes(k, name="header name"),
+                enforce_bytes(v, name="header value"),
+            )
+            for k, v in value.items()
+        ]
+    elif isinstance(value, typing.Sequence):
+        return [
+            (
+                enforce_bytes(k, name="header name"),
+                enforce_bytes(v, name="header value"),
+            )
+            for k, v in value
+        ]
+
+    seen_type = type(value).__name__
+    raise TypeError(
+        f"{name} must be a mapping or sequence of two-tuples, but got {seen_type}."
+    )
+
+
+def enforce_stream(
+    value: bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes] | None,
+    *,
+    name: str,
+) -> typing.Iterable[bytes] | typing.AsyncIterable[bytes]:
+    if value is None:
+        return ByteStream(b"")
+    elif isinstance(value, bytes):
+        return ByteStream(value)
+    return value
+
+
+# * https://tools.ietf.org/html/rfc3986#section-3.2.3
+# * https://url.spec.whatwg.org/#url-miscellaneous
+# * https://url.spec.whatwg.org/#scheme-state
+DEFAULT_PORTS = {
+    b"ftp": 21,
+    b"http": 80,
+    b"https": 443,
+    b"ws": 80,
+    b"wss": 443,
+}
+
+
+def include_request_headers(
+    headers: list[tuple[bytes, bytes]],
+    *,
+    url: "URL",
+    content: None | bytes | typing.Iterable[bytes] | typing.AsyncIterable[bytes],
+) -> list[tuple[bytes, bytes]]:
+    headers_set = set(k.lower() for k, v in headers)
+
+    if b"host" not in headers_set:
+        default_port = DEFAULT_PORTS.get(url.scheme)
+        if url.port is None or url.port == default_port:
+            header_value = url.host
+        else:
+            header_value = b"%b:%d" % (url.host, url.port)
+        headers = [(b"Host", header_value)] + headers
+
+    if (
+        content is not None
+        and b"content-length" not in headers_set
+        and b"transfer-encoding" not in headers_set
+    ):
+        if isinstance(content, bytes):
+            content_length = str(len(content)).encode("ascii")
+            headers += [(b"Content-Length", content_length)]
+        else:
+            headers += [(b"Transfer-Encoding", b"chunked")]  # pragma: nocover
+
+    return headers
+
+
+# Interfaces for byte streams...
+
+
+class ByteStream:
+    """
+    A container for non-streaming content, which supports both sync and async
+    stream iteration.
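+
+    For example:
+
+    ```python
+    stream = ByteStream(b"Hello, world!")
+    assert b"".join(stream) == b"Hello, world!"
+    ```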
+ """ + + def __init__(self, content: bytes) -> None: + self._content = content + + def __iter__(self) -> typing.Iterator[bytes]: + yield self._content + + async def __aiter__(self) -> typing.AsyncIterator[bytes]: + yield self._content + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{len(self._content)} bytes]>" + + +class Origin: + def __init__(self, scheme: bytes, host: bytes, port: int) -> None: + self.scheme = scheme + self.host = host + self.port = port + + def __eq__(self, other: typing.Any) -> bool: + return ( + isinstance(other, Origin) + and self.scheme == other.scheme + and self.host == other.host + and self.port == other.port + ) + + def __str__(self) -> str: + scheme = self.scheme.decode("ascii") + host = self.host.decode("ascii") + port = str(self.port) + return f"{scheme}://{host}:{port}" + + +class URL: + """ + Represents the URL against which an HTTP request may be made. + + The URL may either be specified as a plain string, for convienence: + + ```python + url = httpcore.URL("https://www.example.com/") + ``` + + Or be constructed with explicitily pre-parsed components: + + ```python + url = httpcore.URL(scheme=b'https', host=b'www.example.com', port=None, target=b'/') + ``` + + Using this second more explicit style allows integrations that are using + `httpcore` to pass through URLs that have already been parsed in order to use + libraries such as `rfc-3986` rather than relying on the stdlib. It also ensures + that URL parsing is treated identically at both the networking level and at any + higher layers of abstraction. + + The four components are important here, as they allow the URL to be precisely + specified in a pre-parsed format. They also allow certain types of request to + be created that could not otherwise be expressed. + + For example, an HTTP request to `http://www.example.com/` forwarded via a proxy + at `http://localhost:8080`... + + ```python + # Constructs an HTTP request with a complete URL as the target: + # GET https://www.example.com/ HTTP/1.1 + url = httpcore.URL( + scheme=b'http', + host=b'localhost', + port=8080, + target=b'https://www.example.com/' + ) + request = httpcore.Request( + method="GET", + url=url + ) + ``` + + Another example is constructing an `OPTIONS *` request... + + ```python + # Constructs an 'OPTIONS *' HTTP request: + # OPTIONS * HTTP/1.1 + url = httpcore.URL(scheme=b'https', host=b'www.example.com', target=b'*') + request = httpcore.Request(method="OPTIONS", url=url) + ``` + + This kind of request is not possible to formulate with a URL string, + because the `/` delimiter is always used to demark the target from the + host/port portion of the URL. + + For convenience, string-like arguments may be specified either as strings or + as bytes. However, once a request is being issue over-the-wire, the URL + components are always ultimately required to be a bytewise representation. + + In order to avoid any ambiguity over character encodings, when strings are used + as arguments, they must be strictly limited to the ASCII range `chr(0)`-`chr(127)`. + If you require a bytewise representation that is outside this range you must + handle the character encoding directly, and pass a bytes instance. + """ + + def __init__( + self, + url: bytes | str = "", + *, + scheme: bytes | str = b"", + host: bytes | str = b"", + port: int | None = None, + target: bytes | str = b"", + ) -> None: + """ + Parameters: + url: The complete URL as a string or bytes. + scheme: The URL scheme as a string or bytes. 
+                Typically either `"http"` or `"https"`.
+            host: The URL host as a string or bytes. Such as `"www.example.com"`.
+            port: The port to connect to. Either an integer or `None`.
+            target: The target of the HTTP request. Such as `"/items?search=red"`.
+        """
+        if url:
+            parsed = urllib.parse.urlparse(enforce_bytes(url, name="url"))
+            self.scheme = parsed.scheme
+            self.host = parsed.hostname or b""
+            self.port = parsed.port
+            self.target = (parsed.path or b"/") + (
+                b"?" + parsed.query if parsed.query else b""
+            )
+        else:
+            self.scheme = enforce_bytes(scheme, name="scheme")
+            self.host = enforce_bytes(host, name="host")
+            self.port = port
+            self.target = enforce_bytes(target, name="target")
+
+    @property
+    def origin(self) -> Origin:
+        default_port = {
+            b"http": 80,
+            b"https": 443,
+            b"ws": 80,
+            b"wss": 443,
+            b"socks5": 1080,
+            b"socks5h": 1080,
+        }[self.scheme]
+        return Origin(
+            scheme=self.scheme, host=self.host, port=self.port or default_port
+        )
+
+    def __eq__(self, other: typing.Any) -> bool:
+        return (
+            isinstance(other, URL)
+            and other.scheme == self.scheme
+            and other.host == self.host
+            and other.port == self.port
+            and other.target == self.target
+        )
+
+    def __bytes__(self) -> bytes:
+        if self.port is None:
+            return b"%b://%b%b" % (self.scheme, self.host, self.target)
+        return b"%b://%b:%d%b" % (self.scheme, self.host, self.port, self.target)
+
+    def __repr__(self) -> str:
+        return (
+            f"{self.__class__.__name__}(scheme={self.scheme!r}, "
+            f"host={self.host!r}, port={self.port!r}, target={self.target!r})"
+        )
+
+
+class Request:
+    """
+    An HTTP request.
+    """
+
+    def __init__(
+        self,
+        method: bytes | str,
+        url: URL | bytes | str,
+        *,
+        headers: HeaderTypes = None,
+        content: bytes
+        | typing.Iterable[bytes]
+        | typing.AsyncIterable[bytes]
+        | None = None,
+        extensions: Extensions | None = None,
+    ) -> None:
+        """
+        Parameters:
+            method: The HTTP request method, either as a string or bytes.
+                For example: `GET`.
+            url: The request URL, either as a `URL` instance, or as a string or bytes.
+                For example: `"https://www.example.com"`.
+            headers: The HTTP request headers.
+            content: The content of the request body.
+            extensions: A dictionary of optional extra information included on
+                the request. Possible keys include `"timeout"`, and `"trace"`.
+        """
+        self.method: bytes = enforce_bytes(method, name="method")
+        self.url: URL = enforce_url(url, name="url")
+        self.headers: list[tuple[bytes, bytes]] = enforce_headers(
+            headers, name="headers"
+        )
+        self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = (
+            enforce_stream(content, name="content")
+        )
+        self.extensions = {} if extensions is None else extensions
+
+        if "target" in self.extensions:
+            self.url = URL(
+                scheme=self.url.scheme,
+                host=self.url.host,
+                port=self.url.port,
+                target=self.extensions["target"],
+            )
+
+    def __repr__(self) -> str:
+        return f"<{self.__class__.__name__} [{self.method!r}]>"
+
+
+class Response:
+    """
+    An HTTP response.
+    """
+
+    def __init__(
+        self,
+        status: int,
+        *,
+        headers: HeaderTypes = None,
+        content: bytes
+        | typing.Iterable[bytes]
+        | typing.AsyncIterable[bytes]
+        | None = None,
+        extensions: Extensions | None = None,
+    ) -> None:
+        """
+        Parameters:
+            status: The HTTP status code of the response. For example `200`.
+            headers: The HTTP response headers.
+            content: The content of the response body.
+            extensions: A dictionary of optional extra information included on
+                the response. Possible keys include `"http_version"`,
+                `"reason_phrase"`, and `"network_stream"`.
+ """ + self.status: int = status + self.headers: list[tuple[bytes, bytes]] = enforce_headers( + headers, name="headers" + ) + self.stream: typing.Iterable[bytes] | typing.AsyncIterable[bytes] = ( + enforce_stream(content, name="content") + ) + self.extensions = {} if extensions is None else extensions + + self._stream_consumed = False + + @property + def content(self) -> bytes: + if not hasattr(self, "_content"): + if isinstance(self.stream, typing.Iterable): + raise RuntimeError( + "Attempted to access 'response.content' on a streaming response. " + "Call 'response.read()' first." + ) + else: + raise RuntimeError( + "Attempted to access 'response.content' on a streaming response. " + "Call 'await response.aread()' first." + ) + return self._content + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} [{self.status}]>" + + # Sync interface... + + def read(self) -> bytes: + if not isinstance(self.stream, typing.Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to read an asynchronous response using 'response.read()'. " + "You should use 'await response.aread()' instead." + ) + if not hasattr(self, "_content"): + self._content = b"".join([part for part in self.iter_stream()]) + return self._content + + def iter_stream(self) -> typing.Iterator[bytes]: + if not isinstance(self.stream, typing.Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to stream an asynchronous response using 'for ... in " + "response.iter_stream()'. " + "You should use 'async for ... in response.aiter_stream()' instead." + ) + if self._stream_consumed: + raise RuntimeError( + "Attempted to call 'for ... in response.iter_stream()' more than once." + ) + self._stream_consumed = True + for chunk in self.stream: + yield chunk + + def close(self) -> None: + if not isinstance(self.stream, typing.Iterable): # pragma: nocover + raise RuntimeError( + "Attempted to close an asynchronous response using 'response.close()'. " + "You should use 'await response.aclose()' instead." + ) + if hasattr(self.stream, "close"): + self.stream.close() + + # Async interface... + + async def aread(self) -> bytes: + if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to read an synchronous response using " + "'await response.aread()'. " + "You should use 'response.read()' instead." + ) + if not hasattr(self, "_content"): + self._content = b"".join([part async for part in self.aiter_stream()]) + return self._content + + async def aiter_stream(self) -> typing.AsyncIterator[bytes]: + if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to stream an synchronous response using 'async for ... in " + "response.aiter_stream()'. " + "You should use 'for ... in response.iter_stream()' instead." + ) + if self._stream_consumed: + raise RuntimeError( + "Attempted to call 'async for ... in response.aiter_stream()' " + "more than once." + ) + self._stream_consumed = True + async for chunk in self.stream: + yield chunk + + async def aclose(self) -> None: + if not isinstance(self.stream, typing.AsyncIterable): # pragma: nocover + raise RuntimeError( + "Attempted to close a synchronous response using " + "'await response.aclose()'. " + "You should use 'response.close()' instead." 
+            )
+        if hasattr(self.stream, "aclose"):
+            await self.stream.aclose()
+
+
+class Proxy:
+    def __init__(
+        self,
+        url: URL | bytes | str,
+        auth: tuple[bytes | str, bytes | str] | None = None,
+        headers: HeadersAsMapping | HeadersAsSequence | None = None,
+        ssl_context: ssl.SSLContext | None = None,
+    ):
+        self.url = enforce_url(url, name="url")
+        self.headers = enforce_headers(headers, name="headers")
+        self.ssl_context = ssl_context
+
+        if auth is not None:
+            username = enforce_bytes(auth[0], name="auth")
+            password = enforce_bytes(auth[1], name="auth")
+            userpass = username + b":" + password
+            authorization = b"Basic " + base64.b64encode(userpass)
+            self.auth: tuple[bytes, bytes] | None = (username, password)
+            self.headers = [(b"Proxy-Authorization", authorization)] + self.headers
+        else:
+            self.auth = None
diff --git a/venv/lib/python3.10/site-packages/httpcore/_ssl.py b/venv/lib/python3.10/site-packages/httpcore/_ssl.py
new file mode 100644
index 0000000000000000000000000000000000000000..c99c5a67945b8a3a3544d481e979c791ab45fe23
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/httpcore/_ssl.py
@@ -0,0 +1,9 @@
+import ssl
+
+import certifi
+
+
+def default_ssl_context() -> ssl.SSLContext:
+    context = ssl.create_default_context()
+    context.load_verify_locations(certifi.where())
+    return context
diff --git a/venv/lib/python3.10/site-packages/httpcore/_synchronization.py b/venv/lib/python3.10/site-packages/httpcore/_synchronization.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ecc9e9c363e2f16c4f934cf41cf871826d6a495
--- /dev/null
+++ b/venv/lib/python3.10/site-packages/httpcore/_synchronization.py
@@ -0,0 +1,318 @@
+from __future__ import annotations
+
+import threading
+import types
+
+from ._exceptions import ExceptionMapping, PoolTimeout, map_exceptions
+
+# Our async synchronization primitives use either 'anyio' or 'trio' depending
+# on whether they're running under asyncio or trio.
+
+try:
+    import trio
+except (ImportError, NotImplementedError):  # pragma: nocover
+    trio = None  # type: ignore
+
+try:
+    import anyio
+except ImportError:  # pragma: nocover
+    anyio = None  # type: ignore
+
+
+def current_async_library() -> str:
+    # Determine if we're running under trio or asyncio.
+    # See https://sniffio.readthedocs.io/en/latest/
+    try:
+        import sniffio
+    except ImportError:  # pragma: nocover
+        environment = "asyncio"
+    else:
+        environment = sniffio.current_async_library()
+
+    if environment not in ("asyncio", "trio"):  # pragma: nocover
+        raise RuntimeError("Running under an unsupported async environment.")
+
+    if environment == "asyncio" and anyio is None:  # pragma: nocover
+        raise RuntimeError(
+            "Running with asyncio requires installation of 'httpcore[asyncio]'."
+        )
+
+    if environment == "trio" and trio is None:  # pragma: nocover
+        raise RuntimeError(
+            "Running with trio requires installation of 'httpcore[trio]'."
+        )
+
+    return environment
+
+
+class AsyncLock:
+    """
+    This is a standard lock.
+
+    In the sync case `Lock` provides thread locking.
+    In the async case `AsyncLock` provides async locking.
+    """
+
+    def __init__(self) -> None:
+        self._backend = ""
+
+    def setup(self) -> None:
+        """
+        Detect if we're running under 'asyncio' or 'trio' and create
+        a lock with the correct implementation.
+ """ + self._backend = current_async_library() + if self._backend == "trio": + self._trio_lock = trio.Lock() + elif self._backend == "asyncio": + self._anyio_lock = anyio.Lock() + + async def __aenter__(self) -> AsyncLock: + if not self._backend: + self.setup() + + if self._backend == "trio": + await self._trio_lock.acquire() + elif self._backend == "asyncio": + await self._anyio_lock.acquire() + + return self + + async def __aexit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + if self._backend == "trio": + self._trio_lock.release() + elif self._backend == "asyncio": + self._anyio_lock.release() + + +class AsyncThreadLock: + """ + This is a threading-only lock for no-I/O contexts. + + In the sync case `ThreadLock` provides thread locking. + In the async case `AsyncThreadLock` is a no-op. + """ + + def __enter__(self) -> AsyncThreadLock: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + pass + + +class AsyncEvent: + def __init__(self) -> None: + self._backend = "" + + def setup(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a lock with the correct implementation. + """ + self._backend = current_async_library() + if self._backend == "trio": + self._trio_event = trio.Event() + elif self._backend == "asyncio": + self._anyio_event = anyio.Event() + + def set(self) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + self._trio_event.set() + elif self._backend == "asyncio": + self._anyio_event.set() + + async def wait(self, timeout: float | None = None) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + trio_exc_map: ExceptionMapping = {trio.TooSlowError: PoolTimeout} + timeout_or_inf = float("inf") if timeout is None else timeout + with map_exceptions(trio_exc_map): + with trio.fail_after(timeout_or_inf): + await self._trio_event.wait() + elif self._backend == "asyncio": + anyio_exc_map: ExceptionMapping = {TimeoutError: PoolTimeout} + with map_exceptions(anyio_exc_map): + with anyio.fail_after(timeout): + await self._anyio_event.wait() + + +class AsyncSemaphore: + def __init__(self, bound: int) -> None: + self._bound = bound + self._backend = "" + + def setup(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a semaphore with the correct implementation. + """ + self._backend = current_async_library() + if self._backend == "trio": + self._trio_semaphore = trio.Semaphore( + initial_value=self._bound, max_value=self._bound + ) + elif self._backend == "asyncio": + self._anyio_semaphore = anyio.Semaphore( + initial_value=self._bound, max_value=self._bound + ) + + async def acquire(self) -> None: + if not self._backend: + self.setup() + + if self._backend == "trio": + await self._trio_semaphore.acquire() + elif self._backend == "asyncio": + await self._anyio_semaphore.acquire() + + async def release(self) -> None: + if self._backend == "trio": + self._trio_semaphore.release() + elif self._backend == "asyncio": + self._anyio_semaphore.release() + + +class AsyncShieldCancellation: + # For certain portions of our codebase where we're dealing with + # closing connections during exception handling we want to shield + # the operation from being cancelled. + # + # with AsyncShieldCancellation(): + # ... 
# clean-up operations, shielded from cancellation. + + def __init__(self) -> None: + """ + Detect if we're running under 'asyncio' or 'trio' and create + a shielded scope with the correct implementation. + """ + self._backend = current_async_library() + + if self._backend == "trio": + self._trio_shield = trio.CancelScope(shield=True) + elif self._backend == "asyncio": + self._anyio_shield = anyio.CancelScope(shield=True) + + def __enter__(self) -> AsyncShieldCancellation: + if self._backend == "trio": + self._trio_shield.__enter__() + elif self._backend == "asyncio": + self._anyio_shield.__enter__() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + if self._backend == "trio": + self._trio_shield.__exit__(exc_type, exc_value, traceback) + elif self._backend == "asyncio": + self._anyio_shield.__exit__(exc_type, exc_value, traceback) + + +# Our thread-based synchronization primitives... + + +class Lock: + """ + This is a standard lock. + + In the sync case `Lock` provides thread locking. + In the async case `AsyncLock` provides async locking. + """ + + def __init__(self) -> None: + self._lock = threading.Lock() + + def __enter__(self) -> Lock: + self._lock.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self._lock.release() + + +class ThreadLock: + """ + This is a threading-only lock for no-I/O contexts. + + In the sync case `ThreadLock` provides thread locking. + In the async case `AsyncThreadLock` is a no-op. + """ + + def __init__(self) -> None: + self._lock = threading.Lock() + + def __enter__(self) -> ThreadLock: + self._lock.acquire() + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + self._lock.release() + + +class Event: + def __init__(self) -> None: + self._event = threading.Event() + + def set(self) -> None: + self._event.set() + + def wait(self, timeout: float | None = None) -> None: + if timeout == float("inf"): # pragma: no cover + timeout = None + if not self._event.wait(timeout=timeout): + raise PoolTimeout() # pragma: nocover + + +class Semaphore: + def __init__(self, bound: int) -> None: + self._semaphore = threading.Semaphore(value=bound) + + def acquire(self) -> None: + self._semaphore.acquire() + + def release(self) -> None: + self._semaphore.release() + + +class ShieldCancellation: + # Thread-synchronous codebases don't support cancellation semantics. + # We have this class because we need to mirror the async and sync + # cases within our package, but it's just a no-op. + def __enter__(self) -> ShieldCancellation: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None = None, + exc_value: BaseException | None = None, + traceback: types.TracebackType | None = None, + ) -> None: + pass
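
A minimal usage sketch of the thread-based primitives above (illustrative only; `httpcore._synchronization` is a private module, and the import path assumes the vendored package is importable):

```python
import threading

from httpcore._synchronization import Event, Lock, Semaphore

lock = Lock()                   # context-managed wrapper around threading.Lock
event = Event()                 # wait() raises PoolTimeout if the timeout expires
semaphore = Semaphore(bound=2)  # at most two concurrent holders

def worker() -> None:
    semaphore.acquire()
    try:
        with lock:
            event.wait(timeout=5.0)  # PoolTimeout if not set within 5 seconds
    finally:
        semaphore.release()

threads = [threading.Thread(target=worker) for _ in range(3)]
for t in threads:
    t.start()
event.set()  # wake the waiters; AsyncEvent.set() is the async analogue
for t in threads:
    t.join()
```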