diff --git a/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1905f9a47070070020aecac07b637784e791f969
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/__init__.py
@@ -0,0 +1,23 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""oauthlib integration for Google Auth
+
+This library provides `oauthlib `__
+integration with `google-auth `__.
+"""
+
+from .interactive import get_user_credentials
+
+__all__ = ["get_user_credentials"]
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/flow.py b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/flow.py
new file mode 100644
index 0000000000000000000000000000000000000000..e564ca4302645209e49089923e494c1afd718fb3
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/flow.py
@@ -0,0 +1,507 @@
+# Copyright 2016 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""OAuth 2.0 Authorization Flow
+
+This module provides integration with `requests-oauthlib`_ for running the
+`OAuth 2.0 Authorization Flow`_ and acquiring user credentials. See
+`Using OAuth 2.0 to Access Google APIs`_ for an overview of OAuth 2.0
+authorization scenarios Google APIs support.
+
+Here's an example of using :class:`InstalledAppFlow`::
+
+ from google_auth_oauthlib.flow import InstalledAppFlow
+
+ # Create the flow using the client secrets file from the Google API
+ # Console.
+ flow = InstalledAppFlow.from_client_secrets_file(
+ 'client_secrets.json',
+ scopes=['profile', 'email'])
+
+ flow.run_local_server()
+
+ # You can use flow.credentials, or you can just get a requests session
+ # using flow.authorized_session.
+ session = flow.authorized_session()
+
+ profile_info = session.get(
+ 'https://www.googleapis.com/userinfo/v2/me').json()
+
+ print(profile_info)
+ # {'name': '...', 'email': '...', ...}
+
+.. _requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/latest/
+.. _OAuth 2.0 Authorization Flow:
+ https://tools.ietf.org/html/rfc6749#section-1.2
+.. _Using OAuth 2.0 to Access Google APIs:
+ https://developers.google.com/identity/protocols/oauth2
+
+"""
+from base64 import urlsafe_b64encode
+import hashlib
+import json
+import logging
+
+try:
+ from secrets import SystemRandom
+except ImportError: # pragma: NO COVER
+ from random import SystemRandom
+from string import ascii_letters, digits
+import webbrowser
+import wsgiref.simple_server
+import wsgiref.util
+
+import google.auth.transport.requests
+import google.oauth2.credentials
+
+import google_auth_oauthlib.helpers
+
+
+_LOGGER = logging.getLogger(__name__)
+
+
+class Flow(object):
+ """OAuth 2.0 Authorization Flow
+
+ This class uses a :class:`requests_oauthlib.OAuth2Session` instance at
+ :attr:`oauth2session` to perform all of the OAuth 2.0 logic. This class
+ just provides convenience methods and sane defaults for doing Google's
+ particular flavors of OAuth 2.0.
+
+ Typically you'll construct an instance of this flow using
+ :meth:`from_client_secrets_file` and a `client secrets file`_ obtained
+ from the `Google API Console`_.
+
+ .. _client secrets file:
+ https://developers.google.com/identity/protocols/oauth2/web-server
+ #creatingcred
+ .. _Google API Console:
+ https://console.developers.google.com/apis/credentials
+ """
+
+ def __init__(
+ self,
+ oauth2session,
+ client_type,
+ client_config,
+ redirect_uri=None,
+ code_verifier=None,
+ autogenerate_code_verifier=True,
+ ):
+ """
+ Args:
+ oauth2session (requests_oauthlib.OAuth2Session):
+ The OAuth 2.0 session from ``requests-oauthlib``.
+ client_type (str): The client type, either ``web`` or
+ ``installed``.
+ client_config (Mapping[str, Any]): The client
+ configuration in the Google `client secrets`_ format.
+ redirect_uri (str): The OAuth 2.0 redirect URI if known at flow
+ creation time. Otherwise, it will need to be set using
+ :attr:`redirect_uri`.
+ code_verifier (str): random string of 43-128 chars used to verify
+ the key exchange.using PKCE.
+ autogenerate_code_verifier (bool): If true, auto-generate a
+ code_verifier.
+ .. _client secrets:
+ https://github.com/googleapis/google-api-python-client/blob
+ /main/docs/client-secrets.md
+ """
+ self.client_type = client_type
+ """str: The client type, either ``'web'`` or ``'installed'``"""
+ self.client_config = client_config[client_type]
+ """Mapping[str, Any]: The OAuth 2.0 client configuration."""
+ self.oauth2session = oauth2session
+ """requests_oauthlib.OAuth2Session: The OAuth 2.0 session."""
+ self.redirect_uri = redirect_uri
+ self.code_verifier = code_verifier
+ self.autogenerate_code_verifier = autogenerate_code_verifier
+
+ @classmethod
+ def from_client_config(cls, client_config, scopes, **kwargs):
+ """Creates a :class:`requests_oauthlib.OAuth2Session` from client
+ configuration loaded from a Google-format client secrets file.
+
+ Args:
+ client_config (Mapping[str, Any]): The client
+ configuration in the Google `client secrets`_ format.
+ scopes (Sequence[str]): The list of scopes to request during the
+ flow.
+ kwargs: Any additional parameters passed to
+ :class:`requests_oauthlib.OAuth2Session`
+
+ Returns:
+ Flow: The constructed Flow instance.
+
+ Raises:
+ ValueError: If the client configuration is not in the correct
+ format.
+
+ .. _client secrets:
+ https://github.com/googleapis/google-api-python-client/blob/main/docs/client-secrets.md
+ """
+ if "web" in client_config:
+ client_type = "web"
+ elif "installed" in client_config:
+ client_type = "installed"
+ else:
+ raise ValueError("Client secrets must be for a web or installed app.")
+
+ # these args cannot be passed to requests_oauthlib.OAuth2Session
+ code_verifier = kwargs.pop("code_verifier", None)
+ autogenerate_code_verifier = kwargs.pop("autogenerate_code_verifier", None)
+
+ (
+ session,
+ client_config,
+ ) = google_auth_oauthlib.helpers.session_from_client_config(
+ client_config, scopes, **kwargs
+ )
+
+ redirect_uri = kwargs.get("redirect_uri", None)
+
+ return cls(
+ session,
+ client_type,
+ client_config,
+ redirect_uri,
+ code_verifier,
+ autogenerate_code_verifier,
+ )
+
+ @classmethod
+ def from_client_secrets_file(cls, client_secrets_file, scopes, **kwargs):
+ """Creates a :class:`Flow` instance from a Google client secrets file.
+
+ Args:
+ client_secrets_file (str): The path to the client secrets .json
+ file.
+ scopes (Sequence[str]): The list of scopes to request during the
+ flow.
+ kwargs: Any additional parameters passed to
+ :class:`requests_oauthlib.OAuth2Session`
+
+ Returns:
+ Flow: The constructed Flow instance.
+ """
+ with open(client_secrets_file, "r") as json_file:
+ client_config = json.load(json_file)
+
+ return cls.from_client_config(client_config, scopes=scopes, **kwargs)
+
+ @property
+ def redirect_uri(self):
+ """The OAuth 2.0 redirect URI. Pass-through to
+ ``self.oauth2session.redirect_uri``."""
+ return self.oauth2session.redirect_uri
+
+ @redirect_uri.setter
+ def redirect_uri(self, value):
+ """The OAuth 2.0 redirect URI. Pass-through to
+ ``self.oauth2session.redirect_uri``."""
+ self.oauth2session.redirect_uri = value
+
+ def authorization_url(self, **kwargs):
+ """Generates an authorization URL.
+
+ This is the first step in the OAuth 2.0 Authorization Flow. The user's
+ browser should be redirected to the returned URL.
+
+ This method calls
+ :meth:`requests_oauthlib.OAuth2Session.authorization_url`
+ and specifies the client configuration's authorization URI (usually
+ Google's authorization server) and specifies that "offline" access is
+ desired. This is required in order to obtain a refresh token.
+
+ Args:
+ kwargs: Additional arguments passed through to
+ :meth:`requests_oauthlib.OAuth2Session.authorization_url`
+
+ Returns:
+ Tuple[str, str]: The generated authorization URL and state. The
+ user must visit the URL to complete the flow. The state is used
+ when completing the flow to verify that the request originated
+ from your application. If your application is using a different
+ :class:`Flow` instance to obtain the token, you will need to
+ specify the ``state`` when constructing the :class:`Flow`.
+ """
+ kwargs.setdefault("access_type", "offline")
+ if self.autogenerate_code_verifier:
+ chars = ascii_letters + digits + "-._~"
+ rnd = SystemRandom()
+ random_verifier = [rnd.choice(chars) for _ in range(0, 128)]
+ self.code_verifier = "".join(random_verifier)
+
+ if self.code_verifier:
+ code_hash = hashlib.sha256()
+ code_hash.update(str.encode(self.code_verifier))
+ unencoded_challenge = code_hash.digest()
+ b64_challenge = urlsafe_b64encode(unencoded_challenge)
+ code_challenge = b64_challenge.decode().split("=")[0]
+ kwargs.setdefault("code_challenge", code_challenge)
+ kwargs.setdefault("code_challenge_method", "S256")
+ url, state = self.oauth2session.authorization_url(
+ self.client_config["auth_uri"], **kwargs
+ )
+
+ return url, state
+
+ def fetch_token(self, **kwargs):
+ """Completes the Authorization Flow and obtains an access token.
+
+ This is the final step in the OAuth 2.0 Authorization Flow. This is
+ called after the user consents.
+
+ This method calls
+ :meth:`requests_oauthlib.OAuth2Session.fetch_token`
+ and specifies the client configuration's token URI (usually Google's
+ token server).
+
+ Args:
+ kwargs: Arguments passed through to
+ :meth:`requests_oauthlib.OAuth2Session.fetch_token`. At least
+ one of ``code`` or ``authorization_response`` must be
+ specified.
+
+ Returns:
+ Mapping[str, str]: The obtained tokens. Typically, you will not use
+ return value of this function and instead use
+ :meth:`credentials` to obtain a
+ :class:`~google.auth.credentials.Credentials` instance.
+ """
+ kwargs.setdefault("client_secret", self.client_config["client_secret"])
+ kwargs.setdefault("code_verifier", self.code_verifier)
+ return self.oauth2session.fetch_token(self.client_config["token_uri"], **kwargs)
+
+ @property
+ def credentials(self):
+ """Returns credentials from the OAuth 2.0 session.
+
+ :meth:`fetch_token` must be called before accessing this. This method
+ constructs a :class:`google.oauth2.credentials.Credentials` class using
+ the session's token and the client config.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The constructed credentials.
+
+ Raises:
+ ValueError: If there is no access token in the session.
+ """
+ return google_auth_oauthlib.helpers.credentials_from_session(
+ self.oauth2session, self.client_config
+ )
+
+ def authorized_session(self):
+ """Returns a :class:`requests.Session` authorized with credentials.
+
+ :meth:`fetch_token` must be called before this method. This method
+ constructs a :class:`google.auth.transport.requests.AuthorizedSession`
+ class using this flow's :attr:`credentials`.
+
+ Returns:
+ google.auth.transport.requests.AuthorizedSession: The constructed
+ session.
+ """
+ return google.auth.transport.requests.AuthorizedSession(self.credentials)
+
+
+class InstalledAppFlow(Flow):
+ """Authorization flow helper for installed applications.
+
+ This :class:`Flow` subclass makes it easier to perform the
+ `Installed Application Authorization Flow`_. This flow is useful for
+ local development or applications that are installed on a desktop operating
+ system.
+
+ This flow uses a local server strategy provided by :meth:`run_local_server`.
+
+ Example::
+
+ from google_auth_oauthlib.flow import InstalledAppFlow
+
+ flow = InstalledAppFlow.from_client_secrets_file(
+ 'client_secrets.json',
+ scopes=['profile', 'email'])
+
+ flow.run_local_server()
+
+ session = flow.authorized_session()
+
+ profile_info = session.get(
+ 'https://www.googleapis.com/userinfo/v2/me').json()
+
+ print(profile_info)
+ # {'name': '...', 'email': '...', ...}
+
+
+ Note that this isn't the only way to accomplish the installed
+ application flow, just one of the most common. You can use the
+ :class:`Flow` class to perform the same flow with different methods of
+ presenting the authorization URL to the user or obtaining the authorization
+ response, such as using an embedded web view.
+
+ .. _Installed Application Authorization Flow:
+ https://github.com/googleapis/google-api-python-client/blob/main/docs/oauth-installed.md
+ """
+
+ _DEFAULT_AUTH_PROMPT_MESSAGE = (
+ "Please visit this URL to authorize this application: {url}"
+ )
+ """str: The message to display when prompting the user for
+ authorization."""
+ _DEFAULT_AUTH_CODE_MESSAGE = "Enter the authorization code: "
+ """str: The message to display when prompting the user for the
+ authorization code. Used only by the console strategy."""
+
+ _DEFAULT_WEB_SUCCESS_MESSAGE = (
+ "The authentication flow has completed. You may close this window."
+ )
+
+ def run_local_server(
+ self,
+ host="localhost",
+ bind_addr=None,
+ port=8080,
+ authorization_prompt_message=_DEFAULT_AUTH_PROMPT_MESSAGE,
+ success_message=_DEFAULT_WEB_SUCCESS_MESSAGE,
+ open_browser=True,
+ redirect_uri_trailing_slash=True,
+ timeout_seconds=None,
+ token_audience=None,
+ browser=None,
+ **kwargs
+ ):
+ """Run the flow using the server strategy.
+
+ The server strategy instructs the user to open the authorization URL in
+ their browser and will attempt to automatically open the URL for them.
+ It will start a local web server to listen for the authorization
+ response. Once authorization is complete the authorization server will
+ redirect the user's browser to the local web server. The web server
+ will get the authorization code from the response and shutdown. The
+ code is then exchanged for a token.
+
+ Args:
+ host (str): The hostname for the local redirect server. This will
+ be served over http, not https.
+ bind_addr (str): Optionally provide an ip address for the redirect
+ server to listen on when it is not the same as host
+ (e.g. in a container). Default value is None,
+ which means that the redirect server will listen
+ on the ip address specified in the host parameter.
+ port (int): The port for the local redirect server.
+ authorization_prompt_message (str | None): The message to display to tell
+ the user to navigate to the authorization URL. If None or empty,
+ don't display anything.
+ success_message (str): The message to display in the web browser
+ the authorization flow is complete.
+ open_browser (bool): Whether or not to open the authorization URL
+ in the user's browser.
+ redirect_uri_trailing_slash (bool): whether or not to add trailing
+ slash when constructing the redirect_uri. Default value is True.
+ timeout_seconds (int): It will raise an error after the timeout timing
+ if there are no credentials response. The value is in seconds.
+ When set to None there is no timeout.
+ Default value is None.
+ token_audience (str): Passed along with the request for an access
+ token. Determines the endpoints with which the token can be
+ used. Optional.
+ browser (str): specify which browser to open for authentication. If not
+ specified this defaults to default browser.
+ kwargs: Additional keyword arguments passed through to
+ :meth:`authorization_url`.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The OAuth 2.0 credentials
+ for the user.
+ """
+ wsgi_app = _RedirectWSGIApp(success_message)
+ # Fail fast if the address is occupied
+ wsgiref.simple_server.WSGIServer.allow_reuse_address = False
+ local_server = wsgiref.simple_server.make_server(
+ bind_addr or host, port, wsgi_app, handler_class=_WSGIRequestHandler
+ )
+
+ try:
+ redirect_uri_format = (
+ "http://{}:{}/" if redirect_uri_trailing_slash else "http://{}:{}"
+ )
+ self.redirect_uri = redirect_uri_format.format(
+ host, local_server.server_port
+ )
+ auth_url, _ = self.authorization_url(**kwargs)
+
+ if open_browser:
+ # if browser is None it defaults to default browser
+ webbrowser.get(browser).open(auth_url, new=1, autoraise=True)
+
+ if authorization_prompt_message:
+ print(authorization_prompt_message.format(url=auth_url))
+
+ local_server.timeout = timeout_seconds
+ local_server.handle_request()
+
+ # Note: using https here because oauthlib is very picky that
+ # OAuth 2.0 should only occur over https.
+ authorization_response = wsgi_app.last_request_uri.replace("http", "https")
+ self.fetch_token(
+ authorization_response=authorization_response, audience=token_audience
+ )
+ finally:
+ local_server.server_close()
+
+ return self.credentials
+
+
+class _WSGIRequestHandler(wsgiref.simple_server.WSGIRequestHandler):
+ """Custom WSGIRequestHandler.
+
+ Uses a named logger instead of printing to stderr.
+ """
+
+ def log_message(self, format, *args):
+ # pylint: disable=redefined-builtin
+ # (format is the argument name defined in the superclass.)
+ _LOGGER.info(format, *args)
+
+
+class _RedirectWSGIApp(object):
+ """WSGI app to handle the authorization redirect.
+
+ Stores the request URI and displays the given success message.
+ """
+
+ def __init__(self, success_message):
+ """
+ Args:
+ success_message (str): The message to display in the web browser
+ the authorization flow is complete.
+ """
+ self.last_request_uri = None
+ self._success_message = success_message
+
+ def __call__(self, environ, start_response):
+ """WSGI Callable.
+
+ Args:
+ environ (Mapping[str, Any]): The WSGI environment.
+ start_response (Callable[str, list]): The WSGI start_response
+ callable.
+
+ Returns:
+ Iterable[bytes]: The response body.
+ """
+ start_response("200 OK", [("Content-type", "text/plain; charset=utf-8")])
+ self.last_request_uri = wsgiref.util.request_uri(environ)
+ return [self._success_message.encode("utf-8")]
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/helpers.py b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..25462f4c193c84aad5c935ad55910faa70f760e8
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/helpers.py
@@ -0,0 +1,151 @@
+# Copyright 2017 Google Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Integration helpers.
+
+This module provides helpers for integrating with `requests-oauthlib`_.
+Typically, you'll want to use the higher-level helpers in
+:mod:`google_auth_oauthlib.flow`.
+
+.. _requests-oauthlib: http://requests-oauthlib.readthedocs.io/en/latest/
+"""
+
+import datetime
+import json
+
+from google.auth import external_account_authorized_user
+import google.oauth2.credentials
+import requests_oauthlib
+
+_REQUIRED_CONFIG_KEYS = frozenset(("auth_uri", "token_uri", "client_id"))
+
+
+def session_from_client_config(client_config, scopes, **kwargs):
+ """Creates a :class:`requests_oauthlib.OAuth2Session` from client
+ configuration loaded from a Google-format client secrets file.
+
+ Args:
+ client_config (Mapping[str, Any]): The client
+ configuration in the Google `client secrets`_ format.
+ scopes (Sequence[str]): The list of scopes to request during the
+ flow.
+ kwargs: Any additional parameters passed to
+ :class:`requests_oauthlib.OAuth2Session`
+
+ Raises:
+ ValueError: If the client configuration is not in the correct
+ format.
+
+ Returns:
+ Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
+ oauthlib session and the validated client configuration.
+
+ .. _client secrets:
+ https://github.com/googleapis/google-api-python-client/blob/main/docs/client-secrets.md
+ """
+
+ if "web" in client_config:
+ config = client_config["web"]
+ elif "installed" in client_config:
+ config = client_config["installed"]
+ else:
+ raise ValueError("Client secrets must be for a web or installed app.")
+
+ if not _REQUIRED_CONFIG_KEYS.issubset(config.keys()):
+ raise ValueError("Client secrets is not in the correct format.")
+
+ session = requests_oauthlib.OAuth2Session(
+ client_id=config["client_id"], scope=scopes, **kwargs
+ )
+
+ return session, client_config
+
+
+def session_from_client_secrets_file(client_secrets_file, scopes, **kwargs):
+ """Creates a :class:`requests_oauthlib.OAuth2Session` instance from a
+ Google-format client secrets file.
+
+ Args:
+ client_secrets_file (str): The path to the `client secrets`_ .json
+ file.
+ scopes (Sequence[str]): The list of scopes to request during the
+ flow.
+ kwargs: Any additional parameters passed to
+ :class:`requests_oauthlib.OAuth2Session`
+
+ Returns:
+ Tuple[requests_oauthlib.OAuth2Session, Mapping[str, Any]]: The new
+ oauthlib session and the validated client configuration.
+
+ .. _client secrets:
+ https://github.com/googleapis/google-api-python-client/blob/main/docs/client-secrets.md
+ """
+ with open(client_secrets_file, "r") as json_file:
+ client_config = json.load(json_file)
+
+ return session_from_client_config(client_config, scopes, **kwargs)
+
+
+def credentials_from_session(session, client_config=None):
+ """Creates :class:`google.oauth2.credentials.Credentials` from a
+ :class:`requests_oauthlib.OAuth2Session`.
+
+ :meth:`fetch_token` must be called on the session before before calling
+ this. This uses the session's auth token and the provided client
+ configuration to create :class:`google.oauth2.credentials.Credentials`.
+ This allows you to use the credentials from the session with Google
+ API client libraries.
+
+ Args:
+ session (requests_oauthlib.OAuth2Session): The OAuth 2.0 session.
+ client_config (Mapping[str, Any]): The subset of the client
+ configuration to use. For example, if you have a web client
+ you would pass in `client_config['web']`.
+
+ Returns:
+ google.oauth2.credentials.Credentials: The constructed credentials.
+
+ Raises:
+ ValueError: If there is no access token in the session.
+ """
+ client_config = client_config if client_config is not None else {}
+
+ if not session.token:
+ raise ValueError(
+ "There is no access token for this session, did you call " "fetch_token?"
+ )
+
+ if "3pi" in client_config:
+ credentials = external_account_authorized_user.Credentials(
+ token=session.token["access_token"],
+ refresh_token=session.token.get("refresh_token"),
+ token_url=client_config.get("token_uri"),
+ client_id=client_config.get("client_id"),
+ client_secret=client_config.get("client_secret"),
+ token_info_url=client_config.get("token_info_url"),
+ scopes=session.scope,
+ )
+ else:
+ credentials = google.oauth2.credentials.Credentials(
+ session.token["access_token"],
+ refresh_token=session.token.get("refresh_token"),
+ id_token=session.token.get("id_token"),
+ token_uri=client_config.get("token_uri"),
+ client_id=client_config.get("client_id"),
+ client_secret=client_config.get("client_secret"),
+ scopes=session.scope,
+ granted_scopes=session.token.get("scope"),
+ )
+ credentials.expiry = datetime.datetime.utcfromtimestamp(session.token["expires_at"])
+ return credentials
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/interactive.py b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/interactive.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1ed990ef435c9127430a2f4a3c269151463000e
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/interactive.py
@@ -0,0 +1,172 @@
+# Copyright 2019 Google LLC
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+"""Get user credentials from interactive code environments.
+
+This module contains helpers for getting user credentials from interactive
+code environments installed on a development machine, such as Jupyter
+notebooks.
+"""
+
+from __future__ import absolute_import
+
+import contextlib
+import socket
+
+import google_auth_oauthlib.flow
+
+
+LOCALHOST = "localhost"
+DEFAULT_PORTS_TO_TRY = 100
+
+
+def is_port_open(port):
+ """Check if a port is open on localhost.
+ Based on StackOverflow answer: https://stackoverflow.com/a/43238489/101923
+ Parameters
+ ----------
+ port : int
+ A port to check on localhost.
+ Returns
+ -------
+ is_open : bool
+ True if a socket can be opened at the requested port.
+ """
+ with contextlib.closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
+ try:
+ sock.bind((LOCALHOST, port))
+ sock.listen(1)
+ except socket.error:
+ is_open = False
+ else:
+ is_open = True
+ return is_open
+
+
+def find_open_port(start=8080, stop=None):
+ """Find an open port between ``start`` and ``stop``.
+ Parameters
+ ----------
+ start : Optional[int]
+ Beginning of range of ports to try. Defaults to 8080.
+ stop : Optional[int]
+ End of range of ports to try (not including exactly equals ``stop``).
+ This function tries 100 possible ports if no ``stop`` is specified.
+ Returns
+ -------
+ Optional[int]
+ ``None`` if no open port is found, otherwise an integer indicating an
+ open port.
+ """
+ if not stop:
+ stop = start + DEFAULT_PORTS_TO_TRY
+
+ for port in range(start, stop):
+ if is_port_open(port):
+ return port
+
+ # No open ports found.
+ return None
+
+
+def get_user_credentials(
+ scopes, client_id, client_secret, minimum_port=8080, maximum_port=None
+):
+ """Gets credentials associated with your Google user account.
+
+ This function authenticates using your user credentials by going through
+ the OAuth 2.0 flow. You'll open a browser window to authenticate to your
+ Google account. The permissions it requests correspond to the scopes
+ you've provided.
+
+ To obtain the ``client_id`` and ``client_secret``, create an **OAuth
+ client ID** with application type **Other** from the `Credentials page on
+ the Google Developer's Console
+ `_. Learn more
+ with the `Authenticating as an end user
+ `_ guide.
+
+ Args:
+ scopes (Sequence[str]):
+ A list of scopes to use when authenticating to Google APIs. See
+ the `list of OAuth 2.0 scopes for Google APIs
+ `_.
+ client_id (str):
+ A string that identifies your application to Google APIs. Find
+ this value in the `Credentials page on the Google Developer's
+ Console
+ `_.
+ client_secret (str):
+ A string that verifies your application to Google APIs. Find this
+ value in the `Credentials page on the Google Developer's Console
+ `_.
+ minimum_port (int):
+ Beginning of range of ports to try for redirect URI HTTP server.
+ Defaults to 8080.
+ maximum_port (Optional[int]):
+ End of range of ports to try (not including exactly equals ``stop``).
+ This function tries 100 possible ports if no ``stop`` is specified.
+
+ Returns:
+ google.oauth2.credentials.Credentials:
+ The OAuth 2.0 credentials for the user.
+
+ Examples:
+ Get credentials for your user account and use them to run a query
+ with BigQuery::
+
+ import google_auth_oauthlib
+
+ # TODO: Create a client ID for your project.
+ client_id = "YOUR-CLIENT-ID.apps.googleusercontent.com"
+ client_secret = "abc_ThIsIsAsEcReT"
+
+ # TODO: Choose the needed scopes for your applications.
+ scopes = ["https://www.googleapis.com/auth/cloud-platform"]
+
+ credentials = google_auth_oauthlib.get_user_credentials(
+ scopes, client_id, client_secret
+ )
+
+ # 1. Open the link.
+ # 2. Authorize the application to have access to your account.
+ # 3. Copy and paste the authorization code to the prompt.
+
+ # Use the credentials to construct a client for Google APIs.
+ from google.cloud import bigquery
+
+ bigquery_client = bigquery.Client(
+ credentials=credentials, project="your-project-id"
+ )
+ print(list(bigquery_client.query("SELECT 1").result()))
+ """
+
+ client_config = {
+ "installed": {
+ "client_id": client_id,
+ "client_secret": client_secret,
+ "auth_uri": "https://accounts.google.com/o/oauth2/auth",
+ "token_uri": "https://oauth2.googleapis.com/token",
+ }
+ }
+
+ app_flow = google_auth_oauthlib.flow.InstalledAppFlow.from_client_config(
+ client_config, scopes=scopes
+ )
+
+ port = find_open_port(start=minimum_port, stop=maximum_port)
+ if not port:
+ raise ConnectionError("Could not find open port.")
+
+ return app_flow.run_local_server(host=LOCALHOST, port=port)
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/tool/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/tool/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/tool/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/tool/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bb8cf319bb4dbd4e9870965cfcab5da545515819
Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/google_auth_oauthlib/tool/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/h11-0.14.0.dist-info/LICENSE.txt b/evalkit_cambrian/lib/python3.10/site-packages/h11-0.14.0.dist-info/LICENSE.txt
new file mode 100644
index 0000000000000000000000000000000000000000..8f080eae848f759c9173bfc0c79506357ebe5090
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/h11-0.14.0.dist-info/LICENSE.txt
@@ -0,0 +1,22 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Nathaniel J. Smith and other contributors
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/INSTALLER b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/METADATA b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..2046024a647b9fc5cee6a4a1844ef7ab7aa14c8c
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/METADATA
@@ -0,0 +1,97 @@
+Metadata-Version: 2.1
+Name: matplotlib-inline
+Version: 0.1.7
+Summary: Inline Matplotlib backend for Jupyter
+Author-email: IPython Development Team
+License: BSD 3-Clause License
+
+ Copyright (c) 2019-2022, IPython Development Team.
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ 3. Neither the name of the copyright holder nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Project-URL: Homepage, https://github.com/ipython/matplotlib-inline
+Keywords: ipython,jupyter,matplotlib,python
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Framework :: IPython
+Classifier: Framework :: Jupyter
+Classifier: Framework :: Jupyter :: JupyterLab
+Classifier: Framework :: Jupyter :: JupyterLab :: 3
+Classifier: Framework :: Jupyter :: JupyterLab :: 4
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Topic :: Multimedia :: Graphics
+Requires-Python: >=3.8
+Description-Content-Type: text/markdown
+License-File: LICENSE
+Requires-Dist: traitlets
+
+# Matplotlib Inline Back-end for IPython and Jupyter
+
+This package provides support for matplotlib to display figures directly inline in the Jupyter notebook and related clients, as shown below.
+
+## Installation
+
+With conda:
+
+```bash
+conda install -c conda-forge matplotlib-inline
+```
+
+With pip:
+
+```bash
+pip install matplotlib-inline
+```
+
+## Usage
+
+Note that in current versions of JupyterLab and Jupyter Notebook, the explicit use of the `%matplotlib inline` directive is not needed anymore, though other third-party clients may still require it.
+
+This will produce a figure immediately below:
+
+```python
+%matplotlib inline
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+x = np.linspace(0, 3*np.pi, 500)
+plt.plot(x, np.sin(x**2))
+plt.title('A simple chirp');
+```
+
+## License
+
+Licensed under the terms of the BSD 3-Clause License, by the IPython Development Team (see `LICENSE` file).
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/RECORD b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..2601ff52925e1d6b3582e7a5f8b897b6666ff075
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/RECORD
@@ -0,0 +1,14 @@
+matplotlib_inline-0.1.7.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+matplotlib_inline-0.1.7.dist-info/LICENSE,sha256=hSGwNsZEjg4KpyE9RxO2_e4PTGTJ8yBFD3c0a_XA6OQ,1538
+matplotlib_inline-0.1.7.dist-info/METADATA,sha256=Xz23GdN-XdBaedxKOTUXCuChARrN9NvHcKHdnkpv08E,3873
+matplotlib_inline-0.1.7.dist-info/RECORD,,
+matplotlib_inline-0.1.7.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+matplotlib_inline-0.1.7.dist-info/WHEEL,sha256=GJ7t_kWBFywbagK5eo9IoUwLW6oyOeTKmQ-9iHFVNxQ,92
+matplotlib_inline-0.1.7.dist-info/entry_points.txt,sha256=NxSdh3hjUJgJ73Z6FN60JWKd4Ll30Iy1nlIoNAvYGIU,63
+matplotlib_inline-0.1.7.dist-info/top_level.txt,sha256=0ViJqRtJZLIS7IfjHrUgYI6RIvWY0cfk1OjAlp40Zsw,18
+matplotlib_inline/__init__.py,sha256=tnQF2ytXKVHudRzPup4ymHdSygJW8znm2Ox3CVF9FQs,75
+matplotlib_inline/__pycache__/__init__.cpython-310.pyc,,
+matplotlib_inline/__pycache__/backend_inline.cpython-310.pyc,,
+matplotlib_inline/__pycache__/config.cpython-310.pyc,,
+matplotlib_inline/backend_inline.py,sha256=pR2qQcdCoXAEen7FXIyJvgRMaS4V9pker_IbabROH6E,11354
+matplotlib_inline/config.py,sha256=Z87OCv-LStYhOwQ2LmHDNxwkAn9BLFKTbHI7XSAT1Jc,3911
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/REQUESTED b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/REQUESTED
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/WHEEL b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..bab98d675883cc7567a79df485cd7b4f015e376f
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.43.0)
+Root-Is-Purelib: true
+Tag: py3-none-any
+
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/entry_points.txt b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/entry_points.txt
new file mode 100644
index 0000000000000000000000000000000000000000..cc52c3383b61c80940879d05c7a51b0c2a96250a
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/entry_points.txt
@@ -0,0 +1,2 @@
+[matplotlib.backend]
+inline = matplotlib_inline.backend_inline
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/top_level.txt b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..4b061e8006031c56c2c5ef5f56a342f8b2a60bc4
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/matplotlib_inline-0.1.7.dist-info/top_level.txt
@@ -0,0 +1 @@
+matplotlib_inline
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/multidict/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/multidict/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..efe6b91c6726e89233edec2018664e5d14fa0229
Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/multidict/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/multidict/__pycache__/_abc.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/multidict/__pycache__/_abc.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c98600e4baaffe9020ba6f00548c19b8b21f1346
Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/multidict/__pycache__/_abc.cpython-310.pyc differ
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/INSTALLER b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/License.txt b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/License.txt
new file mode 100644
index 0000000000000000000000000000000000000000..b491c70e0aef319022ded661e111ddbd45b8a17f
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/License.txt
@@ -0,0 +1,1568 @@
+End User License Agreement
+--------------------------
+
+
+Preface
+-------
+
+The Software License Agreement in Chapter 1 and the Supplement
+in Chapter 2 contain license terms and conditions that govern
+the use of NVIDIA software. By accepting this agreement, you
+agree to comply with all the terms and conditions applicable
+to the product(s) included herein.
+
+
+NVIDIA Driver
+
+
+Description
+
+This package contains the operating system driver and
+fundamental system software components for NVIDIA GPUs.
+
+
+NVIDIA CUDA Toolkit
+
+
+Description
+
+The NVIDIA CUDA Toolkit provides command-line and graphical
+tools for building, debugging and optimizing the performance
+of applications accelerated by NVIDIA GPUs, runtime and math
+libraries, and documentation including programming guides,
+user manuals, and API references.
+
+
+Default Install Location of CUDA Toolkit
+
+Windows platform:
+
+%ProgramFiles%\NVIDIA GPU Computing Toolkit\CUDA\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#
+
+
+NVIDIA CUDA Samples
+
+
+Description
+
+This package includes over 100+ CUDA examples that demonstrate
+various CUDA programming principles, and efficient CUDA
+implementation of algorithms in specific application domains.
+
+
+Default Install Location of CUDA Samples
+
+Windows platform:
+
+%ProgramData%\NVIDIA Corporation\CUDA Samples\v#.#
+
+Linux platform:
+
+/usr/local/cuda-#.#/samples
+
+and
+
+$HOME/NVIDIA_CUDA-#.#_Samples
+
+Mac platform:
+
+/Developer/NVIDIA/CUDA-#.#/samples
+
+
+NVIDIA Nsight Visual Studio Edition (Windows only)
+
+
+Description
+
+NVIDIA Nsight Development Platform, Visual Studio Edition is a
+development environment integrated into Microsoft Visual
+Studio that provides tools for debugging, profiling, analyzing
+and optimizing your GPU computing and graphics applications.
+
+
+Default Install Location of Nsight Visual Studio Edition
+
+Windows platform:
+
+%ProgramFiles(x86)%\NVIDIA Corporation\Nsight Visual Studio Edition #.#
+
+
+1. License Agreement for NVIDIA Software Development Kits
+---------------------------------------------------------
+
+
+Release Date: July 26, 2018
+---------------------------
+
+
+Important NoticeRead before downloading, installing,
+copying or using the licensed software:
+-------------------------------------------------------
+
+This license agreement, including exhibits attached
+("Agreement”) is a legal agreement between you and NVIDIA
+Corporation ("NVIDIA") and governs your use of a NVIDIA
+software development kit (“SDK”).
+
+Each SDK has its own set of software and materials, but here
+is a description of the types of items that may be included in
+a SDK: source code, header files, APIs, data sets and assets
+(examples include images, textures, models, scenes, videos,
+native API input/output files), binary software, sample code,
+libraries, utility programs, programming code and
+documentation.
+
+This Agreement can be accepted only by an adult of legal age
+of majority in the country in which the SDK is used.
+
+If you are entering into this Agreement on behalf of a company
+or other legal entity, you represent that you have the legal
+authority to bind the entity to this Agreement, in which case
+“you” will mean the entity you represent.
+
+If you don’t have the required age or authority to accept
+this Agreement, or if you don’t accept all the terms and
+conditions of this Agreement, do not download, install or use
+the SDK.
+
+You agree to use the SDK only for purposes that are permitted
+by (a) this Agreement, and (b) any applicable law, regulation
+or generally accepted practices or guidelines in the relevant
+jurisdictions.
+
+
+1.1. License
+
+
+1.1.1. License Grant
+
+Subject to the terms of this Agreement, NVIDIA hereby grants
+you a non-exclusive, non-transferable license, without the
+right to sublicense (except as expressly provided in this
+Agreement) to:
+
+ 1. Install and use the SDK,
+
+ 2. Modify and create derivative works of sample source code
+ delivered in the SDK, and
+
+ 3. Distribute those portions of the SDK that are identified
+ in this Agreement as distributable, as incorporated in
+ object code format into a software application that meets
+ the distribution requirements indicated in this Agreement.
+
+
+1.1.2. Distribution Requirements
+
+These are the distribution requirements for you to exercise
+the distribution grant:
+
+ 1. Your application must have material additional
+ functionality, beyond the included portions of the SDK.
+
+ 2. The distributable portions of the SDK shall only be
+ accessed by your application.
+
+ 3. The following notice shall be included in modifications
+ and derivative works of sample source code distributed:
+ “This software contains source code provided by NVIDIA
+ Corporation.”
+
+ 4. Unless a developer tool is identified in this Agreement
+ as distributable, it is delivered for your internal use
+ only.
+
+ 5. The terms under which you distribute your application
+ must be consistent with the terms of this Agreement,
+ including (without limitation) terms relating to the
+ license grant and license restrictions and protection of
+ NVIDIA’s intellectual property rights. Additionally, you
+ agree that you will protect the privacy, security and
+ legal rights of your application users.
+
+ 6. You agree to notify NVIDIA in writing of any known or
+ suspected distribution or use of the SDK not in compliance
+ with the requirements of this Agreement, and to enforce
+ the terms of your agreements with respect to distributed
+ SDK.
+
+
+1.1.3. Authorized Users
+
+You may allow employees and contractors of your entity or of
+your subsidiary(ies) to access and use the SDK from your
+secure network to perform work on your behalf.
+
+If you are an academic institution you may allow users
+enrolled or employed by the academic institution to access and
+use the SDK from your secure network.
+
+You are responsible for the compliance with the terms of this
+Agreement by your authorized users. If you become aware that
+your authorized users didn’t follow the terms of this
+Agreement, you agree to take reasonable steps to resolve the
+non-compliance and prevent new occurrences.
+
+
+1.1.4. Pre-Release SDK
+
+The SDK versions identified as alpha, beta, preview or
+otherwise as pre-release, may not be fully functional, may
+contain errors or design flaws, and may have reduced or
+different security, privacy, accessibility, availability, and
+reliability standards relative to commercial versions of
+NVIDIA software and materials. Use of a pre-release SDK may
+result in unexpected results, loss of data, project delays or
+other unpredictable damage or loss.
+
+You may use a pre-release SDK at your own risk, understanding
+that pre-release SDKs are not intended for use in production
+or business-critical systems.
+
+NVIDIA may choose not to make available a commercial version
+of any pre-release SDK. NVIDIA may also choose to abandon
+development and terminate the availability of a pre-release
+SDK at any time without liability.
+
+
+1.1.5. Updates
+
+NVIDIA may, at its option, make available patches, workarounds
+or other updates to this SDK. Unless the updates are provided
+with their separate governing terms, they are deemed part of
+the SDK licensed to you as provided in this Agreement. You
+agree that the form and content of the SDK that NVIDIA
+provides may change without prior notice to you. While NVIDIA
+generally maintains compatibility between versions, NVIDIA may
+in some cases make changes that introduce incompatibilities in
+future versions of the SDK.
+
+
+1.1.6. Third Party Licenses
+
+The SDK may come bundled with, or otherwise include or be
+distributed with, third party software licensed by a NVIDIA
+supplier and/or open source software provided under an open
+source license. Use of third party software is subject to the
+third-party license terms, or in the absence of third party
+terms, the terms of this Agreement. Copyright to third party
+software is held by the copyright holders indicated in the
+third-party software or license.
+
+
+1.1.7. Reservation of Rights
+
+NVIDIA reserves all rights, title, and interest in and to the
+SDK, not expressly granted to you under this Agreement.
+
+
+1.2. Limitations
+
+The following license limitations apply to your use of the
+SDK:
+
+ 1. You may not reverse engineer, decompile or disassemble,
+ or remove copyright or other proprietary notices from any
+ portion of the SDK or copies of the SDK.
+
+ 2. Except as expressly provided in this Agreement, you may
+ not copy, sell, rent, sublicense, transfer, distribute,
+ modify, or create derivative works of any portion of the
+ SDK. For clarity, you may not distribute or sublicense the
+ SDK as a stand-alone product.
+
+ 3. Unless you have an agreement with NVIDIA for this
+ purpose, you may not indicate that an application created
+ with the SDK is sponsored or endorsed by NVIDIA.
+
+ 4. You may not bypass, disable, or circumvent any
+ encryption, security, digital rights management or
+ authentication mechanism in the SDK.
+
+ 5. You may not use the SDK in any manner that would cause it
+ to become subject to an open source software license. As
+ examples, licenses that require as a condition of use,
+ modification, and/or distribution that the SDK be:
+
+ a. Disclosed or distributed in source code form;
+
+ b. Licensed for the purpose of making derivative works;
+ or
+
+ c. Redistributable at no charge.
+
+ 6. Unless you have an agreement with NVIDIA for this
+ purpose, you may not use the SDK with any system or
+ application where the use or failure of the system or
+ application can reasonably be expected to threaten or
+ result in personal injury, death, or catastrophic loss.
+ Examples include use in avionics, navigation, military,
+ medical, life support or other life critical applications.
+ NVIDIA does not design, test or manufacture the SDK for
+ these critical uses and NVIDIA shall not be liable to you
+ or any third party, in whole or in part, for any claims or
+ damages arising from such uses.
+
+ 7. You agree to defend, indemnify and hold harmless NVIDIA
+ and its affiliates, and their respective employees,
+ contractors, agents, officers and directors, from and
+ against any and all claims, damages, obligations, losses,
+ liabilities, costs or debt, fines, restitutions and
+ expenses (including but not limited to attorney’s fees
+ and costs incident to establishing the right of
+ indemnification) arising out of or related to your use of
+ the SDK outside of the scope of this Agreement, or not in
+ compliance with its terms.
+
+
+1.3. Ownership
+
+ 1. NVIDIA or its licensors hold all rights, title and
+ interest in and to the SDK and its modifications and
+ derivative works, including their respective intellectual
+ property rights, subject to your rights described in this
+ section. This SDK may include software and materials from
+ NVIDIA’s licensors, and these licensors are intended
+ third party beneficiaries that may enforce this Agreement
+ with respect to their intellectual property rights.
+
+ 2. You hold all rights, title and interest in and to your
+ applications and your derivative works of the sample
+ source code delivered in the SDK, including their
+ respective intellectual property rights, subject to
+ NVIDIA’s rights described in this section.
+
+ 3. You may, but don’t have to, provide to NVIDIA
+ suggestions, feature requests or other feedback regarding
+ the SDK, including possible enhancements or modifications
+ to the SDK. For any feedback that you voluntarily provide,
+ you hereby grant NVIDIA and its affiliates a perpetual,
+ non-exclusive, worldwide, irrevocable license to use,
+ reproduce, modify, license, sublicense (through multiple
+ tiers of sublicensees), and distribute (through multiple
+ tiers of distributors) it without the payment of any
+ royalties or fees to you. NVIDIA will use feedback at its
+ choice. NVIDIA is constantly looking for ways to improve
+ its products, so you may send feedback to NVIDIA through
+ the developer portal at https://developer.nvidia.com.
+
+
+1.4. No Warranties
+
+THE SDK IS PROVIDED BY NVIDIA “AS IS” AND “WITH ALL
+FAULTS.” TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND
+ITS AFFILIATES EXPRESSLY DISCLAIM ALL WARRANTIES OF ANY KIND
+OR NATURE, WHETHER EXPRESS, IMPLIED OR STATUTORY, INCLUDING,
+BUT NOT LIMITED TO, ANY WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE, TITLE, NON-INFRINGEMENT, OR THE
+ABSENCE OF ANY DEFECTS THEREIN, WHETHER LATENT OR PATENT. NO
+WARRANTY IS MADE ON THE BASIS OF TRADE USAGE, COURSE OF
+DEALING OR COURSE OF TRADE.
+
+
+1.5. Limitation of Liability
+
+TO THE MAXIMUM EXTENT PERMITTED BY LAW, NVIDIA AND ITS
+AFFILIATES SHALL NOT BE LIABLE FOR ANY SPECIAL, INCIDENTAL,
+PUNITIVE OR CONSEQUENTIAL DAMAGES, OR ANY LOST PROFITS, LOSS
+OF USE, LOSS OF DATA OR LOSS OF GOODWILL, OR THE COSTS OF
+PROCURING SUBSTITUTE PRODUCTS, ARISING OUT OF OR IN CONNECTION
+WITH THIS AGREEMENT OR THE USE OR PERFORMANCE OF THE SDK,
+WHETHER SUCH LIABILITY ARISES FROM ANY CLAIM BASED UPON BREACH
+OF CONTRACT, BREACH OF WARRANTY, TORT (INCLUDING NEGLIGENCE),
+PRODUCT LIABILITY OR ANY OTHER CAUSE OF ACTION OR THEORY OF
+LIABILITY. IN NO EVENT WILL NVIDIA’S AND ITS AFFILIATES
+TOTAL CUMULATIVE LIABILITY UNDER OR ARISING OUT OF THIS
+AGREEMENT EXCEED US$10.00. THE NATURE OF THE LIABILITY OR THE
+NUMBER OF CLAIMS OR SUITS SHALL NOT ENLARGE OR EXTEND THIS
+LIMIT.
+
+These exclusions and limitations of liability shall apply
+regardless if NVIDIA or its affiliates have been advised of
+the possibility of such damages, and regardless of whether a
+remedy fails its essential purpose. These exclusions and
+limitations of liability form an essential basis of the
+bargain between the parties, and, absent any of these
+exclusions or limitations of liability, the provisions of this
+Agreement, including, without limitation, the economic terms,
+would be substantially different.
+
+
+1.6. Termination
+
+ 1. This Agreement will continue to apply until terminated by
+ either you or NVIDIA as described below.
+
+ 2. If you want to terminate this Agreement, you may do so by
+ stopping to use the SDK.
+
+ 3. NVIDIA may, at any time, terminate this Agreement if:
+
+ a. (i) you fail to comply with any term of this
+ Agreement and the non-compliance is not fixed within
+ thirty (30) days following notice from NVIDIA (or
+ immediately if you violate NVIDIA’s intellectual
+ property rights);
+
+ b. (ii) you commence or participate in any legal
+ proceeding against NVIDIA with respect to the SDK; or
+
+ c. (iii) NVIDIA decides to no longer provide the SDK in
+ a country or, in NVIDIA’s sole discretion, the
+ continued use of it is no longer commercially viable.
+
+ 4. Upon any termination of this Agreement, you agree to
+ promptly discontinue use of the SDK and destroy all copies
+ in your possession or control. Your prior distributions in
+ accordance with this Agreement are not affected by the
+ termination of this Agreement. Upon written request, you
+ will certify in writing that you have complied with your
+ commitments under this section. Upon any termination of
+ this Agreement all provisions survive except for the
+ license grant provisions.
+
+
+1.7. General
+
+If you wish to assign this Agreement or your rights and
+obligations, including by merger, consolidation, dissolution
+or operation of law, contact NVIDIA to ask for permission. Any
+attempted assignment not approved by NVIDIA in writing shall
+be void and of no effect. NVIDIA may assign, delegate or
+transfer this Agreement and its rights and obligations, and if
+to a non-affiliate you will be notified.
+
+You agree to cooperate with NVIDIA and provide reasonably
+requested information to verify your compliance with this
+Agreement.
+
+This Agreement will be governed in all respects by the laws of
+the United States and of the State of Delaware as those laws
+are applied to contracts entered into and performed entirely
+within Delaware by Delaware residents, without regard to the
+conflicts of laws principles. The United Nations Convention on
+Contracts for the International Sale of Goods is specifically
+disclaimed. You agree to all terms of this Agreement in the
+English language.
+
+The state or federal courts residing in Santa Clara County,
+California shall have exclusive jurisdiction over any dispute
+or claim arising out of this Agreement. Notwithstanding this,
+you agree that NVIDIA shall still be allowed to apply for
+injunctive remedies or an equivalent type of urgent legal
+relief in any jurisdiction.
+
+If any court of competent jurisdiction determines that any
+provision of this Agreement is illegal, invalid or
+unenforceable, such provision will be construed as limited to
+the extent necessary to be consistent with and fully
+enforceable under the law and the remaining provisions will
+remain in full force and effect. Unless otherwise specified,
+remedies are cumulative.
+
+Each party acknowledges and agrees that the other is an
+independent contractor in the performance of this Agreement.
+
+The SDK has been developed entirely at private expense and is
+“commercial items” consisting of “commercial computer
+software” and “commercial computer software
+documentation” provided with RESTRICTED RIGHTS. Use,
+duplication or disclosure by the U.S. Government or a U.S.
+Government subcontractor is subject to the restrictions in
+this Agreement pursuant to DFARS 227.7202-3(a) or as set forth
+in subparagraphs (c)(1) and (2) of the Commercial Computer
+Software - Restricted Rights clause at FAR 52.227-19, as
+applicable. Contractor/manufacturer is NVIDIA, 2788 San Tomas
+Expressway, Santa Clara, CA 95051.
+
+The SDK is subject to United States export laws and
+regulations. You agree that you will not ship, transfer or
+export the SDK into any country, or use the SDK in any manner,
+prohibited by the United States Bureau of Industry and
+Security or economic sanctions regulations administered by the
+U.S. Department of Treasury’s Office of Foreign Assets
+Control (OFAC), or any applicable export laws, restrictions or
+regulations. These laws include restrictions on destinations,
+end users and end use. By accepting this Agreement, you
+confirm that you are not a resident or citizen of any country
+currently embargoed by the U.S. and that you are not otherwise
+prohibited from receiving the SDK.
+
+Any notice delivered by NVIDIA to you under this Agreement
+will be delivered via mail, email or fax. You agree that any
+notices that NVIDIA sends you electronically will satisfy any
+legal communication requirements. Please direct your legal
+notices or other correspondence to NVIDIA Corporation, 2788
+San Tomas Expressway, Santa Clara, California 95051, United
+States of America, Attention: Legal Department.
+
+This Agreement and any exhibits incorporated into this
+Agreement constitute the entire agreement of the parties with
+respect to the subject matter of this Agreement and supersede
+all prior negotiations or documentation exchanged between the
+parties relating to this SDK license. Any additional and/or
+conflicting terms on documents issued by you are null, void,
+and invalid. Any amendment or waiver under this Agreement
+shall be in writing and signed by representatives of both
+parties.
+
+
+2. CUDA Toolkit Supplement to Software License Agreement for
+NVIDIA Software Development Kits
+------------------------------------------------------------
+
+
+Release date: August 16, 2018
+-----------------------------
+
+The terms in this supplement govern your use of the NVIDIA
+CUDA Toolkit SDK under the terms of your license agreement
+(“Agreement”) as modified by this supplement. Capitalized
+terms used but not defined below have the meaning assigned to
+them in the Agreement.
+
+This supplement is an exhibit to the Agreement and is
+incorporated as an integral part of the Agreement. In the
+event of conflict between the terms in this supplement and the
+terms in the Agreement, the terms in this supplement govern.
+
+
+2.1. License Scope
+
+The SDK is licensed for you to develop applications only for
+use in systems with NVIDIA GPUs.
+
+
+2.2. Distribution
+
+The portions of the SDK that are distributable under the
+Agreement are listed in Attachment A.
+
+
+2.3. Operating Systems
+
+Those portions of the SDK designed exclusively for use on the
+Linux or FreeBSD operating systems, or other operating systems
+derived from the source code to these operating systems, may
+be copied and redistributed for use in accordance with this
+Agreement, provided that the object code files are not
+modified in any way (except for unzipping of compressed
+files).
+
+
+2.4. Audio and Video Encoders and Decoders
+
+You acknowledge and agree that it is your sole responsibility
+to obtain any additional third-party licenses required to
+make, have made, use, have used, sell, import, and offer for
+sale your products or services that include or incorporate any
+third-party software and content relating to audio and/or
+video encoders and decoders from, including but not limited
+to, Microsoft, Thomson, Fraunhofer IIS, Sisvel S.p.A.,
+MPEG-LA, and Coding Technologies. NVIDIA does not grant to you
+under this Agreement any necessary patent or other rights with
+respect to any audio and/or video encoders and decoders.
+
+
+2.5. Licensing
+
+If the distribution terms in this Agreement are not suitable
+for your organization, or for any questions regarding this
+Agreement, please contact NVIDIA at
+nvidia-compute-license-questions@nvidia.com.
+
+
+2.6. Attachment A
+
+The following portions of the SDK are distributable under the
+Agreement:
+
+Component
+
+CUDA Runtime
+
+Windows
+
+cudart.dll, cudart_static.lib, cudadevrt.lib
+
+Mac OSX
+
+libcudart.dylib, libcudart_static.a, libcudadevrt.a
+
+Linux
+
+libcudart.so, libcudart_static.a, libcudadevrt.a
+
+Android
+
+libcudart.so, libcudart_static.a, libcudadevrt.a
+
+Component
+
+CUDA FFT Library
+
+Windows
+
+cufft.dll, cufftw.dll, cufft.lib, cufftw.lib
+
+Mac OSX
+
+libcufft.dylib, libcufft_static.a, libcufftw.dylib,
+libcufftw_static.a
+
+Linux
+
+libcufft.so, libcufft_static.a, libcufftw.so,
+libcufftw_static.a
+
+Android
+
+libcufft.so, libcufft_static.a, libcufftw.so,
+libcufftw_static.a
+
+Component
+
+CUDA BLAS Library
+
+Windows
+
+cublas.dll, cublasLt.dll
+
+Mac OSX
+
+libcublas.dylib, libcublasLt.dylib, libcublas_static.a,
+libcublasLt_static.a
+
+Linux
+
+libcublas.so, libcublasLt.so, libcublas_static.a,
+libcublasLt_static.a
+
+Android
+
+libcublas.so, libcublasLt.so, libcublas_static.a,
+libcublasLt_static.a
+
+Component
+
+NVIDIA "Drop-in" BLAS Library
+
+Windows
+
+nvblas.dll
+
+Mac OSX
+
+libnvblas.dylib
+
+Linux
+
+libnvblas.so
+
+Component
+
+CUDA Sparse Matrix Library
+
+Windows
+
+cusparse.dll, cusparse.lib
+
+Mac OSX
+
+libcusparse.dylib, libcusparse_static.a
+
+Linux
+
+libcusparse.so, libcusparse_static.a
+
+Android
+
+libcusparse.so, libcusparse_static.a
+
+Component
+
+CUDA Linear Solver Library
+
+Windows
+
+cusolver.dll, cusolver.lib
+
+Mac OSX
+
+libcusolver.dylib, libcusolver_static.a
+
+Linux
+
+libcusolver.so, libcusolver_static.a
+
+Android
+
+libcusolver.so, libcusolver_static.a
+
+Component
+
+CUDA Random Number Generation Library
+
+Windows
+
+curand.dll, curand.lib
+
+Mac OSX
+
+libcurand.dylib, libcurand_static.a
+
+Linux
+
+libcurand.so, libcurand_static.a
+
+Android
+
+libcurand.so, libcurand_static.a
+
+Component
+
+CUDA Accelerated Graph Library
+
+Component
+
+NVIDIA Performance Primitives Library
+
+Windows
+
+nppc.dll, nppc.lib, nppial.dll, nppial.lib, nppicc.dll,
+nppicc.lib, nppicom.dll, nppicom.lib, nppidei.dll,
+nppidei.lib, nppif.dll, nppif.lib, nppig.dll, nppig.lib,
+nppim.dll, nppim.lib, nppist.dll, nppist.lib, nppisu.dll,
+nppisu.lib, nppitc.dll, nppitc.lib, npps.dll, npps.lib
+
+Mac OSX
+
+libnppc.dylib, libnppc_static.a, libnppial.dylib,
+libnppial_static.a, libnppicc.dylib, libnppicc_static.a,
+libnppicom.dylib, libnppicom_static.a, libnppidei.dylib,
+libnppidei_static.a, libnppif.dylib, libnppif_static.a,
+libnppig.dylib, libnppig_static.a, libnppim.dylib,
+libnppisu_static.a, libnppitc.dylib, libnppitc_static.a,
+libnpps.dylib, libnpps_static.a
+
+Linux
+
+libnppc.so, libnppc_static.a, libnppial.so,
+libnppial_static.a, libnppicc.so, libnppicc_static.a,
+libnppicom.so, libnppicom_static.a, libnppidei.so,
+libnppidei_static.a, libnppif.so, libnppif_static.a
+libnppig.so, libnppig_static.a, libnppim.so,
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Android
+
+libnppc.so, libnppc_static.a, libnppial.so,
+libnppial_static.a, libnppicc.so, libnppicc_static.a,
+libnppicom.so, libnppicom_static.a, libnppidei.so,
+libnppidei_static.a, libnppif.so, libnppif_static.a
+libnppig.so, libnppig_static.a, libnppim.so,
+libnppim_static.a, libnppist.so, libnppist_static.a,
+libnppisu.so, libnppisu_static.a, libnppitc.so
+libnppitc_static.a, libnpps.so, libnpps_static.a
+
+Component
+
+NVIDIA JPEG Library
+
+Linux
+
+libnvjpeg.so, libnvjpeg_static.a
+
+Component
+
+Internal common library required for statically linking to
+cuBLAS, cuSPARSE, cuFFT, cuRAND, nvJPEG and NPP
+
+Mac OSX
+
+libculibos.a
+
+Linux
+
+libculibos.a
+
+Component
+
+NVIDIA Runtime Compilation Library and Header
+
+All
+
+nvrtc.h
+
+Windows
+
+nvrtc.dll, nvrtc-builtins.dll
+
+Mac OSX
+
+libnvrtc.dylib, libnvrtc-builtins.dylib
+
+Linux
+
+libnvrtc.so, libnvrtc-builtins.so
+
+Component
+
+NVIDIA Optimizing Compiler Library
+
+Windows
+
+nvvm.dll
+
+Mac OSX
+
+libnvvm.dylib
+
+Linux
+
+libnvvm.so
+
+Component
+
+NVIDIA Common Device Math Functions Library
+
+Windows
+
+libdevice.10.bc
+
+Mac OSX
+
+libdevice.10.bc
+
+Linux
+
+libdevice.10.bc
+
+Component
+
+CUDA Occupancy Calculation Header Library
+
+All
+
+cuda_occupancy.h
+
+Component
+
+CUDA Half Precision Headers
+
+All
+
+cuda_fp16.h, cuda_fp16.hpp
+
+Component
+
+CUDA Profiling Tools Interface (CUPTI) Library
+
+Windows
+
+cupti.dll
+
+Mac OSX
+
+libcupti.dylib
+
+Linux
+
+libcupti.so
+
+Component
+
+NVIDIA Tools Extension Library
+
+Windows
+
+nvToolsExt.dll, nvToolsExt.lib
+
+Mac OSX
+
+libnvToolsExt.dylib
+
+Linux
+
+libnvToolsExt.so
+
+Component
+
+NVIDIA CUDA Driver Libraries
+
+Linux
+
+libcuda.so, libnvidia-fatbinaryloader.so,
+libnvidia-ptxjitcompiler.so
+
+The NVIDIA CUDA Driver Libraries are only distributable in
+applications that meet this criteria:
+
+ 1. The application was developed starting from a NVIDIA CUDA
+ container obtained from Docker Hub or the NVIDIA GPU
+ Cloud, and
+
+ 2. The resulting application is packaged as a Docker
+ container and distributed to users on Docker Hub or the
+ NVIDIA GPU Cloud only.
+
+
+2.7. Attachment B
+
+
+Additional Licensing Obligations
+
+The following third party components included in the SOFTWARE
+are licensed to Licensee pursuant to the following terms and
+conditions:
+
+ 1. Licensee's use of the GDB third party component is
+ subject to the terms and conditions of GNU GPL v3:
+
+ This product includes copyrighted third-party software licensed
+ under the terms of the GNU General Public License v3 ("GPL v3").
+ All third-party software packages are copyright by their respective
+ authors. GPL v3 terms and conditions are hereby incorporated into
+ the Agreement by this reference: http://www.gnu.org/licenses/gpl.txt
+
+ Consistent with these licensing requirements, the software
+ listed below is provided under the terms of the specified
+ open source software licenses. To obtain source code for
+ software provided under licenses that require
+ redistribution of source code, including the GNU General
+ Public License (GPL) and GNU Lesser General Public License
+ (LGPL), contact oss-requests@nvidia.com. This offer is
+ valid for a period of three (3) years from the date of the
+ distribution of this product by NVIDIA CORPORATION.
+
+ Component License
+ CUDA-GDB GPL v3
+
+ 2. Licensee represents and warrants that any and all third
+ party licensing and/or royalty payment obligations in
+ connection with Licensee's use of the H.264 video codecs
+ are solely the responsibility of Licensee.
+
+ 3. Licensee's use of the Thrust library is subject to the
+ terms and conditions of the Apache License Version 2.0.
+ All third-party software packages are copyright by their
+ respective authors. Apache License Version 2.0 terms and
+ conditions are hereby incorporated into the Agreement by
+ this reference.
+ http://www.apache.org/licenses/LICENSE-2.0.html
+
+ In addition, Licensee acknowledges the following notice:
+ Thrust includes source code from the Boost Iterator,
+ Tuple, System, and Random Number libraries.
+
+ Boost Software License - Version 1.0 - August 17th, 2003
+ . . . .
+
+ Permission is hereby granted, free of charge, to any person or
+ organization obtaining a copy of the software and accompanying
+ documentation covered by this license (the "Software") to use,
+ reproduce, display, distribute, execute, and transmit the Software,
+ and to prepare derivative works of the Software, and to permit
+ third-parties to whom the Software is furnished to do so, all
+ subject to the following:
+
+ The copyright notices in the Software and this entire statement,
+ including the above license grant, this restriction and the following
+ disclaimer, must be included in all copies of the Software, in whole
+ or in part, and all derivative works of the Software, unless such
+ copies or derivative works are solely in the form of machine-executable
+ object code generated by a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ 4. Licensee's use of the LLVM third party component is
+ subject to the following terms and conditions:
+
+ ======================================================
+ LLVM Release License
+ ======================================================
+ University of Illinois/NCSA
+ Open Source License
+
+ Copyright (c) 2003-2010 University of Illinois at Urbana-Champaign.
+ All rights reserved.
+
+ Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to
+ deal with the Software without restriction, including without limitation the
+ rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ sell copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at Urbana-
+ Champaign, nor the names of its contributors may be used to endorse or
+ promote products derived from this Software without specific prior
+ written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ DEALINGS WITH THE SOFTWARE.
+
+ 5. Licensee's use (e.g. nvprof) of the PCRE third party
+ component is subject to the following terms and
+ conditions:
+
+ ------------
+ PCRE LICENCE
+ ------------
+ PCRE is a library of functions to support regular expressions whose syntax
+ and semantics are as close as possible to those of the Perl 5 language.
+ Release 8 of PCRE is distributed under the terms of the "BSD" licence, as
+ specified below. The documentation for PCRE, supplied in the "doc"
+ directory, is distributed under the same terms as the software itself. The
+ basic library functions are written in C and are freestanding. Also
+ included in the distribution is a set of C++ wrapper functions, and a just-
+ in-time compiler that can be used to optimize pattern matching. These are
+ both optional features that can be omitted when the library is built.
+
+ THE BASIC LIBRARY FUNCTIONS
+ ---------------------------
+ Written by: Philip Hazel
+ Email local part: ph10
+ Email domain: cam.ac.uk
+ University of Cambridge Computing Service,
+ Cambridge, England.
+ Copyright (c) 1997-2012 University of Cambridge
+ All rights reserved.
+
+ PCRE JUST-IN-TIME COMPILATION SUPPORT
+ -------------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Emain domain: freemail.hu
+ Copyright(c) 2010-2012 Zoltan Herczeg
+ All rights reserved.
+
+ STACK-LESS JUST-IN-TIME COMPILER
+ --------------------------------
+ Written by: Zoltan Herczeg
+ Email local part: hzmester
+ Emain domain: freemail.hu
+ Copyright(c) 2009-2012 Zoltan Herczeg
+ All rights reserved.
+
+ THE C++ WRAPPER FUNCTIONS
+ -------------------------
+ Contributed by: Google Inc.
+ Copyright (c) 2007-2012, Google Inc.
+ All rights reserved.
+
+ THE "BSD" LICENCE
+ -----------------
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the name of the University of Cambridge nor the name of Google
+ Inc. nor the names of their contributors may be used to endorse or
+ promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 6. Some of the cuBLAS library routines were written by or
+ derived from code written by Vasily Volkov and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2007-2009, Regents of the University of California
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the University of California, Berkeley nor
+ the names of its contributors may be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 7. Some of the cuBLAS library routines were written by or
+ derived from code written by Davide Barbieri and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2008-2009 Davide Barbieri @ University of Rome Tor Vergata.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE AUTHOR "AS IS" AND ANY EXPRESS OR
+ IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+ 8. Some of the cuBLAS library routines were derived from
+ code developed by the University of Tennessee and are
+ subject to the Modified Berkeley Software Distribution
+ License as follows:
+
+ Copyright (c) 2010 The University of Tennessee.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer listed in this license in the documentation and/or
+ other materials provided with the distribution.
+ * Neither the name of the copyright holders nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 9. Some of the cuBLAS library routines were written by or
+ derived from code written by Jonathan Hogg and are subject
+ to the Modified Berkeley Software Distribution License as
+ follows:
+
+ Copyright (c) 2012, The Science and Technology Facilities Council (STFC).
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the STFC nor the names of its contributors
+ may be used to endorse or promote products derived from this
+ software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE STFC BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
+ BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+ WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
+ IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 10. Some of the cuBLAS library routines were written by or
+ derived from code written by Ahmad M. Abdelfattah, David
+ Keyes, and Hatem Ltaief, and are subject to the Apache
+ License, Version 2.0, as follows:
+
+ -- (C) Copyright 2013 King Abdullah University of Science and Technology
+ Authors:
+ Ahmad Abdelfattah (ahmad.ahmad@kaust.edu.sa)
+ David Keyes (david.keyes@kaust.edu.sa)
+ Hatem Ltaief (hatem.ltaief@kaust.edu.sa)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of the King Abdullah University of Science and
+ Technology nor the names of its contributors may be used to endorse
+ or promote products derived from this software without specific prior
+ written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
+
+ 11. Some of the cuSPARSE library routines were written by or
+ derived from code written by Li-Wen Chang and are subject
+ to the NCSA Open Source License as follows:
+
+ Copyright (c) 2012, University of Illinois.
+
+ All rights reserved.
+
+ Developed by: IMPACT Group, University of Illinois, http://impact.crhc.illinois.edu
+
+ Permission is hereby granted, free of charge, to any person obtaining
+ a copy of this software and associated documentation files (the
+ "Software"), to deal with the Software without restriction, including
+ without limitation the rights to use, copy, modify, merge, publish,
+ distribute, sublicense, and/or sell copies of the Software, and to
+ permit persons to whom the Software is furnished to do so, subject to
+ the following conditions:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimers in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the names of IMPACT Group, University of Illinois, nor
+ the names of its contributors may be used to endorse or promote
+ products derived from this Software without specific prior
+ written permission.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT
+ HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+ IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+ SOFTWARE.
+
+ 12. Some of the cuRAND library routines were written by or
+ derived from code written by Mutsuo Saito and Makoto
+ Matsumoto and are subject to the following license:
+
+ Copyright (c) 2009, 2010 Mutsuo Saito, Makoto Matsumoto and Hiroshima
+ University. All rights reserved.
+
+ Copyright (c) 2011 Mutsuo Saito, Makoto Matsumoto, Hiroshima
+ University and University of Tokyo. All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of the Hiroshima University nor the names of
+ its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 13. Some of the cuRAND library routines were derived from
+ code developed by D. E. Shaw Research and are subject to
+ the following license:
+
+ Copyright 2010-2011, D. E. Shaw Research.
+
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions, and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions, and the following
+ disclaimer in the documentation and/or other materials provided
+ with the distribution.
+ * Neither the name of D. E. Shaw Research nor the names of its
+ contributors may be used to endorse or promote products derived
+ from this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 14. Some of the Math library routines were written by or
+ derived from code developed by Norbert Juffa and are
+ subject to the following license:
+
+ Copyright (c) 2015-2017, Norbert Juffa
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions
+ are met:
+
+ 1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 15. Licensee's use of the lz4 third party component is
+ subject to the following terms and conditions:
+
+ Copyright (C) 2011-2013, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ 16. The NPP library uses code from the Boost Math Toolkit,
+ and is subject to the following license:
+
+ Boost Software License - Version 1.0 - August 17th, 2003
+ . . . .
+
+ Permission is hereby granted, free of charge, to any person or
+ organization obtaining a copy of the software and accompanying
+ documentation covered by this license (the "Software") to use,
+ reproduce, display, distribute, execute, and transmit the Software,
+ and to prepare derivative works of the Software, and to permit
+ third-parties to whom the Software is furnished to do so, all
+ subject to the following:
+
+ The copyright notices in the Software and this entire statement,
+ including the above license grant, this restriction and the following
+ disclaimer, must be included in all copies of the Software, in whole
+ or in part, and all derivative works of the Software, unless such
+ copies or derivative works are solely in the form of machine-executable
+ object code generated by a source language processor.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, TITLE AND
+ NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR
+ ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE FOR ANY DAMAGES OR
+ OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, ARISING
+ FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ OTHER DEALINGS IN THE SOFTWARE.
+
+ 17. Portions of the Nsight Eclipse Edition is subject to the
+ following license:
+
+ The Eclipse Foundation makes available all content in this plug-in
+ ("Content"). Unless otherwise indicated below, the Content is provided
+ to you under the terms and conditions of the Eclipse Public License
+ Version 1.0 ("EPL"). A copy of the EPL is available at http://
+ www.eclipse.org/legal/epl-v10.html. For purposes of the EPL, "Program"
+ will mean the Content.
+
+ If you did not receive this Content directly from the Eclipse
+ Foundation, the Content is being redistributed by another party
+ ("Redistributor") and different terms and conditions may apply to your
+ use of any object code in the Content. Check the Redistributor's
+ license that was provided with the Content. If no such license exists,
+ contact the Redistributor. Unless otherwise indicated below, the terms
+ and conditions of the EPL still apply to any source code in the
+ Content and such source code may be obtained at http://www.eclipse.org.
+
+ 18. Some of the cuBLAS library routines uses code from
+ OpenAI, which is subject to the following license:
+
+ License URL
+ https://github.com/openai/openai-gemm/blob/master/LICENSE
+
+ License Text
+ The MIT License
+
+ Copyright (c) 2016 OpenAI (http://openai.com), 2016 Google Inc.
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+ 19. Licensee's use of the Visual Studio Setup Configuration
+ Samples is subject to the following license:
+
+ The MIT License (MIT)
+ Copyright (C) Microsoft Corporation. All rights reserved.
+
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify, merge,
+ publish, distribute, sublicense, and/or sell copies of the Software,
+ and to permit persons to whom the Software is furnished to do so,
+ subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included
+ in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+ 20. Licensee's use of linmath.h header for CPU functions for
+ GL vector/matrix operations from lunarG is subject to the
+ Apache License Version 2.0.
+
+ 21. The DX12-CUDA sample uses the d3dx12.h header, which is
+ subject to the MIT license .
+
+-----------------
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/METADATA b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..16c88b0d2e630c13b7e035931a931a55b75b5971
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/METADATA
@@ -0,0 +1,35 @@
+Metadata-Version: 2.1
+Name: nvidia-curand-cu11
+Version: 10.3.0.86
+Summary: CURAND native runtime libraries
+Home-page: https://developer.nvidia.com/cuda-zone
+Author: Nvidia CUDA Installer Team
+Author-email: compute_installer@nvidia.com
+License: NVIDIA Proprietary Software
+Keywords: cuda,nvidia,runtime,machine learning,deep learning
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: Other/Proprietary License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3
+License-File: License.txt
+
+CURAND native runtime libraries
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/RECORD b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..07183e681a2f78b6b45cffc8b0052c5458a375f0
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/RECORD
@@ -0,0 +1,32 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-310.pyc,,
+nvidia/curand/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/curand/__pycache__/__init__.cpython-310.pyc,,
+nvidia/curand/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/curand/include/__pycache__/__init__.cpython-310.pyc,,
+nvidia/curand/include/curand.h,sha256=duUMdlPliB_djlgE8rv-E8J3Awre2pTDLiqc5eK-qZ0,43965
+nvidia/curand/include/curand_discrete.h,sha256=2qD3BkI622XEu0444wVP7HeYkKAx0Rjr2HDhqU4SA7E,3486
+nvidia/curand/include/curand_discrete2.h,sha256=ZrQTO5R9x83AMX88uq7M8M94DLSC5VEz0PAkfcwtQeg,10883
+nvidia/curand/include/curand_globals.h,sha256=bES1Kx0NrATXk1DReMMkqWrB062nOnaAp39y22wViXU,3717
+nvidia/curand/include/curand_kernel.h,sha256=8R_1VsYarc8ECbYVAITSVisCi9WcPt5UhMX005R45sA,52714
+nvidia/curand/include/curand_lognormal.h,sha256=-X-iNkJSzWpAYYjogm689EJTZfzore9sxU7ObddljLk,28142
+nvidia/curand/include/curand_mrg32k3a.h,sha256=ZVVREjGNsJQJ-3IzZZ_LKGtGteslicb8E0Aly49BKPs,170296
+nvidia/curand/include/curand_mtgp32.h,sha256=Qhrmx0pHWF-P2Uu5bKwYE9ymEWq3c7qBzCITVMaKMfI,7845
+nvidia/curand/include/curand_mtgp32_host.h,sha256=9eLM7OQxVsb59Qvv0Uzd2mHNY5-bF8AXS5uIzDERWRM,18266
+nvidia/curand/include/curand_mtgp32_kernel.h,sha256=KN6wkEisXbi1E-38KYZFmdDoP1a__BOgV2uc-E44cuk,13710
+nvidia/curand/include/curand_mtgp32dc_p_11213.h,sha256=7_gGYUH47UugIAEt60vYH5nFa-QUwTpDwSEgLg9cZts,276889
+nvidia/curand/include/curand_normal.h,sha256=AvPh8Q9M-xHtj_wAeMNCcB2JSsV401zy7vRp8WGmNtc,26926
+nvidia/curand/include/curand_normal_static.h,sha256=uG82DfUglNO-k67SIsNVt2kbf86qCMFWxr1vjncjUOw,4649
+nvidia/curand/include/curand_philox4x32_x.h,sha256=myBP5wxkqqs53fBowXAtDMo9BLSGAyrWrIfRXNW-3YM,7146
+nvidia/curand/include/curand_poisson.h,sha256=VmzdCnstyeS7j1enTzcPnyugBrgfCpv6zNulnb5p6g0,25409
+nvidia/curand/include/curand_precalc.h,sha256=I6NZdgT42fMm9qSCtP-rlOAqt4Zsqgal0ajktcPmEak,1392393
+nvidia/curand/include/curand_uniform.h,sha256=gpmRgQu5r6ppgLTg60NXoDdVJS6wMUy6jC5bh8l04e8,17472
+nvidia/curand/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/curand/lib/__pycache__/__init__.cpython-310.pyc,,
+nvidia/curand/lib/libcurand.so.10,sha256=l4E9-Rbr6GE--41XEnuRGUAzMqZNkpv9FyRSwUDhAd4,101334448
+nvidia_curand_cu11-10.3.0.86.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_curand_cu11-10.3.0.86.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+nvidia_curand_cu11-10.3.0.86.dist-info/METADATA,sha256=iZzT2G6DE3UKnCNmXcCbgresNj_ibNc9u4HA3PjgXo8,1509
+nvidia_curand_cu11-10.3.0.86.dist-info/RECORD,,
+nvidia_curand_cu11-10.3.0.86.dist-info/WHEEL,sha256=6NTOzyjaR-9aJxJquqd0X0mRtq4MUYsP0w_ppLcEVkQ,108
+nvidia_curand_cu11-10.3.0.86.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/WHEEL b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..2cbd0cba57b06979b7006f8f837931386d6836ce
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: setuptools (72.2.0)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux2014_x86_64
+
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/top_level.txt b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/top_level.txt
new file mode 100644
index 0000000000000000000000000000000000000000..862f7abf232cdfbb928609856247292e81c9decb
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_curand_cu11-10.3.0.86.dist-info/top_level.txt
@@ -0,0 +1 @@
+nvidia
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/INSTALLER b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/INSTALLER
new file mode 100644
index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/INSTALLER
@@ -0,0 +1 @@
+pip
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/METADATA b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/METADATA
new file mode 100644
index 0000000000000000000000000000000000000000..b3e4a52e9cbba7e3fc0ffd3ac79817726cc6de5e
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/METADATA
@@ -0,0 +1,35 @@
+Metadata-Version: 2.1
+Name: nvidia-nvtx-cu12
+Version: 12.1.105
+Summary: NVIDIA Tools Extension
+Home-page: https://developer.nvidia.com/cuda-zone
+Author: Nvidia CUDA Installer Team
+Author-email: cuda_installer@nvidia.com
+License: NVIDIA Proprietary Software
+Keywords: cuda,nvidia,runtime,machine learning,deep learning
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: Education
+Classifier: Intended Audience :: Science/Research
+Classifier: License :: Other/Proprietary License
+Classifier: Natural Language :: English
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3.9
+Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Topic :: Scientific/Engineering
+Classifier: Topic :: Scientific/Engineering :: Mathematics
+Classifier: Topic :: Scientific/Engineering :: Artificial Intelligence
+Classifier: Topic :: Software Development
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: POSIX :: Linux
+Requires-Python: >=3
+License-File: License.txt
+
+A C-based API for annotating events, code ranges, and resources in your applications. Applications which integrate NVTX can use the Visual Profiler to capture and visualize these events and ranges.
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/RECORD b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/RECORD
new file mode 100644
index 0000000000000000000000000000000000000000..5349f6a78a53a5a2a6e4c1af3da3eee1bde2d6f1
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/RECORD
@@ -0,0 +1,37 @@
+nvidia/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/__pycache__/__init__.cpython-310.pyc,,
+nvidia/nvtx/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/nvtx/__pycache__/__init__.cpython-310.pyc,,
+nvidia/nvtx/include/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/nvtx/include/__pycache__/__init__.cpython-310.pyc,,
+nvidia/nvtx/include/nvToolsExt.h,sha256=OiT6v1G2-vlkYnpDQZjiGT1O-THDyk1gw2021qMRvQM,53680
+nvidia/nvtx/include/nvToolsExtCuda.h,sha256=UDA1pbmvoRFmlJ11Et9tIMEztOtOVw-10mO27Q6K8jg,6009
+nvidia/nvtx/include/nvToolsExtCudaRt.h,sha256=6IbgdRGObly53jzRqvsZ4FQoTrXJOJwSyCOLuXr9ncA,5192
+nvidia/nvtx/include/nvToolsExtOpenCL.h,sha256=gETZH9ch_o6MYE_BYQ2pj9SSuxyAo1H4ptmRK-DMWSo,8360
+nvidia/nvtx/include/nvToolsExtSync.h,sha256=wqONIiycUPaUUCzQBmCippilgKt8sOL9tpzG773u0nY,14562
+nvidia/nvtx/include/nvtx3/nvToolsExt.h,sha256=TFEF3fx1043EwMdbS7FqvvavwK0koZeGrIOAsCrB12s,52247
+nvidia/nvtx/include/nvtx3/nvToolsExtCuda.h,sha256=4ZbZHUMcmHRf4SdKB7nH0E3uHd_9ZhZBuwuWPItK-Vs,6204
+nvidia/nvtx/include/nvtx3/nvToolsExtCudaRt.h,sha256=boW0zdYobNFFE9wwxCyzBGBLcSGtdbQ5osKjQGNC2E8,5393
+nvidia/nvtx/include/nvtx3/nvToolsExtOpenCL.h,sha256=RPfsZl3lHAPIOCzTipmz07-vaiIO4cxelcx12EjB2L0,8563
+nvidia/nvtx/include/nvtx3/nvToolsExtSync.h,sha256=C-HIVBaupxYom3BqMggQ_ePq1bxFhw8kXsOfYJKBWrI,14756
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImpl.h,sha256=jEnYF3MyLsD72euw2It3Bz0X0GK4Xv_htEd8BeIrPjY,23333
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCore.h,sha256=sYpWqZfYrjsMddxtezPX3qSTIbAOn4dlEoLiYQ9M2nM,9756
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCudaRt_v3.h,sha256=SoaiprvsI80yLmEAnlFX0iFufv6RtKjjMMrVwQZjjQI,4775
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplCuda_v3.h,sha256=IEor-ISqComCRGVDdIzKBLU3eWCuDI0Igqz-eRKKcvg,5550
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplOpenCL_v3.h,sha256=iPR2x74bJE3plFQBT9FWGBaTm4sC-Pll6WAjpKRnz7g,8275
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxImplSync_v3.h,sha256=TqwQfEUVbwc58bpHioE13NMweFhOuHXNql65BnLzhvc,5022
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInit.h,sha256=foajOFacvLGx3BN5ntw5v8o4J3OY4hqkVZE5ZC0x3e4,14716
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInitDecls.h,sha256=-Qyxcy9CDXOBhEtYZ8L7iYd6daJ9aCeyQM48X0BafMM,9361
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxInitDefs.h,sha256=dLhOV4knhNrmT2DnUNzXreOt_Qc6GAa3yIlmqJFCeVI,35432
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxLinkOnce.h,sha256=Jp-z6LTz_p8fKRulcFfdcskIxzcZ6ybbHkGB9mpJa2M,3863
+nvidia/nvtx/include/nvtx3/nvtxDetail/nvtxTypes.h,sha256=jkbCwyvIP1G-Ef8SwYp4kDi69hjZbzaxKSk7ScgrNI8,17352
+nvidia/nvtx/lib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia/nvtx/lib/__pycache__/__init__.cpython-310.pyc,,
+nvidia/nvtx/lib/libnvToolsExt.so.1,sha256=hH148nXIzJdEKieAcyBL3BoACf_CVZv3JIxw2SEF39w,40136
+nvidia_nvtx_cu12-12.1.105.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+nvidia_nvtx_cu12-12.1.105.dist-info/License.txt,sha256=rW9YU_ugyg0VnQ9Y1JrkmDDC-Mk_epJki5zpCttMbM0,59262
+nvidia_nvtx_cu12-12.1.105.dist-info/METADATA,sha256=LP0Xeqykb8k4yxR2_JzTBqGwxALQERIJbbmP1k6-Z3Y,1660
+nvidia_nvtx_cu12-12.1.105.dist-info/RECORD,,
+nvidia_nvtx_cu12-12.1.105.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL,sha256=-kQi_VMfvRQozZJT7HUPMfY-5vLo0LVTmAylNJ3Ft98,106
+nvidia_nvtx_cu12-12.1.105.dist-info/top_level.txt,sha256=fTkAtiFuL16nUrB9ytDDtpytz2t0B4NvYTnRzwAhO14,7
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL
new file mode 100644
index 0000000000000000000000000000000000000000..06e355fe0e3ed7077903f119ae6928a17da8eb6f
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/nvidia_nvtx_cu12-12.1.105.dist-info/WHEEL
@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.37.1)
+Root-Is-Purelib: true
+Tag: py3-none-manylinux1_x86_64
+
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/__init__.py b/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..791de9d92342872ff53d876a42677c204a974fdc
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/__init__.py
@@ -0,0 +1,139 @@
+from __future__ import annotations
+
+import sys as _sys
+from typing import Any as _Any
+
+from ._pydantic_core import (
+ ArgsKwargs,
+ MultiHostUrl,
+ PydanticCustomError,
+ PydanticKnownError,
+ PydanticOmit,
+ PydanticSerializationError,
+ PydanticSerializationUnexpectedValue,
+ PydanticUndefined,
+ PydanticUndefinedType,
+ PydanticUseDefault,
+ SchemaError,
+ SchemaSerializer,
+ SchemaValidator,
+ Some,
+ TzInfo,
+ Url,
+ ValidationError,
+ __version__,
+ from_json,
+ to_json,
+ to_jsonable_python,
+ validate_core_schema,
+)
+from .core_schema import CoreConfig, CoreSchema, CoreSchemaType, ErrorType
+
+if _sys.version_info < (3, 11):
+ from typing_extensions import NotRequired as _NotRequired
+else:
+ from typing import NotRequired as _NotRequired
+
+if _sys.version_info < (3, 12):
+ from typing_extensions import TypedDict as _TypedDict
+else:
+ from typing import TypedDict as _TypedDict
+
+__all__ = [
+ '__version__',
+ 'CoreConfig',
+ 'CoreSchema',
+ 'CoreSchemaType',
+ 'SchemaValidator',
+ 'SchemaSerializer',
+ 'Some',
+ 'Url',
+ 'MultiHostUrl',
+ 'ArgsKwargs',
+ 'PydanticUndefined',
+ 'PydanticUndefinedType',
+ 'SchemaError',
+ 'ErrorDetails',
+ 'InitErrorDetails',
+ 'ValidationError',
+ 'PydanticCustomError',
+ 'PydanticKnownError',
+ 'PydanticOmit',
+ 'PydanticUseDefault',
+ 'PydanticSerializationError',
+ 'PydanticSerializationUnexpectedValue',
+ 'TzInfo',
+ 'to_json',
+ 'from_json',
+ 'to_jsonable_python',
+ 'validate_core_schema',
+]
+
+
+class ErrorDetails(_TypedDict):
+ type: str
+ """
+ The type of error that occurred, this is an identifier designed for
+ programmatic use that will change rarely or never.
+
+ `type` is unique for each error message, and can hence be used as an identifier to build custom error messages.
+ """
+ loc: tuple[int | str, ...]
+ """Tuple of strings and ints identifying where in the schema the error occurred."""
+ msg: str
+ """A human readable error message."""
+ input: _Any
+ """The input data at this `loc` that caused the error."""
+ ctx: _NotRequired[dict[str, _Any]]
+ """
+ Values which are required to render the error message, and could hence be useful in rendering custom error messages.
+ Also useful for passing custom error data forward.
+ """
+
+
+class InitErrorDetails(_TypedDict):
+ type: str | PydanticCustomError
+    """The type of error that occurred, this should be a "slug" identifier that changes rarely or never."""
+ loc: _NotRequired[tuple[int | str, ...]]
+ """Tuple of strings and ints identifying where in the schema the error occurred."""
+ input: _Any
+ """The input data at this `loc` that caused the error."""
+ ctx: _NotRequired[dict[str, _Any]]
+ """
+ Values which are required to render the error message, and could hence be useful in rendering custom error messages.
+ Also useful for passing custom error data forward.
+ """
+
+
+class ErrorTypeInfo(_TypedDict):
+ """
+ Gives information about errors.
+ """
+
+ type: ErrorType
+    """The type of error that occurred, this should be a "slug" identifier that changes rarely or never."""
+ message_template_python: str
+ """String template to render a human readable error message from using context, when the input is Python."""
+ example_message_python: str
+ """Example of a human readable error message, when the input is Python."""
+ message_template_json: _NotRequired[str]
+ """String template to render a human readable error message from using context, when the input is JSON data."""
+ example_message_json: _NotRequired[str]
+ """Example of a human readable error message, when the input is JSON data."""
+ example_context: dict[str, _Any] | None
+ """Example of context values."""
+
+
+class MultiHostHost(_TypedDict):
+ """
+ A host part of a multi-host URL.
+ """
+
+ username: str | None
+ """The username part of this host, or `None`."""
+ password: str | None
+ """The password part of this host, or `None`."""
+ host: str | None
+ """The host part of this host, or `None`."""
+ port: int | None
+ """The port part of this host, or `None`."""
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/__pycache__/__init__.cpython-310.pyc b/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..030686bcb49020211847abd5638476d45d8b5746
Binary files /dev/null and b/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/_pydantic_core.pyi b/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/_pydantic_core.pyi
new file mode 100644
index 0000000000000000000000000000000000000000..f3103f28fb44355c2afdb623f51c6608958ad873
--- /dev/null
+++ b/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/_pydantic_core.pyi
@@ -0,0 +1,1013 @@
+import datetime
+from collections.abc import Mapping
+from typing import Any, Callable, Generic, Literal, TypeVar, final
+
+from _typeshed import SupportsAllComparisons
+from typing_extensions import LiteralString, Self, TypeAlias
+
+from pydantic_core import ErrorDetails, ErrorTypeInfo, InitErrorDetails, MultiHostHost
+from pydantic_core.core_schema import CoreConfig, CoreSchema, ErrorType
+
+__all__ = [
+ '__version__',
+ 'build_profile',
+ 'build_info',
+ '_recursion_limit',
+ 'ArgsKwargs',
+ 'SchemaValidator',
+ 'SchemaSerializer',
+ 'Url',
+ 'MultiHostUrl',
+ 'SchemaError',
+ 'ValidationError',
+ 'PydanticCustomError',
+ 'PydanticKnownError',
+ 'PydanticOmit',
+ 'PydanticUseDefault',
+ 'PydanticSerializationError',
+ 'PydanticSerializationUnexpectedValue',
+ 'PydanticUndefined',
+ 'PydanticUndefinedType',
+ 'Some',
+ 'to_json',
+ 'from_json',
+ 'to_jsonable_python',
+ 'list_all_errors',
+ 'TzInfo',
+ 'validate_core_schema',
+]
+__version__: str
+build_profile: str
+build_info: str
+_recursion_limit: int
+
+_T = TypeVar('_T', default=Any, covariant=True)
+
+_StringInput: TypeAlias = 'dict[str, _StringInput]'
+
+@final
+class Some(Generic[_T]):
+ """
+ Similar to Rust's [`Option::Some`](https://doc.rust-lang.org/std/option/enum.Option.html) type, this
+ identifies a value as being present, and provides a way to access it.
+
+    Generally used in a union with `None` to differentiate between "some value which could be None" and no value.
+ """
+
+ __match_args__ = ('value',)
+
+ @property
+ def value(self) -> _T:
+ """
+ Returns the value wrapped by `Some`.
+ """
+ @classmethod
+ def __class_getitem__(cls, item: Any, /) -> type[Self]: ...
+
+@final
+class SchemaValidator:
+ """
+ `SchemaValidator` is the Python wrapper for `pydantic-core`'s Rust validation logic, internally it owns one
+ `CombinedValidator` which may in turn own more `CombinedValidator`s which make up the full schema validator.
+ """
+
+ # note: pyo3 currently supports __new__, but not __init__, though we include __init__ stubs
+ # and docstrings here (and in the following classes) for documentation purposes
+
+ def __init__(self, schema: CoreSchema, config: CoreConfig | None = None) -> None:
+ """Initializes the `SchemaValidator`.
+
+ Arguments:
+ schema: The `CoreSchema` to use for validation.
+ config: Optionally a [`CoreConfig`][pydantic_core.core_schema.CoreConfig] to configure validation.
+ """
+
+ def __new__(cls, schema: CoreSchema, config: CoreConfig | None = None) -> Self: ...
+ @property
+ def title(self) -> str:
+ """
+ The title of the schema, as used in the heading of [`ValidationError.__str__()`][pydantic_core.ValidationError].
+ """
+ def validate_python(
+ self,
+ input: Any,
+ *,
+ strict: bool | None = None,
+ from_attributes: bool | None = None,
+ context: Any | None = None,
+ self_instance: Any | None = None,
+ allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False,
+ ) -> Any:
+ """
+ Validate a Python object against the schema and return the validated object.
+
+ Arguments:
+ input: The Python object to validate.
+ strict: Whether to validate the object in strict mode.
+ If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used.
+ from_attributes: Whether to validate objects as inputs to models by extracting attributes.
+ If `None`, the value of [`CoreConfig.from_attributes`][pydantic_core.core_schema.CoreConfig] is used.
+ context: The context to use for validation, this is passed to functional validators as
+ [`info.context`][pydantic_core.core_schema.ValidationInfo.context].
+ self_instance: An instance of a model set attributes on from validation, this is used when running
+ validation from the `__init__` method of a model.
+ allow_partial: Whether to allow partial validation; if `True` errors in the last element of sequences
+ and mappings are ignored.
+ `'trailing-strings'` means any final unfinished JSON string is included in the result.
+
+ Raises:
+ ValidationError: If validation fails.
+            Exception: Other error types may be raised if internal errors occur.
+
+ Returns:
+ The validated object.
+ """
+ def isinstance_python(
+ self,
+ input: Any,
+ *,
+ strict: bool | None = None,
+ from_attributes: bool | None = None,
+ context: Any | None = None,
+ self_instance: Any | None = None,
+ ) -> bool:
+ """
+ Similar to [`validate_python()`][pydantic_core.SchemaValidator.validate_python] but returns a boolean.
+
+ Arguments match `validate_python()`. This method will not raise `ValidationError`s but will raise internal
+ errors.
+
+ Returns:
+ `True` if validation succeeds, `False` if validation fails.
+ """
+ def validate_json(
+ self,
+ input: str | bytes | bytearray,
+ *,
+ strict: bool | None = None,
+ context: Any | None = None,
+ self_instance: Any | None = None,
+ allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False,
+ ) -> Any:
+ """
+ Validate JSON data directly against the schema and return the validated Python object.
+
+ This method should be significantly faster than `validate_python(json.loads(json_data))` as it avoids the
+        need to create intermediate Python objects.
+
+ It also handles constructing the correct Python type even in strict mode, where
+ `validate_python(json.loads(json_data))` would fail validation.
+
+ Arguments:
+ input: The JSON data to validate.
+ strict: Whether to validate the object in strict mode.
+ If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used.
+ context: The context to use for validation, this is passed to functional validators as
+ [`info.context`][pydantic_core.core_schema.ValidationInfo.context].
+ self_instance: An instance of a model set attributes on from validation.
+ allow_partial: Whether to allow partial validation; if `True` incomplete JSON will be parsed successfully
+ and errors in the last element of sequences and mappings are ignored.
+ `'trailing-strings'` means any final unfinished JSON string is included in the result.
+
+ Raises:
+ ValidationError: If validation fails or if the JSON data is invalid.
+            Exception: Other error types may be raised if internal errors occur.
+
+ Returns:
+ The validated Python object.
+ """
+ def validate_strings(
+ self,
+ input: _StringInput,
+ *,
+ strict: bool | None = None,
+ context: Any | None = None,
+ allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False,
+ ) -> Any:
+ """
+ Validate a string against the schema and return the validated Python object.
+
+ This is similar to `validate_json` but applies to scenarios where the input will be a string but not
+ JSON data, e.g. URL fragments, query parameters, etc.
+
+ Arguments:
+ input: The input as a string, or bytes/bytearray if `strict=False`.
+ strict: Whether to validate the object in strict mode.
+ If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used.
+ context: The context to use for validation, this is passed to functional validators as
+ [`info.context`][pydantic_core.core_schema.ValidationInfo.context].
+ allow_partial: Whether to allow partial validation; if `True` errors in the last element of sequences
+ and mappings are ignored.
+ `'trailing-strings'` means any final unfinished JSON string is included in the result.
+
+ Raises:
+ ValidationError: If validation fails or if the JSON data is invalid.
+            Exception: Other error types may be raised if internal errors occur.
+
+ Returns:
+ The validated Python object.
+ """
+ def validate_assignment(
+ self,
+ obj: Any,
+ field_name: str,
+ field_value: Any,
+ *,
+ strict: bool | None = None,
+ from_attributes: bool | None = None,
+ context: Any | None = None,
+ ) -> dict[str, Any] | tuple[dict[str, Any], dict[str, Any] | None, set[str]]:
+ """
+ Validate an assignment to a field on a model.
+
+ Arguments:
+ obj: The model instance being assigned to.
+ field_name: The name of the field to validate assignment for.
+ field_value: The value to assign to the field.
+ strict: Whether to validate the object in strict mode.
+ If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used.
+ from_attributes: Whether to validate objects as inputs to models by extracting attributes.
+ If `None`, the value of [`CoreConfig.from_attributes`][pydantic_core.core_schema.CoreConfig] is used.
+ context: The context to use for validation, this is passed to functional validators as
+ [`info.context`][pydantic_core.core_schema.ValidationInfo.context].
+
+ Raises:
+ ValidationError: If validation fails.
+            Exception: Other error types may be raised if internal errors occur.
+
+ Returns:
+ Either the model dict or a tuple of `(model_data, model_extra, fields_set)`
+ """
+ def get_default_value(self, *, strict: bool | None = None, context: Any = None) -> Some | None:
+ """
+ Get the default value for the schema, including running default value validation.
+
+ Arguments:
+ strict: Whether to validate the default value in strict mode.
+ If `None`, the value of [`CoreConfig.strict`][pydantic_core.core_schema.CoreConfig] is used.
+ context: The context to use for validation, this is passed to functional validators as
+ [`info.context`][pydantic_core.core_schema.ValidationInfo.context].
+
+ Raises:
+ ValidationError: If validation fails.
+            Exception: Other error types may be raised if internal errors occur.
+
+ Returns:
+ `None` if the schema has no default value, otherwise a [`Some`][pydantic_core.Some] containing the default.
+ """
+
+# In reality, `bool` should be replaced by `Literal[True]` but mypy fails to correctly apply bidirectional type inference
+# (e.g. when using `{'a': {'b': True}}`).
+_IncEx: TypeAlias = set[int] | set[str] | Mapping[int, _IncEx | bool] | Mapping[str, _IncEx | bool]
+
+@final
+class SchemaSerializer:
+ """
+ `SchemaSerializer` is the Python wrapper for `pydantic-core`'s Rust serialization logic, internally it owns one
+ `CombinedSerializer` which may in turn own more `CombinedSerializer`s which make up the full schema serializer.
+ """
+
+ def __init__(self, schema: CoreSchema, config: CoreConfig | None = None) -> None:
+ """Initializes the `SchemaSerializer`.
+
+ Arguments:
+ schema: The `CoreSchema` to use for serialization.
+            config: Optionally a [`CoreConfig`][pydantic_core.core_schema.CoreConfig] to configure serialization.
+ """
+
+ def __new__(cls, schema: CoreSchema, config: CoreConfig | None = None) -> Self: ...
+ def to_python(
+ self,
+ value: Any,
+ *,
+ mode: str | None = None,
+ include: _IncEx | None = None,
+ exclude: _IncEx | None = None,
+ by_alias: bool = True,
+ exclude_unset: bool = False,
+ exclude_defaults: bool = False,
+ exclude_none: bool = False,
+ round_trip: bool = False,
+ warnings: bool | Literal['none', 'warn', 'error'] = True,
+ fallback: Callable[[Any], Any] | None = None,
+ serialize_as_any: bool = False,
+ context: Any | None = None,
+ ) -> Any:
+ """
+ Serialize/marshal a Python object to a Python object including transforming and filtering data.
+
+ Arguments:
+ value: The Python object to serialize.
+ mode: The serialization mode to use, either `'python'` or `'json'`, defaults to `'python'`. In JSON mode,
+ all values are converted to JSON compatible types, e.g. `None`, `int`, `float`, `str`, `list`, `dict`.
+ include: A set of fields to include, if `None` all fields are included.
+ exclude: A set of fields to exclude, if `None` no fields are excluded.
+ by_alias: Whether to use the alias names of fields.
+ exclude_unset: Whether to exclude fields that are not set,
+ e.g. are not included in `__pydantic_fields_set__`.
+ exclude_defaults: Whether to exclude fields that are equal to their default value.
+ exclude_none: Whether to exclude fields that have a value of `None`.
+ round_trip: Whether to enable serialization and validation round-trip support.
+ warnings: How to handle invalid fields. False/"none" ignores them, True/"warn" logs errors,
+ "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
+ fallback: A function to call when an unknown value is encountered,
+ if `None` a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
+ serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
+ context: The context to use for serialization, this is passed to functional serializers as
+ [`info.context`][pydantic_core.core_schema.SerializationInfo.context].
+
+ Raises:
+ PydanticSerializationError: If serialization fails and no `fallback` function is provided.
+
+ Returns:
+ The serialized Python object.
+ """
    def to_json(
        self,
        value: Any,
        *,
        indent: int | None = None,
        include: _IncEx | None = None,
        exclude: _IncEx | None = None,
        by_alias: bool = True,
        exclude_unset: bool = False,
        exclude_defaults: bool = False,
        exclude_none: bool = False,
        round_trip: bool = False,
        warnings: bool | Literal['none', 'warn', 'error'] = True,
        fallback: Callable[[Any], Any] | None = None,
        serialize_as_any: bool = False,
        context: Any | None = None,
    ) -> bytes:
        """
        Serialize a Python object to JSON including transforming and filtering data.

        Arguments:
            value: The Python object to serialize.
            indent: If `None`, the JSON will be compact, otherwise it will be pretty-printed with the indent provided.
            include: A set of fields to include, if `None` all fields are included.
            exclude: A set of fields to exclude, if `None` no fields are excluded.
            by_alias: Whether to use the alias names of fields.
            exclude_unset: Whether to exclude fields that are not set,
                e.g. are not included in `__pydantic_fields_set__`.
            exclude_defaults: Whether to exclude fields that are equal to their default value.
            exclude_none: Whether to exclude fields that have a value of `None`.
            round_trip: Whether to enable serialization and validation round-trip support.
            warnings: How to handle invalid fields. False/"none" ignores them, True/"warn" logs errors,
                "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
            fallback: A function to call when an unknown value is encountered,
                if `None` a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
            serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
            context: The context to use for serialization, this is passed to functional serializers as
                [`info.context`][pydantic_core.core_schema.SerializationInfo.context].

        Raises:
            PydanticSerializationError: If serialization fails and no `fallback` function is provided.

        Returns:
            JSON bytes.
        """
+
def to_json(
    value: Any,
    *,
    indent: int | None = None,
    include: _IncEx | None = None,
    exclude: _IncEx | None = None,
    by_alias: bool = True,
    exclude_none: bool = False,
    round_trip: bool = False,
    timedelta_mode: Literal['iso8601', 'float'] = 'iso8601',
    bytes_mode: Literal['utf8', 'base64', 'hex'] = 'utf8',
    inf_nan_mode: Literal['null', 'constants', 'strings'] = 'constants',
    serialize_unknown: bool = False,
    fallback: Callable[[Any], Any] | None = None,
    serialize_as_any: bool = False,
    context: Any | None = None,
) -> bytes:
    """
    Serialize a Python object to JSON including transforming and filtering data.

    This is effectively a standalone version of [`SchemaSerializer.to_json`][pydantic_core.SchemaSerializer.to_json].

    Arguments:
        value: The Python object to serialize.
        indent: If `None`, the JSON will be compact, otherwise it will be pretty-printed with the indent provided.
        include: A set of fields to include, if `None` all fields are included.
        exclude: A set of fields to exclude, if `None` no fields are excluded.
        by_alias: Whether to use the alias names of fields.
        exclude_none: Whether to exclude fields that have a value of `None`.
        round_trip: Whether to enable serialization and validation round-trip support.
        timedelta_mode: How to serialize `timedelta` objects, either `'iso8601'` or `'float'`.
        bytes_mode: How to serialize `bytes` objects, either `'utf8'`, `'base64'`, or `'hex'`.
        inf_nan_mode: How to serialize `Infinity`, `-Infinity` and `NaN` values, either `'null'`, `'constants'`, or `'strings'`.
        serialize_unknown: Attempt to serialize unknown types, `str(value)` will be used, if that fails
            a placeholder string such as `"<Unserializable ... object>"` will be used
            (the original placeholder text here appears stripped by extraction — confirm against upstream).
        fallback: A function to call when an unknown value is encountered,
            if `None` a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
        serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
        context: The context to use for serialization, this is passed to functional serializers as
            [`info.context`][pydantic_core.core_schema.SerializationInfo.context].

    Raises:
        PydanticSerializationError: If serialization fails and no `fallback` function is provided.

    Returns:
        JSON bytes.
    """
+
def from_json(
    data: str | bytes | bytearray,
    *,
    allow_inf_nan: bool = True,
    cache_strings: bool | Literal['all', 'keys', 'none'] = True,
    allow_partial: bool | Literal['off', 'on', 'trailing-strings'] = False,
) -> Any:
    """
    Deserialize JSON data to a Python object.

    This is effectively a faster version of `json.loads()`, with some extra functionality.

    Arguments:
        data: The JSON data to deserialize.
        allow_inf_nan: Whether to allow `Infinity`, `-Infinity` and `NaN` values as `json.loads()` does by default.
        cache_strings: Whether to cache strings to avoid constructing new Python objects,
            this should have a significant impact on performance while increasing memory usage slightly,
            `all/True` means cache all strings, `keys` means cache only dict keys, `none/False` means no caching.
        allow_partial: Whether to allow partial deserialization, if `True` JSON data is returned if the end of the
            input is reached before the full object is deserialized, e.g. `["aa", "bb", "c` would return `['aa', 'bb']`.
            `'trailing-strings'` means any final unfinished JSON string is included in the result.

    Raises:
        ValueError: If deserialization fails.

    Returns:
        The deserialized Python object.
    """
+
def to_jsonable_python(
    value: Any,
    *,
    include: _IncEx | None = None,
    exclude: _IncEx | None = None,
    by_alias: bool = True,
    exclude_none: bool = False,
    round_trip: bool = False,
    timedelta_mode: Literal['iso8601', 'float'] = 'iso8601',
    bytes_mode: Literal['utf8', 'base64', 'hex'] = 'utf8',
    inf_nan_mode: Literal['null', 'constants', 'strings'] = 'constants',
    serialize_unknown: bool = False,
    fallback: Callable[[Any], Any] | None = None,
    serialize_as_any: bool = False,
    context: Any | None = None,
) -> Any:
    """
    Serialize/marshal a Python object to a JSON-serializable Python object including transforming and filtering data.

    This is effectively a standalone version of
    [`SchemaSerializer.to_python(mode='json')`][pydantic_core.SchemaSerializer.to_python].

    Args:
        value: The Python object to serialize.
        include: A set of fields to include, if `None` all fields are included.
        exclude: A set of fields to exclude, if `None` no fields are excluded.
        by_alias: Whether to use the alias names of fields.
        exclude_none: Whether to exclude fields that have a value of `None`.
        round_trip: Whether to enable serialization and validation round-trip support.
        timedelta_mode: How to serialize `timedelta` objects, either `'iso8601'` or `'float'`.
        bytes_mode: How to serialize `bytes` objects, either `'utf8'`, `'base64'`, or `'hex'`.
        inf_nan_mode: How to serialize `Infinity`, `-Infinity` and `NaN` values, either `'null'`, `'constants'`, or `'strings'`.
        serialize_unknown: Attempt to serialize unknown types, `str(value)` will be used, if that fails
            a placeholder string such as `"<Unserializable ... object>"` will be used
            (the original placeholder text here appears stripped by extraction — confirm against upstream).
        fallback: A function to call when an unknown value is encountered,
            if `None` a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
        serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
        context: The context to use for serialization, this is passed to functional serializers as
            [`info.context`][pydantic_core.core_schema.SerializationInfo.context].

    Raises:
        PydanticSerializationError: If serialization fails and no `fallback` function is provided.

    Returns:
        The serialized Python object.
    """
+
class Url(SupportsAllComparisons):
    """
    A URL type, internal logic uses the [url rust crate](https://docs.rs/url/latest/url/) originally developed
    by Mozilla.
    """

    def __init__(self, url: str) -> None: ...
    def __new__(cls, url: str) -> Self: ...
    # Component accessors: properties return the raw (percent-encoded) components;
    # `unicode_host`/`unicode_string`/`query_params` are methods, not properties.
    @property
    def scheme(self) -> str: ...
    @property
    def username(self) -> str | None: ...
    @property
    def password(self) -> str | None: ...
    @property
    def host(self) -> str | None: ...
    def unicode_host(self) -> str | None: ...
    @property
    def port(self) -> int | None: ...
    @property
    def path(self) -> str | None: ...
    @property
    def query(self) -> str | None: ...
    def query_params(self) -> list[tuple[str, str]]: ...
    @property
    def fragment(self) -> str | None: ...
    def unicode_string(self) -> str: ...
    def __repr__(self) -> str: ...
    def __str__(self) -> str: ...
    # NOTE(review): annotated as returning `str`, not `Self` (unlike
    # `MultiHostUrl.__deepcopy__`) — confirm against the compiled implementation.
    def __deepcopy__(self, memo: dict) -> str: ...
    @classmethod
    def build(
        cls,
        *,
        scheme: str,
        username: str | None = None,
        password: str | None = None,
        host: str,
        port: int | None = None,
        path: str | None = None,
        query: str | None = None,
        fragment: str | None = None,
    ) -> Self: ...
+
class MultiHostUrl(SupportsAllComparisons):
    """
    A URL type with support for multiple hosts, as used by some databases for DSNs, e.g. `https://foo.com,bar.com/path`.

    Internal URL logic uses the [url rust crate](https://docs.rs/url/latest/url/) originally developed
    by Mozilla.
    """

    def __init__(self, url: str) -> None: ...
    def __new__(cls, url: str) -> Self: ...
    # Unlike `Url`, per-host details (username/password/host/port) are exposed
    # via `hosts()` rather than individual properties.
    @property
    def scheme(self) -> str: ...
    @property
    def path(self) -> str | None: ...
    @property
    def query(self) -> str | None: ...
    def query_params(self) -> list[tuple[str, str]]: ...
    @property
    def fragment(self) -> str | None: ...
    def hosts(self) -> list[MultiHostHost]: ...
    def unicode_string(self) -> str: ...
    def __repr__(self) -> str: ...
    def __str__(self) -> str: ...
    def __deepcopy__(self, memo: dict) -> Self: ...
    @classmethod
    def build(
        cls,
        *,
        scheme: str,
        hosts: list[MultiHostHost] | None = None,
        username: str | None = None,
        password: str | None = None,
        host: str | None = None,
        port: int | None = None,
        path: str | None = None,
        query: str | None = None,
        fragment: str | None = None,
    ) -> Self: ...
+
@final
class SchemaError(Exception):
    """
    Information about errors that occur while building a [`SchemaValidator`][pydantic_core.SchemaValidator]
    or [`SchemaSerializer`][pydantic_core.SchemaSerializer].
    """

    def error_count(self) -> int:
        """
        Returns:
            The number of errors in the schema.
        """

    def errors(self) -> list[ErrorDetails]:
        """
        Returns:
            A list of [`ErrorDetails`][pydantic_core.ErrorDetails] for each error in the schema.
        """
+
class ValidationError(ValueError):
    """
    `ValidationError` is the exception raised by `pydantic-core` when validation fails, it contains a list of errors
    which detail why validation failed.
    """

    @classmethod
    def from_exception_data(
        cls,
        title: str,
        line_errors: list[InitErrorDetails],
        input_type: Literal['python', 'json'] = 'python',
        hide_input: bool = False,
    ) -> Self:
        """
        Python constructor for a Validation Error.

        The API for constructing validation errors will probably change in the future,
        hence the static method rather than `__init__`.

        Arguments:
            title: The title of the error, as used in the heading of `str(validation_error)`
            line_errors: A list of [`InitErrorDetails`][pydantic_core.InitErrorDetails] which contain information
                about errors that occurred during validation.
            input_type: Whether the error is for a Python object or JSON.
            hide_input: Whether to hide the input value in the error message.
        """

    @property
    def title(self) -> str:
        """
        The title of the error, as used in the heading of `str(validation_error)`.
        """

    def error_count(self) -> int:
        """
        Returns:
            The number of errors in the validation error.
        """

    def errors(
        self, *, include_url: bool = True, include_context: bool = True, include_input: bool = True
    ) -> list[ErrorDetails]:
        """
        Details about each error in the validation error.

        Args:
            include_url: Whether to include a URL to documentation for each error.
            include_context: Whether to include the context of each error.
            include_input: Whether to include the input value of each error.

        Returns:
            A list of [`ErrorDetails`][pydantic_core.ErrorDetails] for each error in the validation error.
        """

    def json(
        self,
        *,
        indent: int | None = None,
        include_url: bool = True,
        include_context: bool = True,
        include_input: bool = True,
    ) -> str:
        """
        Same as [`errors()`][pydantic_core.ValidationError.errors] but returns a JSON string.

        Args:
            indent: The number of spaces to indent the JSON by, or `None` for no indentation - compact JSON.
            include_url: Whether to include a URL to documentation for each error.
            include_context: Whether to include the context of each error.
            include_input: Whether to include the input value of each error.

        Returns:
            a JSON string.
        """

    def __repr__(self) -> str:
        """
        A string representation of the validation error.

        Whether or not documentation URLs are included in the repr is controlled by the
        environment variable `PYDANTIC_ERRORS_INCLUDE_URL` being set to `1` or
        `true`; by default, URLs are shown.

        Due to implementation details, this environment variable can only be set once,
        before the first validation error is created.
        """
+
class PydanticCustomError(ValueError):
    """A custom exception providing flexible error handling for Pydantic validators.

    You can raise this error in custom validators when you'd like flexibility in regards to the error type, message, and context.

    Example:
        ```py
        from pydantic_core import PydanticCustomError

        def custom_validator(v) -> None:
            if v <= 10:
                raise PydanticCustomError('custom_value_error', 'Value must be greater than {value}', {'value': 10, 'extra_context': 'extra_data'})
            return v
        ```
    """

    def __init__(
        self, error_type: LiteralString, message_template: LiteralString, context: dict[str, Any] | None = None
    ) -> None:
        """Initializes the `PydanticCustomError`.

        Arguments:
            error_type: The error type.
            message_template: The message template.
            context: The data to inject into the message template.
        """

    def __new__(
        cls, error_type: LiteralString, message_template: LiteralString, context: dict[str, Any] | None = None
    ) -> Self: ...

    @property
    def context(self) -> dict[str, Any] | None:
        """Values which are required to render the error message, and could hence be useful in passing error data forward."""

    @property
    def type(self) -> str:
        """The error type associated with the error. For consistency with Pydantic, this is typically a snake_case string."""

    @property
    def message_template(self) -> str:
        """The message template associated with the error. This is a string that can be formatted with context variables in `{curly_braces}`."""

    def message(self) -> str:
        """The formatted message associated with the error. This presents as the message template with context variables appropriately injected."""
+
@final
class PydanticKnownError(ValueError):
    """A helper class for raising exceptions that mimic Pydantic's built-in exceptions, with more flexibility in regards to context.

    Unlike [`PydanticCustomError`][pydantic_core.PydanticCustomError], the `error_type` argument must be a known `ErrorType`.

    Example:
        ```py
        from pydantic_core import PydanticKnownError

        def custom_validator(v) -> None:
            if v <= 10:
                raise PydanticKnownError(error_type='greater_than', context={'gt': 10})
            return v
        ```
    """

    def __init__(self, error_type: ErrorType, context: dict[str, Any] | None = None) -> None:
        """Initializes the `PydanticKnownError`.

        Arguments:
            error_type: The error type.
            context: The data to inject into the message template.
        """

    def __new__(cls, error_type: ErrorType, context: dict[str, Any] | None = None) -> Self: ...

    @property
    def context(self) -> dict[str, Any] | None:
        """Values which are required to render the error message, and could hence be useful in passing error data forward."""

    @property
    def type(self) -> ErrorType:
        """The type of the error."""

    @property
    def message_template(self) -> str:
        """The message template associated with the provided error type. This is a string that can be formatted with context variables in `{curly_braces}`."""

    def message(self) -> str:
        """The formatted message associated with the error. This presents as the message template with context variables appropriately injected."""
+
@final
class PydanticOmit(Exception):
    """An exception to signal that a field should be omitted from a generated result.

    This could span from omitting a field from a JSON Schema to omitting a field from a serialized result.
    Upcoming: more robust support for using PydanticOmit in custom serializers is still in development.
    Right now, this is primarily used in the JSON Schema generation process.

    Example:
        ```py
        from typing import Callable

        from pydantic_core import PydanticOmit

        from pydantic import BaseModel
        from pydantic.json_schema import GenerateJsonSchema, JsonSchemaValue


        class MyGenerateJsonSchema(GenerateJsonSchema):
            def handle_invalid_for_json_schema(self, schema, error_info) -> JsonSchemaValue:
                raise PydanticOmit


        class Predicate(BaseModel):
            name: str = 'no-op'
            func: Callable = lambda x: x


        instance_example = Predicate()

        validation_schema = instance_example.model_json_schema(schema_generator=MyGenerateJsonSchema, mode='validation')
        print(validation_schema)
        '''
        {'properties': {'name': {'default': 'no-op', 'title': 'Name', 'type': 'string'}}, 'title': 'Predicate', 'type': 'object'}
        '''
        ```

    For a more in depth example / explanation, see the [customizing JSON schema](../concepts/json_schema.md#customizing-the-json-schema-generation-process) docs.
    """

    def __new__(cls) -> Self: ...
+
@final
class PydanticUseDefault(Exception):
    """An exception to signal that standard validation either failed or should be skipped, and the default value should be used instead.

    This exception can be raised in custom validation functions to redirect the flow of validation.

    Example:
        ```py
        from pydantic_core import PydanticUseDefault
        from datetime import datetime
        from pydantic import BaseModel, field_validator


        class Event(BaseModel):
            name: str = 'meeting'
            time: datetime

            @field_validator('name', mode='plain')
            def name_must_be_present(cls, v) -> str:
                if not v or not isinstance(v, str):
                    raise PydanticUseDefault()
                return v


        event1 = Event(name='party', time=datetime(2024, 1, 1, 12, 0, 0))
        print(repr(event1))
        # > Event(name='party', time=datetime.datetime(2024, 1, 1, 12, 0))
        event2 = Event(time=datetime(2024, 1, 1, 12, 0, 0))
        print(repr(event2))
        # > Event(name='meeting', time=datetime.datetime(2024, 1, 1, 12, 0))
        ```

    For an additional example, see the [validating partial json data](../concepts/json.md#partial-json-parsing) section of the Pydantic documentation.
    """

    def __new__(cls) -> Self: ...
+
@final
class PydanticSerializationError(ValueError):
    """An error raised when an issue occurs during serialization.

    In custom serializers, this error can be used to indicate that serialization has failed.
    """

    def __init__(self, message: str) -> None:
        """Initializes the `PydanticSerializationError`.

        Arguments:
            message: The message associated with the error.
        """

    def __new__(cls, message: str) -> Self: ...
+
@final
class PydanticSerializationUnexpectedValue(ValueError):
    """An error raised when an unexpected value is encountered during serialization.

    This error is often caught and coerced into a warning, as `pydantic-core` generally makes a best attempt
    at serializing values, in contrast with validation where errors are eagerly raised.

    Example:
        ```py
        from pydantic import BaseModel, field_serializer
        from pydantic_core import PydanticSerializationUnexpectedValue

        class BasicPoint(BaseModel):
            x: int
            y: int

            @field_serializer('*')
            def serialize(self, v):
                if not isinstance(v, int):
                    raise PydanticSerializationUnexpectedValue(f'Expected type `int`, got {type(v)} with value {v}')
                return v

        point = BasicPoint(x=1, y=2)
        # some sort of mutation
        point.x = 'a'

        print(point.model_dump())
        '''
        UserWarning: Pydantic serializer warnings:
          PydanticSerializationUnexpectedValue(Expected type `int`, got <class 'str'> with value a)
          return self.__pydantic_serializer__.to_python(
        {'x': 'a', 'y': 2}
        '''
        ```
        (The `<class 'str'>` in the example output above appeared stripped by extraction
        and has been reconstructed — confirm against upstream.)

    This is often used internally in `pydantic-core` when unexpected types are encountered during serialization,
    but it can also be used by users in custom serializers, as seen above.
    """

    def __init__(self, message: str) -> None:
        """Initializes the `PydanticSerializationUnexpectedValue`.

        Arguments:
            message: The message associated with the unexpected value.
        """

    # NOTE(review): `__new__` accepts `message=None` while `__init__` requires a
    # `str` — presumably the message is optional at construction; confirm upstream.
    def __new__(cls, message: str | None = None) -> Self: ...
+
@final
class ArgsKwargs:
    """A construct used to store arguments and keyword arguments for a function call.

    This data structure is generally used to store information for core schemas associated with functions (like in an arguments schema).
    This data structure is also currently used for some validation against dataclasses.

    Example:
        ```py
        from pydantic.dataclasses import dataclass
        from pydantic import model_validator


        @dataclass
        class Model:
            a: int
            b: int

            @model_validator(mode="before")
            @classmethod
            def no_op_validator(cls, values):
                print(values)
                return values

        Model(1, b=2)
        #> ArgsKwargs((1,), {"b": 2})

        Model(1, 2)
        #> ArgsKwargs((1, 2), {})

        Model(a=1, b=2)
        #> ArgsKwargs((), {"a": 1, "b": 2})
        ```
    """

    def __init__(self, args: tuple[Any, ...], kwargs: dict[str, Any] | None = None) -> None:
        """Initializes the `ArgsKwargs`.

        Arguments:
            args: The arguments (inherently ordered) for a function call.
            kwargs: The keyword arguments for a function call.
        """

    def __new__(cls, args: tuple[Any, ...], kwargs: dict[str, Any] | None = None) -> Self: ...

    @property
    def args(self) -> tuple[Any, ...]:
        """The arguments (inherently ordered) for a function call."""

    @property
    def kwargs(self) -> dict[str, Any] | None:
        """The keyword arguments for a function call."""
+
@final
class PydanticUndefinedType:
    """A type used as a sentinel for undefined values."""

    # NOTE(review): both copy hooks are annotated `Self`; presumably they return
    # the sentinel itself so copies preserve identity — confirm against the
    # compiled implementation.
    def __copy__(self) -> Self: ...
    def __deepcopy__(self, memo: Any) -> Self: ...

# The module-level sentinel instance of `PydanticUndefinedType`.
PydanticUndefined: PydanticUndefinedType
+
def list_all_errors() -> list[ErrorTypeInfo]:
    """
    Get information about all built-in errors.

    Returns:
        A list of `ErrorTypeInfo` typed dicts.
    """
@final
class TzInfo(datetime.tzinfo):
    """A `pydantic-core` implementation of the abstract [`datetime.tzinfo`] class."""

    # Docstrings for attributes sourced from the abstract base class, [`datetime.tzinfo`](https://docs.python.org/3/library/datetime.html#datetime.tzinfo).

    def tzname(self, dt: datetime.datetime | None) -> str | None:
        """Return the time zone name corresponding to the [`datetime`][datetime.datetime] object _dt_, as a string.

        For more info, see [`tzinfo.tzname`][datetime.tzinfo.tzname].
        """

    def utcoffset(self, dt: datetime.datetime | None) -> datetime.timedelta | None:
        """Return offset of local time from UTC, as a [`timedelta`][datetime.timedelta] object that is positive east of UTC. If local time is west of UTC, this should be negative.

        More info can be found at [`tzinfo.utcoffset`][datetime.tzinfo.utcoffset].
        """

    def dst(self, dt: datetime.datetime | None) -> datetime.timedelta | None:
        """Return the daylight saving time (DST) adjustment, as a [`timedelta`][datetime.timedelta] object or `None` if DST information isn't known.

        More info can be found at [`tzinfo.dst`][datetime.tzinfo.dst]."""

    def fromutc(self, dt: datetime.datetime) -> datetime.datetime:
        """Adjust the date and time data associated with datetime object _dt_, returning an equivalent datetime in self's local time.

        More info can be found at [`tzinfo.fromutc`][datetime.tzinfo.fromutc]."""

    def __deepcopy__(self, _memo: dict[Any, Any]) -> TzInfo: ...
+
def validate_core_schema(schema: CoreSchema, *, strict: bool | None = None) -> CoreSchema:
    """Validate a core schema.

    This currently uses lax mode for validation (i.e. will coerce strings to dates and such)
    but may use strict mode in the future.
    We may also remove this function altogether, do not rely on it being present if you are
    using pydantic-core directly.
    """
diff --git a/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/py.typed b/evalkit_cambrian/lib/python3.10/site-packages/pydantic_core/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2540f4df92e1ba255b4ac36cce81a2e58500d4cc
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// NOTE(review): the #include targets below (and some template arguments in the
// declarations, e.g. `::std::vector` without its element type) appear stripped
// during extraction — restore from the torchgen-generated original before use.
// The only #includes we need are for custom classes that have defaults in the C++ API
#include
#include
#include

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include

namespace at {

namespace compositeexplicitautograd {

TORCH_API ::std::tuple<::std::vector,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale);
TORCH_API void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList out, at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale);
TORCH_API void _amp_foreach_non_finite_check_and_unscale_outf(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out);

} // namespace compositeexplicitautograd
} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cpu_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..1a39763975e6672474b93d246f5007a9527cc819
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_c2c_cpu_dispatch.h
@@ -0,0 +1,28 @@
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// NOTE(review): the #include targets below appear stripped during extraction —
// restore from the torchgen-generated original before use.
// The only #includes we need are for custom classes that have defaults in the C++ API
#include
#include
#include

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include

namespace at {

namespace cpu {

TORCH_API at::Tensor _fft_c2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward);
TORCH_API at::Tensor _fft_c2c_symint(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward);
TORCH_API at::Tensor & _fft_c2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward);
TORCH_API at::Tensor & _fft_c2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out);
TORCH_API at::Tensor & _fft_c2c_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward);
TORCH_API at::Tensor & _fft_c2c_symint_outf(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward, at::Tensor & out);

} // namespace cpu
} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_frac_cuda_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_frac_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..be2715382e60a5ef9e942bdf1932e524d7276299
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_frac_cuda_dispatch.h
@@ -0,0 +1,24 @@
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// NOTE(review): the #include targets below (and the `::std::vector` element type
// in the return declaration) appear stripped during extraction — restore from
// the torchgen-generated original before use.
// The only #includes we need are for custom classes that have defaults in the C++ API
#include
#include
#include

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include

namespace at {

namespace cuda {

TORCH_API ::std::vector _foreach_frac(at::TensorList self);
TORCH_API void _foreach_frac_(at::TensorList self);

} // namespace cuda
} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b123aa0b0e05e64dd71ad2101cac542433dbdae8
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_dropout_native.h
@@ -0,0 +1,22 @@
#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

// NOTE(review): the #include targets below (and the `::std::tuple` /
// `::std::optional` template arguments in the declarations) appear stripped
// during extraction — restore from the torchgen-generated original before use.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include


namespace at {
namespace native {
TORCH_API ::std::tuple _fused_dropout_out(const at::Tensor & self, double p, ::std::optional generator, at::Tensor & out0, at::Tensor & out1);
TORCH_API ::std::tuple fused_dropout_cuda(const at::Tensor & self, double p, ::std::optional generator=::std::nullopt);
} // namespace native
} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_meta_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2fd2f3131bbf8abf00646c51b096348be35fe58c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_meta_dispatch.h
@@ -0,0 +1,25 @@
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// NOTE(review): the #include targets below appear stripped during extraction —
// restore from the torchgen-generated original before use.
// The only #includes we need are for custom classes that have defaults in the C++ API
#include
#include
#include

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include

namespace at {

namespace meta {

TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out);

} // namespace meta
} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a6f90dd87402c9c39151641e0e75aaaaa433f29
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_make_per_channel_quantized_tensor_native.h
@@ -0,0 +1,23 @@
#pragma once

// @generated by torchgen/gen.py from NativeFunction.h

// NOTE(review): the #include targets below appear stripped during extraction —
// restore from the torchgen-generated original before use.
#include
#include
#include
#include
#include
#include
#include
#include
#include
#include


namespace at {
namespace native {
TORCH_API at::Tensor & _make_per_channel_quantized_tensor_out(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, at::Tensor & out);
TORCH_API at::Tensor make_per_channel_quantized_tensor_cpu(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis);
TORCH_API at::Tensor make_per_channel_quantized_tensor_cuda(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis);
} // namespace native
} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..846b99a15a4d9bbea12bb028ed17122e5e34a3e3
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_view_from_jagged_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _nested_view_from_jagged_copy {
+ using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, const ::std::optional &, const ::std::optional &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_view_from_jagged_copy")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_view_from_jagged_copy(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None) -> Tensor")
+ static at::Tensor call(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional & lengths, int64_t ragged_idx, const ::std::optional & min_seqlen, const ::std::optional & max_seqlen);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional & lengths, int64_t ragged_idx, const ::std::optional & min_seqlen, const ::std::optional & max_seqlen);
+};
+
+struct TORCH_API _nested_view_from_jagged_copy_out {
+ using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, const ::std::optional &, int64_t, const ::std::optional &, const ::std::optional &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_nested_view_from_jagged_copy")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_nested_view_from_jagged_copy.out(Tensor self, Tensor offsets, Tensor dummy, Tensor? lengths=None, int ragged_idx=1, Tensor? min_seqlen=None, Tensor? max_seqlen=None, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional & lengths, int64_t ragged_idx, const ::std::optional & min_seqlen, const ::std::optional & max_seqlen, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const ::std::optional & lengths, int64_t ragged_idx, const ::std::optional & min_seqlen, const ::std::optional & max_seqlen, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_print_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_print_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c956ce8abf198faa36c4d2427e9de99468b9db08
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_print_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API void _print(c10::string_view s);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_with_dims.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_with_dims.h
new file mode 100644
index 0000000000000000000000000000000000000000..795bd1faae2dcad68ab0237901c6b77a880be4fc
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_with_dims.h
@@ -0,0 +1,34 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+inline at::Tensor _sparse_compressed_tensor_with_dims(int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, at::TensorOptions options) {
+ return at::_ops::_sparse_compressed_tensor_with_dims::call(nnz, dense_dim, size, blocksize, index_dtype, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::_sparse_compressed_tensor_with_dims(int nnz, int dense_dim, int[] size, int[] blocksize, ScalarType index_dtype, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor
+inline at::Tensor _sparse_compressed_tensor_with_dims(int64_t nnz, int64_t dense_dim, at::IntArrayRef size, at::IntArrayRef blocksize, at::ScalarType index_dtype, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) {
+ return at::_ops::_sparse_compressed_tensor_with_dims::call(nnz, dense_dim, size, blocksize, index_dtype, dtype, layout, device, pin_memory);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..01aab769ee9e990baa7720b039e7373a362177fa
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_log_softmax_backward_data_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _sparse_log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self);
+TORCH_API at::Tensor & _sparse_log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default.h
new file mode 100644
index 0000000000000000000000000000000000000000..b66d90b21246ccbbc23de03cb37da576ae1d7e2c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_test_string_default.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_test_string_default(Tensor dummy, str a="\"'\\", str b='"\'\\') -> Tensor
+inline at::Tensor _test_string_default(const at::Tensor & dummy, c10::string_view a="\"'\\", c10::string_view b="\"'\\") {
+ return at::_ops::_test_string_default::call(dummy, a, b);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..2bf756c622fa3c86d763e44787735626268da822
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_unique_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _unique {
+ using schema = ::std::tuple (const at::Tensor &, bool, bool);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unique")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unique(Tensor self, bool sorted=True, bool return_inverse=False) -> (Tensor, Tensor)")
+ static ::std::tuple call(const at::Tensor & self, bool sorted, bool return_inverse);
+ static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse);
+};
+
+struct TORCH_API _unique_out {
+ using schema = ::std::tuple (const at::Tensor &, bool, bool, at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_unique")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_unique.out(Tensor self, bool sorted=True, bool return_inverse=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))")
+ static ::std::tuple call(const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1);
+ static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, bool sorted, bool return_inverse, at::Tensor & out0, at::Tensor & out1);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h
new file mode 100644
index 0000000000000000000000000000000000000000..ffc09c3de26d8cf26ef0a19ad3927a930c3f792a
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_csc_tensor_args.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_validate_sparse_csc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
+inline void _validate_sparse_csc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
+ return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/acos_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/acos_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..986f166cc893c0a15f85b9397b5393d778bf480c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/acos_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API acos {
+ using schema = at::Tensor (const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acos")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acos(Tensor self) -> Tensor")
+ static at::Tensor call(const at::Tensor & self);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API acos_ {
+ using schema = at::Tensor & (at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acos_")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acos_(Tensor(a!) self) -> Tensor(a!)")
+ static at::Tensor & call(at::Tensor & self);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API acos_out {
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::acos")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..674ad5fa72774174cb83781f5b458b9b2fbd8b0e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/adaptive_max_pool3d_backward_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_adaptive_max_pool3d_backward_out_cpu : public at::meta::structured_adaptive_max_pool3d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, const at::Tensor & grad_input);
+};
+struct TORCH_API structured_adaptive_max_pool3d_backward_out_cuda : public at::meta::structured_adaptive_max_pool3d_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h
new file mode 100644
index 0000000000000000000000000000000000000000..c7bb87a17fdcda558cb013c63c3824ff1124e526
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arccos.h
@@ -0,0 +1,44 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::arccos(Tensor self) -> Tensor
+inline at::Tensor arccos(const at::Tensor & self) {
+ return at::_ops::arccos::call(self);
+}
+
+// aten::arccos_(Tensor(a!) self) -> Tensor(a!)
+inline at::Tensor & arccos_(at::Tensor & self) {
+ return at::_ops::arccos_::call(self);
+}
+
+// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arccos_out(at::Tensor & out, const at::Tensor & self) {
+ return at::_ops::arccos_out::call(self, out);
+}
+// aten::arccos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & arccos_outf(const at::Tensor & self, at::Tensor & out) {
+ return at::_ops::arccos_out::call(self, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arccos_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arccos_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..4782be8fd671ecf48e9409bf7865f67ad1110837
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/arccos_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor arccos(const at::Tensor & self);
+TORCH_API at::Tensor & arccos_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & arccos_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..6aa7c3b7db566cd77b44bfa15d56fcbb83cdfbee
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_ceil_out : public at::meta::structured_ceil {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor ceil_sparse(const at::Tensor & self);
+TORCH_API at::Tensor & ceil_sparse_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & ceil_sparse_(at::Tensor & self);
+TORCH_API at::Tensor ceil_sparse_csr(const at::Tensor & self);
+TORCH_API at::Tensor & ceil_sparse_csr_out(const at::Tensor & self, at::Tensor & out);
+TORCH_API at::Tensor & ceil_sparse_csr_(at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conv2d_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conv2d_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d3453ba515644d08ddb13760949b80131ba6d2ef
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/conv2d_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1);
+TORCH_API at::Tensor conv2d_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
+TORCH_API at::Tensor conv2d(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1);
+TORCH_API at::Tensor conv2d_symint(const at::Tensor & input, const at::Tensor & weight, const ::std::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/expand_as_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/expand_as_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2198ce01f90ba6fa51f1c4c601e619c2b465870c
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/expand_as_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor expand_as(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq.h
new file mode 100644
index 0000000000000000000000000000000000000000..bf9b64442bed66d352f4946f779453c6212a0572
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq.h
@@ -0,0 +1,43 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor fft_fftfreq(int64_t n, double d=1.0, at::TensorOptions options={}) {
+ return at::_ops::fft_fftfreq::call(n, d, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::fft_fftfreq(int n, float d=1.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor fft_fftfreq(int64_t n, double d, ::std::optional dtype, ::std::optional layout, ::std::optional device, ::std::optional pin_memory) {
+ return at::_ops::fft_fftfreq::call(n, d, dtype, layout, device, pin_memory);
+}
+
+// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_fftfreq_out(at::Tensor & out, int64_t n, double d=1.0) {
+ return at::_ops::fft_fftfreq_out::call(n, d, out);
+}
+// aten::fft_fftfreq.out(int n, float d=1.0, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & fft_fftfreq_outf(int64_t n, double d, at::Tensor & out) {
+ return at::_ops::fft_fftfreq_out::call(n, d, out);
+}
+
+}
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/flip_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/flip_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6116ea9ed74a5ba3aeb31d23ade36b91fd9230e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/flip_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & flip_out(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out);
+TORCH_API at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc02b7b5d204985332d22011e72f99444fc28d0e
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/glu_backward_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor glu_backward_cpu(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & glu_backward_cpu_out(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input);
+TORCH_API at::Tensor glu_backward_cuda(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim);
+TORCH_API at::Tensor & glu_backward_cuda_out(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, at::Tensor & grad_input);
+} // namespace native
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_ops.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..ac26a2555027ce42c1f16d68a38c2d410c436a43
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/lgamma_ops.h
@@ -0,0 +1,50 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API lgamma_out {
+ using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lgamma")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lgamma.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+ static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+struct TORCH_API lgamma_ {
+ using schema = at::Tensor & (at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lgamma_")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lgamma_(Tensor(a!) self) -> Tensor(a!)")
+ static at::Tensor & call(at::Tensor & self);
+ static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self);
+};
+
+struct TORCH_API lgamma {
+ using schema = at::Tensor (const at::Tensor &);
+ using ptr_schema = schema*;
+ // See Note [static constexpr char* members for windows NVCC]
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lgamma")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+ STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lgamma(Tensor self) -> Tensor")
+ static at::Tensor call(const at::Tensor & self);
+ static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b8e3b83c7ed918c028c3caec1ad22ed8c087e8d
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ea56b707a0ac0e9398f61c2b7ed8d815afa88f64
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/margin_ranking_loss_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor margin_ranking_loss(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin=0.0, int64_t reduction=at::Reduction::Mean);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ff969183a4b7be6dac728dbc965f73e89b12012
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor max_pool2d_with_indices_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_native.h b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..85b946fa1d9efd43e4699315d041a8bcb04bd9a9
--- /dev/null
+++ b/infer_4_47_1/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include