repo_id
stringlengths 32
150
| file_path
stringlengths 46
183
| content
stringlengths 1
290k
| __index_level_0__
int64 0
0
|
---|---|---|---|
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust/asn1.pyi | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import typing
class TestCertificate:
not_after_tag: int
not_before_tag: int
issuer_value_tags: typing.List[int]
subject_value_tags: typing.List[int]
def decode_dss_signature(signature: bytes) -> typing.Tuple[int, int]: ...
def encode_dss_signature(r: int, s: int) -> bytes: ...
def parse_spki_for_data(data: bytes) -> bytes: ...
def test_parse_certificate(data: bytes) -> TestCertificate: ...
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust/pkcs7.pyi | import typing
from cryptography import x509
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.serialization import pkcs7
def serialize_certificates(
certs: typing.List[x509.Certificate],
encoding: serialization.Encoding,
) -> bytes: ...
def sign_and_serialize(
builder: pkcs7.PKCS7SignatureBuilder,
encoding: serialization.Encoding,
options: typing.Iterable[pkcs7.PKCS7Options],
) -> bytes: ...
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust/openssl/__init__.pyi | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import typing
from cryptography.hazmat.bindings._rust.openssl import x25519
__all__ = ["openssl_version", "raise_openssl_error", "x25519"]
def openssl_version() -> int: ...
def raise_openssl_error() -> typing.NoReturn: ...
def capture_error_stack() -> typing.List[OpenSSLError]: ...
class OpenSSLError:
@property
def lib(self) -> int: ...
@property
def reason(self) -> int: ...
@property
def reason_text(self) -> bytes: ...
def _lib_reason_match(self, lib: int, reason: int) -> bool: ...
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/_rust/openssl/x25519.pyi | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from cryptography.hazmat.primitives.asymmetric import x25519
class X25519PrivateKey: ...
class X25519PublicKey: ...
def generate_key() -> x25519.X25519PrivateKey: ...
def private_key_from_ptr(ptr: int) -> x25519.X25519PrivateKey: ...
def public_key_from_ptr(ptr: int) -> x25519.X25519PublicKey: ...
def from_private_bytes(data: bytes) -> x25519.X25519PrivateKey: ...
def from_public_bytes(data: bytes) -> x25519.X25519PublicKey: ...
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/openssl/__init__.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/openssl/_conditional.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import typing
def cryptography_has_set_cert_cb() -> typing.List[str]:
return [
"SSL_CTX_set_cert_cb",
"SSL_set_cert_cb",
]
def cryptography_has_ssl_st() -> typing.List[str]:
return [
"SSL_ST_BEFORE",
"SSL_ST_OK",
"SSL_ST_INIT",
"SSL_ST_RENEGOTIATE",
]
def cryptography_has_tls_st() -> typing.List[str]:
return [
"TLS_ST_BEFORE",
"TLS_ST_OK",
]
def cryptography_has_scrypt() -> typing.List[str]:
return [
"EVP_PBE_scrypt",
]
def cryptography_has_evp_pkey_dhx() -> typing.List[str]:
return [
"EVP_PKEY_DHX",
"d2i_DHxparams_bio",
"i2d_DHxparams_bio",
]
def cryptography_has_mem_functions() -> typing.List[str]:
return [
"Cryptography_CRYPTO_set_mem_functions",
]
def cryptography_has_x509_store_ctx_get_issuer() -> typing.List[str]:
return [
"X509_STORE_set_get_issuer",
]
def cryptography_has_ed448() -> typing.List[str]:
return [
"EVP_PKEY_ED448",
"NID_ED448",
]
def cryptography_has_ed25519() -> typing.List[str]:
return [
"NID_ED25519",
"EVP_PKEY_ED25519",
]
def cryptography_has_poly1305() -> typing.List[str]:
return [
"NID_poly1305",
"EVP_PKEY_POLY1305",
]
def cryptography_has_evp_digestfinal_xof() -> typing.List[str]:
return [
"EVP_DigestFinalXOF",
]
def cryptography_has_fips() -> typing.List[str]:
return [
"FIPS_mode_set",
"FIPS_mode",
]
def cryptography_has_ssl_sigalgs() -> typing.List[str]:
return [
"SSL_CTX_set1_sigalgs_list",
]
def cryptography_has_psk() -> typing.List[str]:
return [
"SSL_CTX_use_psk_identity_hint",
"SSL_CTX_set_psk_server_callback",
"SSL_CTX_set_psk_client_callback",
]
def cryptography_has_psk_tlsv13() -> typing.List[str]:
return [
"SSL_CTX_set_psk_find_session_callback",
"SSL_CTX_set_psk_use_session_callback",
"Cryptography_SSL_SESSION_new",
"SSL_CIPHER_find",
"SSL_SESSION_set1_master_key",
"SSL_SESSION_set_cipher",
"SSL_SESSION_set_protocol_version",
]
def cryptography_has_custom_ext() -> typing.List[str]:
return [
"SSL_CTX_add_client_custom_ext",
"SSL_CTX_add_server_custom_ext",
"SSL_extension_supported",
]
def cryptography_has_openssl_cleanup() -> typing.List[str]:
return [
"OPENSSL_cleanup",
]
def cryptography_has_tlsv13_functions() -> typing.List[str]:
return [
"SSL_VERIFY_POST_HANDSHAKE",
"SSL_CTX_set_ciphersuites",
"SSL_verify_client_post_handshake",
"SSL_CTX_set_post_handshake_auth",
"SSL_set_post_handshake_auth",
"SSL_SESSION_get_max_early_data",
"SSL_write_early_data",
"SSL_read_early_data",
"SSL_CTX_set_max_early_data",
]
def cryptography_has_raw_key() -> typing.List[str]:
return [
"EVP_PKEY_new_raw_private_key",
"EVP_PKEY_new_raw_public_key",
"EVP_PKEY_get_raw_private_key",
"EVP_PKEY_get_raw_public_key",
]
def cryptography_has_engine() -> typing.List[str]:
return [
"ENGINE_by_id",
"ENGINE_init",
"ENGINE_finish",
"ENGINE_get_default_RAND",
"ENGINE_set_default_RAND",
"ENGINE_unregister_RAND",
"ENGINE_ctrl_cmd",
"ENGINE_free",
"ENGINE_get_name",
"Cryptography_add_osrandom_engine",
"ENGINE_ctrl_cmd_string",
"ENGINE_load_builtin_engines",
"ENGINE_load_private_key",
"ENGINE_load_public_key",
"SSL_CTX_set_client_cert_engine",
]
def cryptography_has_verified_chain() -> typing.List[str]:
return [
"SSL_get0_verified_chain",
]
def cryptography_has_srtp() -> typing.List[str]:
return [
"SSL_CTX_set_tlsext_use_srtp",
"SSL_set_tlsext_use_srtp",
"SSL_get_selected_srtp_profile",
]
def cryptography_has_providers() -> typing.List[str]:
return [
"OSSL_PROVIDER_load",
"OSSL_PROVIDER_unload",
"ERR_LIB_PROV",
"PROV_R_WRONG_FINAL_BLOCK_LENGTH",
"PROV_R_BAD_DECRYPT",
]
def cryptography_has_op_no_renegotiation() -> typing.List[str]:
return [
"SSL_OP_NO_RENEGOTIATION",
]
def cryptography_has_dtls_get_data_mtu() -> typing.List[str]:
return [
"DTLS_get_data_mtu",
]
def cryptography_has_300_fips() -> typing.List[str]:
return [
"EVP_default_properties_is_fips_enabled",
"EVP_default_properties_enable_fips",
]
def cryptography_has_ssl_cookie() -> typing.List[str]:
return [
"SSL_OP_COOKIE_EXCHANGE",
"DTLSv1_listen",
"SSL_CTX_set_cookie_generate_cb",
"SSL_CTX_set_cookie_verify_cb",
]
def cryptography_has_pkcs7_funcs() -> typing.List[str]:
return [
"SMIME_write_PKCS7",
"PEM_write_bio_PKCS7_stream",
"PKCS7_sign_add_signer",
"PKCS7_final",
"PKCS7_verify",
"SMIME_read_PKCS7",
"PKCS7_get0_signers",
]
def cryptography_has_bn_flags() -> typing.List[str]:
return [
"BN_FLG_CONSTTIME",
"BN_set_flags",
"BN_prime_checks_for_size",
]
def cryptography_has_evp_pkey_dh() -> typing.List[str]:
return [
"EVP_PKEY_set1_DH",
]
def cryptography_has_300_evp_cipher() -> typing.List[str]:
return ["EVP_CIPHER_fetch", "EVP_CIPHER_free"]
def cryptography_has_unexpected_eof_while_reading() -> typing.List[str]:
return ["SSL_R_UNEXPECTED_EOF_WHILE_READING"]
def cryptography_has_pkcs12_set_mac() -> typing.List[str]:
return ["PKCS12_set_mac"]
def cryptography_has_ssl_op_ignore_unexpected_eof() -> typing.List[str]:
return [
"SSL_OP_IGNORE_UNEXPECTED_EOF",
]
def cryptography_has_get_extms_support() -> typing.List[str]:
return ["SSL_get_extms_support"]
def cryptography_has_evp_pkey_set_peer_ex() -> typing.List[str]:
return ["EVP_PKEY_derive_set_peer_ex"]
# This is a mapping of
# {condition: function-returning-names-dependent-on-that-condition} so we can
# loop over them and delete unsupported names at runtime. It will be removed
# when cffi supports #if in cdef. We use functions instead of just a dict of
# lists so we can use coverage to measure which are used.
CONDITIONAL_NAMES = {
"Cryptography_HAS_SET_CERT_CB": cryptography_has_set_cert_cb,
"Cryptography_HAS_SSL_ST": cryptography_has_ssl_st,
"Cryptography_HAS_TLS_ST": cryptography_has_tls_st,
"Cryptography_HAS_SCRYPT": cryptography_has_scrypt,
"Cryptography_HAS_EVP_PKEY_DHX": cryptography_has_evp_pkey_dhx,
"Cryptography_HAS_MEM_FUNCTIONS": cryptography_has_mem_functions,
"Cryptography_HAS_X509_STORE_CTX_GET_ISSUER": (
cryptography_has_x509_store_ctx_get_issuer
),
"Cryptography_HAS_ED448": cryptography_has_ed448,
"Cryptography_HAS_ED25519": cryptography_has_ed25519,
"Cryptography_HAS_POLY1305": cryptography_has_poly1305,
"Cryptography_HAS_FIPS": cryptography_has_fips,
"Cryptography_HAS_SIGALGS": cryptography_has_ssl_sigalgs,
"Cryptography_HAS_PSK": cryptography_has_psk,
"Cryptography_HAS_PSK_TLSv1_3": cryptography_has_psk_tlsv13,
"Cryptography_HAS_CUSTOM_EXT": cryptography_has_custom_ext,
"Cryptography_HAS_OPENSSL_CLEANUP": cryptography_has_openssl_cleanup,
"Cryptography_HAS_TLSv1_3_FUNCTIONS": cryptography_has_tlsv13_functions,
"Cryptography_HAS_RAW_KEY": cryptography_has_raw_key,
"Cryptography_HAS_EVP_DIGESTFINAL_XOF": (
cryptography_has_evp_digestfinal_xof
),
"Cryptography_HAS_ENGINE": cryptography_has_engine,
"Cryptography_HAS_VERIFIED_CHAIN": cryptography_has_verified_chain,
"Cryptography_HAS_SRTP": cryptography_has_srtp,
"Cryptography_HAS_PROVIDERS": cryptography_has_providers,
"Cryptography_HAS_OP_NO_RENEGOTIATION": (
cryptography_has_op_no_renegotiation
),
"Cryptography_HAS_DTLS_GET_DATA_MTU": cryptography_has_dtls_get_data_mtu,
"Cryptography_HAS_300_FIPS": cryptography_has_300_fips,
"Cryptography_HAS_SSL_COOKIE": cryptography_has_ssl_cookie,
"Cryptography_HAS_PKCS7_FUNCS": cryptography_has_pkcs7_funcs,
"Cryptography_HAS_BN_FLAGS": cryptography_has_bn_flags,
"Cryptography_HAS_EVP_PKEY_DH": cryptography_has_evp_pkey_dh,
"Cryptography_HAS_300_EVP_CIPHER": cryptography_has_300_evp_cipher,
"Cryptography_HAS_UNEXPECTED_EOF_WHILE_READING": (
cryptography_has_unexpected_eof_while_reading
),
"Cryptography_HAS_PKCS12_SET_MAC": cryptography_has_pkcs12_set_mac,
"Cryptography_HAS_SSL_OP_IGNORE_UNEXPECTED_EOF": (
cryptography_has_ssl_op_ignore_unexpected_eof
),
"Cryptography_HAS_GET_EXTMS_SUPPORT": cryptography_has_get_extms_support,
"Cryptography_HAS_EVP_PKEY_SET_PEER_EX": (
cryptography_has_evp_pkey_set_peer_ex
),
}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/hazmat/bindings/openssl/binding.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import os
import sys
import threading
import types
import typing
import warnings
import cryptography
from cryptography import utils
from cryptography.exceptions import InternalError
from cryptography.hazmat.bindings._rust import _openssl, openssl
from cryptography.hazmat.bindings.openssl._conditional import CONDITIONAL_NAMES
def _openssl_assert(
lib,
ok: bool,
errors: typing.Optional[typing.List[openssl.OpenSSLError]] = None,
) -> None:
if not ok:
if errors is None:
errors = openssl.capture_error_stack()
raise InternalError(
"Unknown OpenSSL error. This error is commonly encountered when "
"another library is not cleaning up the OpenSSL error stack. If "
"you are using cryptography with another library that uses "
"OpenSSL try disabling it before reporting a bug. Otherwise "
"please file an issue at https://github.com/pyca/cryptography/"
"issues with information on how to reproduce "
"this. ({!r})".format(errors),
errors,
)
def _legacy_provider_error(loaded: bool) -> None:
if not loaded:
raise RuntimeError(
"OpenSSL 3.0's legacy provider failed to load. This is a fatal "
"error by default, but cryptography supports running without "
"legacy algorithms by setting the environment variable "
"CRYPTOGRAPHY_OPENSSL_NO_LEGACY. If you did not expect this error,"
" you have likely made a mistake with your OpenSSL configuration."
)
def build_conditional_library(
lib: typing.Any,
conditional_names: typing.Dict[str, typing.Callable[[], typing.List[str]]],
) -> typing.Any:
conditional_lib = types.ModuleType("lib")
conditional_lib._original_lib = lib # type: ignore[attr-defined]
excluded_names = set()
for condition, names_cb in conditional_names.items():
if not getattr(lib, condition):
excluded_names.update(names_cb())
for attr in dir(lib):
if attr not in excluded_names:
setattr(conditional_lib, attr, getattr(lib, attr))
return conditional_lib
class Binding:
"""
OpenSSL API wrapper.
"""
lib: typing.ClassVar = None
ffi = _openssl.ffi
_lib_loaded = False
_init_lock = threading.Lock()
_legacy_provider: typing.Any = ffi.NULL
_legacy_provider_loaded = False
_default_provider: typing.Any = ffi.NULL
def __init__(self) -> None:
self._ensure_ffi_initialized()
def _enable_fips(self) -> None:
# This function enables FIPS mode for OpenSSL 3.0.0 on installs that
# have the FIPS provider installed properly.
_openssl_assert(self.lib, self.lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER)
self._base_provider = self.lib.OSSL_PROVIDER_load(
self.ffi.NULL, b"base"
)
_openssl_assert(self.lib, self._base_provider != self.ffi.NULL)
self.lib._fips_provider = self.lib.OSSL_PROVIDER_load(
self.ffi.NULL, b"fips"
)
_openssl_assert(self.lib, self.lib._fips_provider != self.ffi.NULL)
res = self.lib.EVP_default_properties_enable_fips(self.ffi.NULL, 1)
_openssl_assert(self.lib, res == 1)
@classmethod
def _register_osrandom_engine(cls) -> None:
# Clear any errors extant in the queue before we start. In many
# scenarios other things may be interacting with OpenSSL in the same
# process space and it has proven untenable to assume that they will
# reliably clear the error queue. Once we clear it here we will
# error on any subsequent unexpected item in the stack.
cls.lib.ERR_clear_error()
if cls.lib.CRYPTOGRAPHY_NEEDS_OSRANDOM_ENGINE:
result = cls.lib.Cryptography_add_osrandom_engine()
_openssl_assert(cls.lib, result in (1, 2))
@classmethod
def _ensure_ffi_initialized(cls) -> None:
with cls._init_lock:
if not cls._lib_loaded:
cls.lib = build_conditional_library(
_openssl.lib, CONDITIONAL_NAMES
)
cls._lib_loaded = True
cls._register_osrandom_engine()
# As of OpenSSL 3.0.0 we must register a legacy cipher provider
# to get RC2 (needed for junk asymmetric private key
# serialization), RC4, Blowfish, IDEA, SEED, etc. These things
# are ugly legacy, but we aren't going to get rid of them
# any time soon.
if cls.lib.CRYPTOGRAPHY_OPENSSL_300_OR_GREATER:
if not os.environ.get("CRYPTOGRAPHY_OPENSSL_NO_LEGACY"):
cls._legacy_provider = cls.lib.OSSL_PROVIDER_load(
cls.ffi.NULL, b"legacy"
)
cls._legacy_provider_loaded = (
cls._legacy_provider != cls.ffi.NULL
)
_legacy_provider_error(cls._legacy_provider_loaded)
cls._default_provider = cls.lib.OSSL_PROVIDER_load(
cls.ffi.NULL, b"default"
)
_openssl_assert(
cls.lib, cls._default_provider != cls.ffi.NULL
)
@classmethod
def init_static_locks(cls) -> None:
cls._ensure_ffi_initialized()
def _verify_package_version(version: str) -> None:
# Occasionally we run into situations where the version of the Python
# package does not match the version of the shared object that is loaded.
# This may occur in environments where multiple versions of cryptography
# are installed and available in the python path. To avoid errors cropping
# up later this code checks that the currently imported package and the
# shared object that were loaded have the same version and raise an
# ImportError if they do not
so_package_version = _openssl.ffi.string(
_openssl.lib.CRYPTOGRAPHY_PACKAGE_VERSION
)
if version.encode("ascii") != so_package_version:
raise ImportError(
"The version of cryptography does not match the loaded "
"shared object. This can happen if you have multiple copies of "
"cryptography installed in your Python path. Please try creating "
"a new virtual environment to resolve this issue. "
"Loaded python version: {}, shared object version: {}".format(
version, so_package_version
)
)
_openssl_assert(
_openssl.lib,
_openssl.lib.OpenSSL_version_num() == openssl.openssl_version(),
)
_verify_package_version(cryptography.__version__)
Binding.init_static_locks()
if (
sys.platform == "win32"
and os.environ.get("PROCESSOR_ARCHITEW6432") is not None
):
warnings.warn(
"You are using cryptography on a 32-bit Python on a 64-bit Windows "
"Operating System. Cryptography will be significantly faster if you "
"switch to using a 64-bit Python.",
UserWarning,
stacklevel=2,
)
def _verify_openssl_version(lib):
if (
not lib.CRYPTOGRAPHY_OPENSSL_111D_OR_GREATER
and not lib.CRYPTOGRAPHY_IS_LIBRESSL
and not lib.CRYPTOGRAPHY_IS_BORINGSSL
):
warnings.warn(
"Support for OpenSSL less than version 1.1.1d is deprecated and "
"the next release of cryptography will drop support. Please "
"upgrade your OpenSSL to version 1.1.1d or newer.",
utils.DeprecatedIn40,
)
_verify_openssl_version(Binding.lib)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/x509/oid.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from cryptography.hazmat._oid import (
AttributeOID,
AuthorityInformationAccessOID,
CertificatePoliciesOID,
CRLEntryExtensionOID,
ExtendedKeyUsageOID,
ExtensionOID,
NameOID,
ObjectIdentifier,
OCSPExtensionOID,
SignatureAlgorithmOID,
SubjectInformationAccessOID,
)
__all__ = [
"AttributeOID",
"AuthorityInformationAccessOID",
"CRLEntryExtensionOID",
"CertificatePoliciesOID",
"ExtendedKeyUsageOID",
"ExtensionOID",
"NameOID",
"OCSPExtensionOID",
"ObjectIdentifier",
"SignatureAlgorithmOID",
"SubjectInformationAccessOID",
]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/x509/ocsp.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import abc
import datetime
import typing
from cryptography import utils, x509
from cryptography.hazmat.bindings._rust import ocsp
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric.types import (
CertificateIssuerPrivateKeyTypes,
)
from cryptography.x509.base import (
_EARLIEST_UTC_TIME,
_convert_to_naive_utc_time,
_reject_duplicate_extension,
)
class OCSPResponderEncoding(utils.Enum):
HASH = "By Hash"
NAME = "By Name"
class OCSPResponseStatus(utils.Enum):
SUCCESSFUL = 0
MALFORMED_REQUEST = 1
INTERNAL_ERROR = 2
TRY_LATER = 3
SIG_REQUIRED = 5
UNAUTHORIZED = 6
_ALLOWED_HASHES = (
hashes.SHA1,
hashes.SHA224,
hashes.SHA256,
hashes.SHA384,
hashes.SHA512,
)
def _verify_algorithm(algorithm: hashes.HashAlgorithm) -> None:
if not isinstance(algorithm, _ALLOWED_HASHES):
raise ValueError(
"Algorithm must be SHA1, SHA224, SHA256, SHA384, or SHA512"
)
class OCSPCertStatus(utils.Enum):
GOOD = 0
REVOKED = 1
UNKNOWN = 2
class _SingleResponse:
def __init__(
self,
cert: x509.Certificate,
issuer: x509.Certificate,
algorithm: hashes.HashAlgorithm,
cert_status: OCSPCertStatus,
this_update: datetime.datetime,
next_update: typing.Optional[datetime.datetime],
revocation_time: typing.Optional[datetime.datetime],
revocation_reason: typing.Optional[x509.ReasonFlags],
):
if not isinstance(cert, x509.Certificate) or not isinstance(
issuer, x509.Certificate
):
raise TypeError("cert and issuer must be a Certificate")
_verify_algorithm(algorithm)
if not isinstance(this_update, datetime.datetime):
raise TypeError("this_update must be a datetime object")
if next_update is not None and not isinstance(
next_update, datetime.datetime
):
raise TypeError("next_update must be a datetime object or None")
self._cert = cert
self._issuer = issuer
self._algorithm = algorithm
self._this_update = this_update
self._next_update = next_update
if not isinstance(cert_status, OCSPCertStatus):
raise TypeError(
"cert_status must be an item from the OCSPCertStatus enum"
)
if cert_status is not OCSPCertStatus.REVOKED:
if revocation_time is not None:
raise ValueError(
"revocation_time can only be provided if the certificate "
"is revoked"
)
if revocation_reason is not None:
raise ValueError(
"revocation_reason can only be provided if the certificate"
" is revoked"
)
else:
if not isinstance(revocation_time, datetime.datetime):
raise TypeError("revocation_time must be a datetime object")
revocation_time = _convert_to_naive_utc_time(revocation_time)
if revocation_time < _EARLIEST_UTC_TIME:
raise ValueError(
"The revocation_time must be on or after"
" 1950 January 1."
)
if revocation_reason is not None and not isinstance(
revocation_reason, x509.ReasonFlags
):
raise TypeError(
"revocation_reason must be an item from the ReasonFlags "
"enum or None"
)
self._cert_status = cert_status
self._revocation_time = revocation_time
self._revocation_reason = revocation_reason
class OCSPRequest(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def issuer_key_hash(self) -> bytes:
"""
The hash of the issuer public key
"""
@property
@abc.abstractmethod
def issuer_name_hash(self) -> bytes:
"""
The hash of the issuer name
"""
@property
@abc.abstractmethod
def hash_algorithm(self) -> hashes.HashAlgorithm:
"""
The hash algorithm used in the issuer name and key hashes
"""
@property
@abc.abstractmethod
def serial_number(self) -> int:
"""
The serial number of the cert whose status is being checked
"""
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Serializes the request to DER
"""
@property
@abc.abstractmethod
def extensions(self) -> x509.Extensions:
"""
The list of request extensions. Not single request extensions.
"""
class OCSPSingleResponse(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def certificate_status(self) -> OCSPCertStatus:
"""
The status of the certificate (an element from the OCSPCertStatus enum)
"""
@property
@abc.abstractmethod
def revocation_time(self) -> typing.Optional[datetime.datetime]:
"""
The date of when the certificate was revoked or None if not
revoked.
"""
@property
@abc.abstractmethod
def revocation_reason(self) -> typing.Optional[x509.ReasonFlags]:
"""
The reason the certificate was revoked or None if not specified or
not revoked.
"""
@property
@abc.abstractmethod
def this_update(self) -> datetime.datetime:
"""
The most recent time at which the status being indicated is known by
the responder to have been correct
"""
@property
@abc.abstractmethod
def next_update(self) -> typing.Optional[datetime.datetime]:
"""
The time when newer information will be available
"""
@property
@abc.abstractmethod
def issuer_key_hash(self) -> bytes:
"""
The hash of the issuer public key
"""
@property
@abc.abstractmethod
def issuer_name_hash(self) -> bytes:
"""
The hash of the issuer name
"""
@property
@abc.abstractmethod
def hash_algorithm(self) -> hashes.HashAlgorithm:
"""
The hash algorithm used in the issuer name and key hashes
"""
@property
@abc.abstractmethod
def serial_number(self) -> int:
"""
The serial number of the cert whose status is being checked
"""
class OCSPResponse(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def responses(self) -> typing.Iterator[OCSPSingleResponse]:
"""
An iterator over the individual SINGLERESP structures in the
response
"""
@property
@abc.abstractmethod
def response_status(self) -> OCSPResponseStatus:
"""
The status of the response. This is a value from the OCSPResponseStatus
enumeration
"""
@property
@abc.abstractmethod
def signature_algorithm_oid(self) -> x509.ObjectIdentifier:
"""
The ObjectIdentifier of the signature algorithm
"""
@property
@abc.abstractmethod
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
"""
@property
@abc.abstractmethod
def signature(self) -> bytes:
"""
The signature bytes
"""
@property
@abc.abstractmethod
def tbs_response_bytes(self) -> bytes:
"""
The tbsResponseData bytes
"""
@property
@abc.abstractmethod
def certificates(self) -> typing.List[x509.Certificate]:
"""
A list of certificates used to help build a chain to verify the OCSP
response. This situation occurs when the OCSP responder uses a delegate
certificate.
"""
@property
@abc.abstractmethod
def responder_key_hash(self) -> typing.Optional[bytes]:
"""
The responder's key hash or None
"""
@property
@abc.abstractmethod
def responder_name(self) -> typing.Optional[x509.Name]:
"""
The responder's Name or None
"""
@property
@abc.abstractmethod
def produced_at(self) -> datetime.datetime:
"""
The time the response was produced
"""
@property
@abc.abstractmethod
def certificate_status(self) -> OCSPCertStatus:
"""
The status of the certificate (an element from the OCSPCertStatus enum)
"""
@property
@abc.abstractmethod
def revocation_time(self) -> typing.Optional[datetime.datetime]:
"""
The date of when the certificate was revoked or None if not
revoked.
"""
@property
@abc.abstractmethod
def revocation_reason(self) -> typing.Optional[x509.ReasonFlags]:
"""
The reason the certificate was revoked or None if not specified or
not revoked.
"""
@property
@abc.abstractmethod
def this_update(self) -> datetime.datetime:
"""
The most recent time at which the status being indicated is known by
the responder to have been correct
"""
@property
@abc.abstractmethod
def next_update(self) -> typing.Optional[datetime.datetime]:
"""
The time when newer information will be available
"""
@property
@abc.abstractmethod
def issuer_key_hash(self) -> bytes:
"""
The hash of the issuer public key
"""
@property
@abc.abstractmethod
def issuer_name_hash(self) -> bytes:
"""
The hash of the issuer name
"""
@property
@abc.abstractmethod
def hash_algorithm(self) -> hashes.HashAlgorithm:
"""
The hash algorithm used in the issuer name and key hashes
"""
@property
@abc.abstractmethod
def serial_number(self) -> int:
"""
The serial number of the cert whose status is being checked
"""
@property
@abc.abstractmethod
def extensions(self) -> x509.Extensions:
"""
The list of response extensions. Not single response extensions.
"""
@property
@abc.abstractmethod
def single_extensions(self) -> x509.Extensions:
"""
The list of single response extensions. Not response extensions.
"""
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Serializes the response to DER
"""
class OCSPRequestBuilder:
def __init__(
self,
request: typing.Optional[
typing.Tuple[
x509.Certificate, x509.Certificate, hashes.HashAlgorithm
]
] = None,
request_hash: typing.Optional[
typing.Tuple[bytes, bytes, int, hashes.HashAlgorithm]
] = None,
extensions: typing.List[x509.Extension[x509.ExtensionType]] = [],
) -> None:
self._request = request
self._request_hash = request_hash
self._extensions = extensions
def add_certificate(
self,
cert: x509.Certificate,
issuer: x509.Certificate,
algorithm: hashes.HashAlgorithm,
) -> "OCSPRequestBuilder":
if self._request is not None or self._request_hash is not None:
raise ValueError("Only one certificate can be added to a request")
_verify_algorithm(algorithm)
if not isinstance(cert, x509.Certificate) or not isinstance(
issuer, x509.Certificate
):
raise TypeError("cert and issuer must be a Certificate")
return OCSPRequestBuilder(
(cert, issuer, algorithm), self._request_hash, self._extensions
)
def add_certificate_by_hash(
self,
issuer_name_hash: bytes,
issuer_key_hash: bytes,
serial_number: int,
algorithm: hashes.HashAlgorithm,
) -> "OCSPRequestBuilder":
if self._request is not None or self._request_hash is not None:
raise ValueError("Only one certificate can be added to a request")
if not isinstance(serial_number, int):
raise TypeError("serial_number must be an integer")
_verify_algorithm(algorithm)
utils._check_bytes("issuer_name_hash", issuer_name_hash)
utils._check_bytes("issuer_key_hash", issuer_key_hash)
if algorithm.digest_size != len(
issuer_name_hash
) or algorithm.digest_size != len(issuer_key_hash):
raise ValueError(
"issuer_name_hash and issuer_key_hash must be the same length "
"as the digest size of the algorithm"
)
return OCSPRequestBuilder(
self._request,
(issuer_name_hash, issuer_key_hash, serial_number, algorithm),
self._extensions,
)
def add_extension(
self, extval: x509.ExtensionType, critical: bool
) -> "OCSPRequestBuilder":
if not isinstance(extval, x509.ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = x509.Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return OCSPRequestBuilder(
self._request, self._request_hash, self._extensions + [extension]
)
def build(self) -> OCSPRequest:
if self._request is None and self._request_hash is None:
raise ValueError("You must add a certificate before building")
return ocsp.create_ocsp_request(self)
class OCSPResponseBuilder:
def __init__(
self,
response: typing.Optional[_SingleResponse] = None,
responder_id: typing.Optional[
typing.Tuple[x509.Certificate, OCSPResponderEncoding]
] = None,
certs: typing.Optional[typing.List[x509.Certificate]] = None,
extensions: typing.List[x509.Extension[x509.ExtensionType]] = [],
):
self._response = response
self._responder_id = responder_id
self._certs = certs
self._extensions = extensions
def add_response(
self,
cert: x509.Certificate,
issuer: x509.Certificate,
algorithm: hashes.HashAlgorithm,
cert_status: OCSPCertStatus,
this_update: datetime.datetime,
next_update: typing.Optional[datetime.datetime],
revocation_time: typing.Optional[datetime.datetime],
revocation_reason: typing.Optional[x509.ReasonFlags],
) -> "OCSPResponseBuilder":
if self._response is not None:
raise ValueError("Only one response per OCSPResponse.")
singleresp = _SingleResponse(
cert,
issuer,
algorithm,
cert_status,
this_update,
next_update,
revocation_time,
revocation_reason,
)
return OCSPResponseBuilder(
singleresp,
self._responder_id,
self._certs,
self._extensions,
)
def responder_id(
self, encoding: OCSPResponderEncoding, responder_cert: x509.Certificate
) -> "OCSPResponseBuilder":
if self._responder_id is not None:
raise ValueError("responder_id can only be set once")
if not isinstance(responder_cert, x509.Certificate):
raise TypeError("responder_cert must be a Certificate")
if not isinstance(encoding, OCSPResponderEncoding):
raise TypeError(
"encoding must be an element from OCSPResponderEncoding"
)
return OCSPResponseBuilder(
self._response,
(responder_cert, encoding),
self._certs,
self._extensions,
)
def certificates(
self, certs: typing.Iterable[x509.Certificate]
) -> "OCSPResponseBuilder":
if self._certs is not None:
raise ValueError("certificates may only be set once")
certs = list(certs)
if len(certs) == 0:
raise ValueError("certs must not be an empty list")
if not all(isinstance(x, x509.Certificate) for x in certs):
raise TypeError("certs must be a list of Certificates")
return OCSPResponseBuilder(
self._response,
self._responder_id,
certs,
self._extensions,
)
def add_extension(
self, extval: x509.ExtensionType, critical: bool
) -> "OCSPResponseBuilder":
if not isinstance(extval, x509.ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = x509.Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return OCSPResponseBuilder(
self._response,
self._responder_id,
self._certs,
self._extensions + [extension],
)
def sign(
self,
private_key: CertificateIssuerPrivateKeyTypes,
algorithm: typing.Optional[hashes.HashAlgorithm],
) -> OCSPResponse:
if self._response is None:
raise ValueError("You must add a response before signing")
if self._responder_id is None:
raise ValueError("You must add a responder_id before signing")
return ocsp.create_ocsp_response(
OCSPResponseStatus.SUCCESSFUL, self, private_key, algorithm
)
@classmethod
def build_unsuccessful(
cls, response_status: OCSPResponseStatus
) -> OCSPResponse:
if not isinstance(response_status, OCSPResponseStatus):
raise TypeError(
"response_status must be an item from OCSPResponseStatus"
)
if response_status is OCSPResponseStatus.SUCCESSFUL:
raise ValueError("response_status cannot be SUCCESSFUL")
return ocsp.create_ocsp_response(response_status, None, None, None)
def load_der_ocsp_request(data: bytes) -> OCSPRequest:
return ocsp.load_der_ocsp_request(data)
def load_der_ocsp_response(data: bytes) -> OCSPResponse:
return ocsp.load_der_ocsp_response(data)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/x509/general_name.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import abc
import ipaddress
import typing
from email.utils import parseaddr
from cryptography.x509.name import Name
from cryptography.x509.oid import ObjectIdentifier
_IPAddressTypes = typing.Union[
ipaddress.IPv4Address,
ipaddress.IPv6Address,
ipaddress.IPv4Network,
ipaddress.IPv6Network,
]
class UnsupportedGeneralNameType(Exception):
pass
class GeneralName(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def value(self) -> typing.Any:
"""
Return the value of the object
"""
class RFC822Name(GeneralName):
def __init__(self, value: str) -> None:
if isinstance(value, str):
try:
value.encode("ascii")
except UnicodeEncodeError:
raise ValueError(
"RFC822Name values should be passed as an A-label string. "
"This means unicode characters should be encoded via "
"a library like idna."
)
else:
raise TypeError("value must be string")
name, address = parseaddr(value)
if name or not address:
# parseaddr has found a name (e.g. Name <email>) or the entire
# value is an empty string.
raise ValueError("Invalid rfc822name value")
self._value = value
@property
def value(self) -> str:
return self._value
@classmethod
def _init_without_validation(cls, value: str) -> "RFC822Name":
instance = cls.__new__(cls)
instance._value = value
return instance
def __repr__(self) -> str:
return f"<RFC822Name(value={self.value!r})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, RFC822Name):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
class DNSName(GeneralName):
def __init__(self, value: str) -> None:
if isinstance(value, str):
try:
value.encode("ascii")
except UnicodeEncodeError:
raise ValueError(
"DNSName values should be passed as an A-label string. "
"This means unicode characters should be encoded via "
"a library like idna."
)
else:
raise TypeError("value must be string")
self._value = value
@property
def value(self) -> str:
return self._value
@classmethod
def _init_without_validation(cls, value: str) -> "DNSName":
instance = cls.__new__(cls)
instance._value = value
return instance
def __repr__(self) -> str:
return f"<DNSName(value={self.value!r})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, DNSName):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
class UniformResourceIdentifier(GeneralName):
def __init__(self, value: str) -> None:
if isinstance(value, str):
try:
value.encode("ascii")
except UnicodeEncodeError:
raise ValueError(
"URI values should be passed as an A-label string. "
"This means unicode characters should be encoded via "
"a library like idna."
)
else:
raise TypeError("value must be string")
self._value = value
@property
def value(self) -> str:
return self._value
@classmethod
def _init_without_validation(
cls, value: str
) -> "UniformResourceIdentifier":
instance = cls.__new__(cls)
instance._value = value
return instance
def __repr__(self) -> str:
return f"<UniformResourceIdentifier(value={self.value!r})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, UniformResourceIdentifier):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
class DirectoryName(GeneralName):
def __init__(self, value: Name) -> None:
if not isinstance(value, Name):
raise TypeError("value must be a Name")
self._value = value
@property
def value(self) -> Name:
return self._value
def __repr__(self) -> str:
return f"<DirectoryName(value={self.value})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, DirectoryName):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
class RegisteredID(GeneralName):
def __init__(self, value: ObjectIdentifier) -> None:
if not isinstance(value, ObjectIdentifier):
raise TypeError("value must be an ObjectIdentifier")
self._value = value
@property
def value(self) -> ObjectIdentifier:
return self._value
def __repr__(self) -> str:
return f"<RegisteredID(value={self.value})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, RegisteredID):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
class IPAddress(GeneralName):
def __init__(self, value: _IPAddressTypes) -> None:
if not isinstance(
value,
(
ipaddress.IPv4Address,
ipaddress.IPv6Address,
ipaddress.IPv4Network,
ipaddress.IPv6Network,
),
):
raise TypeError(
"value must be an instance of ipaddress.IPv4Address, "
"ipaddress.IPv6Address, ipaddress.IPv4Network, or "
"ipaddress.IPv6Network"
)
self._value = value
@property
def value(self) -> _IPAddressTypes:
return self._value
def _packed(self) -> bytes:
if isinstance(
self.value, (ipaddress.IPv4Address, ipaddress.IPv6Address)
):
return self.value.packed
else:
return (
self.value.network_address.packed + self.value.netmask.packed
)
def __repr__(self) -> str:
return f"<IPAddress(value={self.value})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, IPAddress):
return NotImplemented
return self.value == other.value
def __hash__(self) -> int:
return hash(self.value)
class OtherName(GeneralName):
def __init__(self, type_id: ObjectIdentifier, value: bytes) -> None:
if not isinstance(type_id, ObjectIdentifier):
raise TypeError("type_id must be an ObjectIdentifier")
if not isinstance(value, bytes):
raise TypeError("value must be a binary string")
self._type_id = type_id
self._value = value
@property
def type_id(self) -> ObjectIdentifier:
return self._type_id
@property
def value(self) -> bytes:
return self._value
def __repr__(self) -> str:
return "<OtherName(type_id={}, value={!r})>".format(
self.type_id, self.value
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, OtherName):
return NotImplemented
return self.type_id == other.type_id and self.value == other.value
def __hash__(self) -> int:
return hash((self.type_id, self.value))
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/x509/__init__.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
from cryptography.x509 import certificate_transparency
from cryptography.x509.base import (
Attribute,
AttributeNotFound,
Attributes,
Certificate,
CertificateBuilder,
CertificateRevocationList,
CertificateRevocationListBuilder,
CertificateSigningRequest,
CertificateSigningRequestBuilder,
InvalidVersion,
RevokedCertificate,
RevokedCertificateBuilder,
Version,
load_der_x509_certificate,
load_der_x509_crl,
load_der_x509_csr,
load_pem_x509_certificate,
load_pem_x509_certificates,
load_pem_x509_crl,
load_pem_x509_csr,
random_serial_number,
)
from cryptography.x509.extensions import (
AccessDescription,
AuthorityInformationAccess,
AuthorityKeyIdentifier,
BasicConstraints,
CertificateIssuer,
CertificatePolicies,
CRLDistributionPoints,
CRLNumber,
CRLReason,
DeltaCRLIndicator,
DistributionPoint,
DuplicateExtension,
ExtendedKeyUsage,
Extension,
ExtensionNotFound,
Extensions,
ExtensionType,
FreshestCRL,
GeneralNames,
InhibitAnyPolicy,
InvalidityDate,
IssuerAlternativeName,
IssuingDistributionPoint,
KeyUsage,
NameConstraints,
NoticeReference,
OCSPNoCheck,
OCSPNonce,
PolicyConstraints,
PolicyInformation,
PrecertificateSignedCertificateTimestamps,
PrecertPoison,
ReasonFlags,
SignedCertificateTimestamps,
SubjectAlternativeName,
SubjectInformationAccess,
SubjectKeyIdentifier,
TLSFeature,
TLSFeatureType,
UnrecognizedExtension,
UserNotice,
)
from cryptography.x509.general_name import (
DirectoryName,
DNSName,
GeneralName,
IPAddress,
OtherName,
RegisteredID,
RFC822Name,
UniformResourceIdentifier,
UnsupportedGeneralNameType,
)
from cryptography.x509.name import (
Name,
NameAttribute,
RelativeDistinguishedName,
)
from cryptography.x509.oid import (
AuthorityInformationAccessOID,
CertificatePoliciesOID,
CRLEntryExtensionOID,
ExtendedKeyUsageOID,
ExtensionOID,
NameOID,
ObjectIdentifier,
SignatureAlgorithmOID,
)
OID_AUTHORITY_INFORMATION_ACCESS = ExtensionOID.AUTHORITY_INFORMATION_ACCESS
OID_AUTHORITY_KEY_IDENTIFIER = ExtensionOID.AUTHORITY_KEY_IDENTIFIER
OID_BASIC_CONSTRAINTS = ExtensionOID.BASIC_CONSTRAINTS
OID_CERTIFICATE_POLICIES = ExtensionOID.CERTIFICATE_POLICIES
OID_CRL_DISTRIBUTION_POINTS = ExtensionOID.CRL_DISTRIBUTION_POINTS
OID_EXTENDED_KEY_USAGE = ExtensionOID.EXTENDED_KEY_USAGE
OID_FRESHEST_CRL = ExtensionOID.FRESHEST_CRL
OID_INHIBIT_ANY_POLICY = ExtensionOID.INHIBIT_ANY_POLICY
OID_ISSUER_ALTERNATIVE_NAME = ExtensionOID.ISSUER_ALTERNATIVE_NAME
OID_KEY_USAGE = ExtensionOID.KEY_USAGE
OID_NAME_CONSTRAINTS = ExtensionOID.NAME_CONSTRAINTS
OID_OCSP_NO_CHECK = ExtensionOID.OCSP_NO_CHECK
OID_POLICY_CONSTRAINTS = ExtensionOID.POLICY_CONSTRAINTS
OID_POLICY_MAPPINGS = ExtensionOID.POLICY_MAPPINGS
OID_SUBJECT_ALTERNATIVE_NAME = ExtensionOID.SUBJECT_ALTERNATIVE_NAME
OID_SUBJECT_DIRECTORY_ATTRIBUTES = ExtensionOID.SUBJECT_DIRECTORY_ATTRIBUTES
OID_SUBJECT_INFORMATION_ACCESS = ExtensionOID.SUBJECT_INFORMATION_ACCESS
OID_SUBJECT_KEY_IDENTIFIER = ExtensionOID.SUBJECT_KEY_IDENTIFIER
OID_DSA_WITH_SHA1 = SignatureAlgorithmOID.DSA_WITH_SHA1
OID_DSA_WITH_SHA224 = SignatureAlgorithmOID.DSA_WITH_SHA224
OID_DSA_WITH_SHA256 = SignatureAlgorithmOID.DSA_WITH_SHA256
OID_ECDSA_WITH_SHA1 = SignatureAlgorithmOID.ECDSA_WITH_SHA1
OID_ECDSA_WITH_SHA224 = SignatureAlgorithmOID.ECDSA_WITH_SHA224
OID_ECDSA_WITH_SHA256 = SignatureAlgorithmOID.ECDSA_WITH_SHA256
OID_ECDSA_WITH_SHA384 = SignatureAlgorithmOID.ECDSA_WITH_SHA384
OID_ECDSA_WITH_SHA512 = SignatureAlgorithmOID.ECDSA_WITH_SHA512
OID_RSA_WITH_MD5 = SignatureAlgorithmOID.RSA_WITH_MD5
OID_RSA_WITH_SHA1 = SignatureAlgorithmOID.RSA_WITH_SHA1
OID_RSA_WITH_SHA224 = SignatureAlgorithmOID.RSA_WITH_SHA224
OID_RSA_WITH_SHA256 = SignatureAlgorithmOID.RSA_WITH_SHA256
OID_RSA_WITH_SHA384 = SignatureAlgorithmOID.RSA_WITH_SHA384
OID_RSA_WITH_SHA512 = SignatureAlgorithmOID.RSA_WITH_SHA512
OID_RSASSA_PSS = SignatureAlgorithmOID.RSASSA_PSS
OID_COMMON_NAME = NameOID.COMMON_NAME
OID_COUNTRY_NAME = NameOID.COUNTRY_NAME
OID_DOMAIN_COMPONENT = NameOID.DOMAIN_COMPONENT
OID_DN_QUALIFIER = NameOID.DN_QUALIFIER
OID_EMAIL_ADDRESS = NameOID.EMAIL_ADDRESS
OID_GENERATION_QUALIFIER = NameOID.GENERATION_QUALIFIER
OID_GIVEN_NAME = NameOID.GIVEN_NAME
OID_LOCALITY_NAME = NameOID.LOCALITY_NAME
OID_ORGANIZATIONAL_UNIT_NAME = NameOID.ORGANIZATIONAL_UNIT_NAME
OID_ORGANIZATION_NAME = NameOID.ORGANIZATION_NAME
OID_PSEUDONYM = NameOID.PSEUDONYM
OID_SERIAL_NUMBER = NameOID.SERIAL_NUMBER
OID_STATE_OR_PROVINCE_NAME = NameOID.STATE_OR_PROVINCE_NAME
OID_SURNAME = NameOID.SURNAME
OID_TITLE = NameOID.TITLE
OID_CLIENT_AUTH = ExtendedKeyUsageOID.CLIENT_AUTH
OID_CODE_SIGNING = ExtendedKeyUsageOID.CODE_SIGNING
OID_EMAIL_PROTECTION = ExtendedKeyUsageOID.EMAIL_PROTECTION
OID_OCSP_SIGNING = ExtendedKeyUsageOID.OCSP_SIGNING
OID_SERVER_AUTH = ExtendedKeyUsageOID.SERVER_AUTH
OID_TIME_STAMPING = ExtendedKeyUsageOID.TIME_STAMPING
OID_ANY_POLICY = CertificatePoliciesOID.ANY_POLICY
OID_CPS_QUALIFIER = CertificatePoliciesOID.CPS_QUALIFIER
OID_CPS_USER_NOTICE = CertificatePoliciesOID.CPS_USER_NOTICE
OID_CERTIFICATE_ISSUER = CRLEntryExtensionOID.CERTIFICATE_ISSUER
OID_CRL_REASON = CRLEntryExtensionOID.CRL_REASON
OID_INVALIDITY_DATE = CRLEntryExtensionOID.INVALIDITY_DATE
OID_CA_ISSUERS = AuthorityInformationAccessOID.CA_ISSUERS
OID_OCSP = AuthorityInformationAccessOID.OCSP
__all__ = [
"certificate_transparency",
"load_pem_x509_certificate",
"load_pem_x509_certificates",
"load_der_x509_certificate",
"load_pem_x509_csr",
"load_der_x509_csr",
"load_pem_x509_crl",
"load_der_x509_crl",
"random_serial_number",
"Attribute",
"AttributeNotFound",
"Attributes",
"InvalidVersion",
"DeltaCRLIndicator",
"DuplicateExtension",
"ExtensionNotFound",
"UnsupportedGeneralNameType",
"NameAttribute",
"Name",
"RelativeDistinguishedName",
"ObjectIdentifier",
"ExtensionType",
"Extensions",
"Extension",
"ExtendedKeyUsage",
"FreshestCRL",
"IssuingDistributionPoint",
"TLSFeature",
"TLSFeatureType",
"OCSPNoCheck",
"BasicConstraints",
"CRLNumber",
"KeyUsage",
"AuthorityInformationAccess",
"SubjectInformationAccess",
"AccessDescription",
"CertificatePolicies",
"PolicyInformation",
"UserNotice",
"NoticeReference",
"SubjectKeyIdentifier",
"NameConstraints",
"CRLDistributionPoints",
"DistributionPoint",
"ReasonFlags",
"InhibitAnyPolicy",
"SubjectAlternativeName",
"IssuerAlternativeName",
"AuthorityKeyIdentifier",
"GeneralNames",
"GeneralName",
"RFC822Name",
"DNSName",
"UniformResourceIdentifier",
"RegisteredID",
"DirectoryName",
"IPAddress",
"OtherName",
"Certificate",
"CertificateRevocationList",
"CertificateRevocationListBuilder",
"CertificateSigningRequest",
"RevokedCertificate",
"RevokedCertificateBuilder",
"CertificateSigningRequestBuilder",
"CertificateBuilder",
"Version",
"OID_CA_ISSUERS",
"OID_OCSP",
"CertificateIssuer",
"CRLReason",
"InvalidityDate",
"UnrecognizedExtension",
"PolicyConstraints",
"PrecertificateSignedCertificateTimestamps",
"PrecertPoison",
"OCSPNonce",
"SignedCertificateTimestamps",
"SignatureAlgorithmOID",
"NameOID",
]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/x509/extensions.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import abc
import datetime
import hashlib
import ipaddress
import typing
from cryptography import utils
from cryptography.hazmat.bindings._rust import asn1
from cryptography.hazmat.bindings._rust import x509 as rust_x509
from cryptography.hazmat.primitives import constant_time, serialization
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurvePublicKey
from cryptography.hazmat.primitives.asymmetric.rsa import RSAPublicKey
from cryptography.hazmat.primitives.asymmetric.types import (
CertificateIssuerPublicKeyTypes,
CertificatePublicKeyTypes,
)
from cryptography.x509.certificate_transparency import (
SignedCertificateTimestamp,
)
from cryptography.x509.general_name import (
DirectoryName,
DNSName,
GeneralName,
IPAddress,
OtherName,
RegisteredID,
RFC822Name,
UniformResourceIdentifier,
_IPAddressTypes,
)
from cryptography.x509.name import Name, RelativeDistinguishedName
from cryptography.x509.oid import (
CRLEntryExtensionOID,
ExtensionOID,
ObjectIdentifier,
OCSPExtensionOID,
)
ExtensionTypeVar = typing.TypeVar(
"ExtensionTypeVar", bound="ExtensionType", covariant=True
)
def _key_identifier_from_public_key(
public_key: CertificatePublicKeyTypes,
) -> bytes:
if isinstance(public_key, RSAPublicKey):
data = public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.PKCS1,
)
elif isinstance(public_key, EllipticCurvePublicKey):
data = public_key.public_bytes(
serialization.Encoding.X962,
serialization.PublicFormat.UncompressedPoint,
)
else:
# This is a very slow way to do this.
serialized = public_key.public_bytes(
serialization.Encoding.DER,
serialization.PublicFormat.SubjectPublicKeyInfo,
)
data = asn1.parse_spki_for_data(serialized)
return hashlib.sha1(data).digest()
def _make_sequence_methods(field_name: str):
def len_method(self) -> int:
return len(getattr(self, field_name))
def iter_method(self):
return iter(getattr(self, field_name))
def getitem_method(self, idx):
return getattr(self, field_name)[idx]
return len_method, iter_method, getitem_method
class DuplicateExtension(Exception):
def __init__(self, msg: str, oid: ObjectIdentifier) -> None:
super().__init__(msg)
self.oid = oid
class ExtensionNotFound(Exception):
def __init__(self, msg: str, oid: ObjectIdentifier) -> None:
super().__init__(msg)
self.oid = oid
class ExtensionType(metaclass=abc.ABCMeta):
oid: typing.ClassVar[ObjectIdentifier]
def public_bytes(self) -> bytes:
"""
Serializes the extension type to DER.
"""
raise NotImplementedError(
"public_bytes is not implemented for extension type {!r}".format(
self
)
)
class Extensions:
def __init__(
self, extensions: typing.Iterable["Extension[ExtensionType]"]
) -> None:
self._extensions = list(extensions)
def get_extension_for_oid(
self, oid: ObjectIdentifier
) -> "Extension[ExtensionType]":
for ext in self:
if ext.oid == oid:
return ext
raise ExtensionNotFound(f"No {oid} extension was found", oid)
def get_extension_for_class(
self, extclass: typing.Type[ExtensionTypeVar]
) -> "Extension[ExtensionTypeVar]":
if extclass is UnrecognizedExtension:
raise TypeError(
"UnrecognizedExtension can't be used with "
"get_extension_for_class because more than one instance of the"
" class may be present."
)
for ext in self:
if isinstance(ext.value, extclass):
return ext
raise ExtensionNotFound(
f"No {extclass} extension was found", extclass.oid
)
__len__, __iter__, __getitem__ = _make_sequence_methods("_extensions")
def __repr__(self) -> str:
return f"<Extensions({self._extensions})>"
class CRLNumber(ExtensionType):
oid = ExtensionOID.CRL_NUMBER
def __init__(self, crl_number: int) -> None:
if not isinstance(crl_number, int):
raise TypeError("crl_number must be an integer")
self._crl_number = crl_number
def __eq__(self, other: object) -> bool:
if not isinstance(other, CRLNumber):
return NotImplemented
return self.crl_number == other.crl_number
def __hash__(self) -> int:
return hash(self.crl_number)
def __repr__(self) -> str:
return f"<CRLNumber({self.crl_number})>"
@property
def crl_number(self) -> int:
return self._crl_number
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class AuthorityKeyIdentifier(ExtensionType):
oid = ExtensionOID.AUTHORITY_KEY_IDENTIFIER
def __init__(
self,
key_identifier: typing.Optional[bytes],
authority_cert_issuer: typing.Optional[typing.Iterable[GeneralName]],
authority_cert_serial_number: typing.Optional[int],
) -> None:
if (authority_cert_issuer is None) != (
authority_cert_serial_number is None
):
raise ValueError(
"authority_cert_issuer and authority_cert_serial_number "
"must both be present or both None"
)
if authority_cert_issuer is not None:
authority_cert_issuer = list(authority_cert_issuer)
if not all(
isinstance(x, GeneralName) for x in authority_cert_issuer
):
raise TypeError(
"authority_cert_issuer must be a list of GeneralName "
"objects"
)
if authority_cert_serial_number is not None and not isinstance(
authority_cert_serial_number, int
):
raise TypeError("authority_cert_serial_number must be an integer")
self._key_identifier = key_identifier
self._authority_cert_issuer = authority_cert_issuer
self._authority_cert_serial_number = authority_cert_serial_number
# This takes a subset of CertificatePublicKeyTypes because an issuer
# cannot have an X25519/X448 key. This introduces some unfortunate
# asymmetry that requires typing users to explicitly
# narrow their type, but we should make this accurate and not just
# convenient.
@classmethod
def from_issuer_public_key(
cls, public_key: CertificateIssuerPublicKeyTypes
) -> "AuthorityKeyIdentifier":
digest = _key_identifier_from_public_key(public_key)
return cls(
key_identifier=digest,
authority_cert_issuer=None,
authority_cert_serial_number=None,
)
@classmethod
def from_issuer_subject_key_identifier(
cls, ski: "SubjectKeyIdentifier"
) -> "AuthorityKeyIdentifier":
return cls(
key_identifier=ski.digest,
authority_cert_issuer=None,
authority_cert_serial_number=None,
)
def __repr__(self) -> str:
return (
"<AuthorityKeyIdentifier(key_identifier={0.key_identifier!r}, "
"authority_cert_issuer={0.authority_cert_issuer}, "
"authority_cert_serial_number={0.authority_cert_serial_number}"
")>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, AuthorityKeyIdentifier):
return NotImplemented
return (
self.key_identifier == other.key_identifier
and self.authority_cert_issuer == other.authority_cert_issuer
and self.authority_cert_serial_number
== other.authority_cert_serial_number
)
def __hash__(self) -> int:
if self.authority_cert_issuer is None:
aci = None
else:
aci = tuple(self.authority_cert_issuer)
return hash(
(self.key_identifier, aci, self.authority_cert_serial_number)
)
@property
def key_identifier(self) -> typing.Optional[bytes]:
return self._key_identifier
@property
def authority_cert_issuer(
self,
) -> typing.Optional[typing.List[GeneralName]]:
return self._authority_cert_issuer
@property
def authority_cert_serial_number(self) -> typing.Optional[int]:
return self._authority_cert_serial_number
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class SubjectKeyIdentifier(ExtensionType):
oid = ExtensionOID.SUBJECT_KEY_IDENTIFIER
def __init__(self, digest: bytes) -> None:
self._digest = digest
@classmethod
def from_public_key(
cls, public_key: CertificatePublicKeyTypes
) -> "SubjectKeyIdentifier":
return cls(_key_identifier_from_public_key(public_key))
@property
def digest(self) -> bytes:
return self._digest
@property
def key_identifier(self) -> bytes:
return self._digest
def __repr__(self) -> str:
return f"<SubjectKeyIdentifier(digest={self.digest!r})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, SubjectKeyIdentifier):
return NotImplemented
return constant_time.bytes_eq(self.digest, other.digest)
def __hash__(self) -> int:
return hash(self.digest)
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class AuthorityInformationAccess(ExtensionType):
oid = ExtensionOID.AUTHORITY_INFORMATION_ACCESS
def __init__(
self, descriptions: typing.Iterable["AccessDescription"]
) -> None:
descriptions = list(descriptions)
if not all(isinstance(x, AccessDescription) for x in descriptions):
raise TypeError(
"Every item in the descriptions list must be an "
"AccessDescription"
)
self._descriptions = descriptions
__len__, __iter__, __getitem__ = _make_sequence_methods("_descriptions")
def __repr__(self) -> str:
return f"<AuthorityInformationAccess({self._descriptions})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, AuthorityInformationAccess):
return NotImplemented
return self._descriptions == other._descriptions
def __hash__(self) -> int:
return hash(tuple(self._descriptions))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class SubjectInformationAccess(ExtensionType):
oid = ExtensionOID.SUBJECT_INFORMATION_ACCESS
def __init__(
self, descriptions: typing.Iterable["AccessDescription"]
) -> None:
descriptions = list(descriptions)
if not all(isinstance(x, AccessDescription) for x in descriptions):
raise TypeError(
"Every item in the descriptions list must be an "
"AccessDescription"
)
self._descriptions = descriptions
__len__, __iter__, __getitem__ = _make_sequence_methods("_descriptions")
def __repr__(self) -> str:
return f"<SubjectInformationAccess({self._descriptions})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, SubjectInformationAccess):
return NotImplemented
return self._descriptions == other._descriptions
def __hash__(self) -> int:
return hash(tuple(self._descriptions))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class AccessDescription:
def __init__(
self, access_method: ObjectIdentifier, access_location: GeneralName
) -> None:
if not isinstance(access_method, ObjectIdentifier):
raise TypeError("access_method must be an ObjectIdentifier")
if not isinstance(access_location, GeneralName):
raise TypeError("access_location must be a GeneralName")
self._access_method = access_method
self._access_location = access_location
def __repr__(self) -> str:
return (
"<AccessDescription(access_method={0.access_method}, access_locati"
"on={0.access_location})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, AccessDescription):
return NotImplemented
return (
self.access_method == other.access_method
and self.access_location == other.access_location
)
def __hash__(self) -> int:
return hash((self.access_method, self.access_location))
@property
def access_method(self) -> ObjectIdentifier:
return self._access_method
@property
def access_location(self) -> GeneralName:
return self._access_location
class BasicConstraints(ExtensionType):
oid = ExtensionOID.BASIC_CONSTRAINTS
def __init__(self, ca: bool, path_length: typing.Optional[int]) -> None:
if not isinstance(ca, bool):
raise TypeError("ca must be a boolean value")
if path_length is not None and not ca:
raise ValueError("path_length must be None when ca is False")
if path_length is not None and (
not isinstance(path_length, int) or path_length < 0
):
raise TypeError(
"path_length must be a non-negative integer or None"
)
self._ca = ca
self._path_length = path_length
@property
def ca(self) -> bool:
return self._ca
@property
def path_length(self) -> typing.Optional[int]:
return self._path_length
def __repr__(self) -> str:
return (
"<BasicConstraints(ca={0.ca}, " "path_length={0.path_length})>"
).format(self)
def __eq__(self, other: object) -> bool:
if not isinstance(other, BasicConstraints):
return NotImplemented
return self.ca == other.ca and self.path_length == other.path_length
def __hash__(self) -> int:
return hash((self.ca, self.path_length))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class DeltaCRLIndicator(ExtensionType):
oid = ExtensionOID.DELTA_CRL_INDICATOR
def __init__(self, crl_number: int) -> None:
if not isinstance(crl_number, int):
raise TypeError("crl_number must be an integer")
self._crl_number = crl_number
@property
def crl_number(self) -> int:
return self._crl_number
def __eq__(self, other: object) -> bool:
if not isinstance(other, DeltaCRLIndicator):
return NotImplemented
return self.crl_number == other.crl_number
def __hash__(self) -> int:
return hash(self.crl_number)
def __repr__(self) -> str:
return f"<DeltaCRLIndicator(crl_number={self.crl_number})>"
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class CRLDistributionPoints(ExtensionType):
oid = ExtensionOID.CRL_DISTRIBUTION_POINTS
def __init__(
self, distribution_points: typing.Iterable["DistributionPoint"]
) -> None:
distribution_points = list(distribution_points)
if not all(
isinstance(x, DistributionPoint) for x in distribution_points
):
raise TypeError(
"distribution_points must be a list of DistributionPoint "
"objects"
)
self._distribution_points = distribution_points
__len__, __iter__, __getitem__ = _make_sequence_methods(
"_distribution_points"
)
def __repr__(self) -> str:
return f"<CRLDistributionPoints({self._distribution_points})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, CRLDistributionPoints):
return NotImplemented
return self._distribution_points == other._distribution_points
def __hash__(self) -> int:
return hash(tuple(self._distribution_points))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class FreshestCRL(ExtensionType):
oid = ExtensionOID.FRESHEST_CRL
def __init__(
self, distribution_points: typing.Iterable["DistributionPoint"]
) -> None:
distribution_points = list(distribution_points)
if not all(
isinstance(x, DistributionPoint) for x in distribution_points
):
raise TypeError(
"distribution_points must be a list of DistributionPoint "
"objects"
)
self._distribution_points = distribution_points
__len__, __iter__, __getitem__ = _make_sequence_methods(
"_distribution_points"
)
def __repr__(self) -> str:
return f"<FreshestCRL({self._distribution_points})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, FreshestCRL):
return NotImplemented
return self._distribution_points == other._distribution_points
def __hash__(self) -> int:
return hash(tuple(self._distribution_points))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class DistributionPoint:
def __init__(
self,
full_name: typing.Optional[typing.Iterable[GeneralName]],
relative_name: typing.Optional[RelativeDistinguishedName],
reasons: typing.Optional[typing.FrozenSet["ReasonFlags"]],
crl_issuer: typing.Optional[typing.Iterable[GeneralName]],
) -> None:
if full_name and relative_name:
raise ValueError(
"You cannot provide both full_name and relative_name, at "
"least one must be None."
)
if not full_name and not relative_name and not crl_issuer:
raise ValueError(
"Either full_name, relative_name or crl_issuer must be "
"provided."
)
if full_name is not None:
full_name = list(full_name)
if not all(isinstance(x, GeneralName) for x in full_name):
raise TypeError(
"full_name must be a list of GeneralName objects"
)
if relative_name:
if not isinstance(relative_name, RelativeDistinguishedName):
raise TypeError(
"relative_name must be a RelativeDistinguishedName"
)
if crl_issuer is not None:
crl_issuer = list(crl_issuer)
if not all(isinstance(x, GeneralName) for x in crl_issuer):
raise TypeError(
"crl_issuer must be None or a list of general names"
)
if reasons and (
not isinstance(reasons, frozenset)
or not all(isinstance(x, ReasonFlags) for x in reasons)
):
raise TypeError("reasons must be None or frozenset of ReasonFlags")
if reasons and (
ReasonFlags.unspecified in reasons
or ReasonFlags.remove_from_crl in reasons
):
raise ValueError(
"unspecified and remove_from_crl are not valid reasons in a "
"DistributionPoint"
)
self._full_name = full_name
self._relative_name = relative_name
self._reasons = reasons
self._crl_issuer = crl_issuer
def __repr__(self) -> str:
return (
"<DistributionPoint(full_name={0.full_name}, relative_name={0.rela"
"tive_name}, reasons={0.reasons}, "
"crl_issuer={0.crl_issuer})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, DistributionPoint):
return NotImplemented
return (
self.full_name == other.full_name
and self.relative_name == other.relative_name
and self.reasons == other.reasons
and self.crl_issuer == other.crl_issuer
)
def __hash__(self) -> int:
if self.full_name is not None:
fn: typing.Optional[typing.Tuple[GeneralName, ...]] = tuple(
self.full_name
)
else:
fn = None
if self.crl_issuer is not None:
crl_issuer: typing.Optional[
typing.Tuple[GeneralName, ...]
] = tuple(self.crl_issuer)
else:
crl_issuer = None
return hash((fn, self.relative_name, self.reasons, crl_issuer))
@property
def full_name(self) -> typing.Optional[typing.List[GeneralName]]:
return self._full_name
@property
def relative_name(self) -> typing.Optional[RelativeDistinguishedName]:
return self._relative_name
@property
def reasons(self) -> typing.Optional[typing.FrozenSet["ReasonFlags"]]:
return self._reasons
@property
def crl_issuer(self) -> typing.Optional[typing.List[GeneralName]]:
return self._crl_issuer
class ReasonFlags(utils.Enum):
unspecified = "unspecified"
key_compromise = "keyCompromise"
ca_compromise = "cACompromise"
affiliation_changed = "affiliationChanged"
superseded = "superseded"
cessation_of_operation = "cessationOfOperation"
certificate_hold = "certificateHold"
privilege_withdrawn = "privilegeWithdrawn"
aa_compromise = "aACompromise"
remove_from_crl = "removeFromCRL"
# These are distribution point bit string mappings. Not to be confused with
# CRLReason reason flags bit string mappings.
# ReasonFlags ::= BIT STRING {
# unused (0),
# keyCompromise (1),
# cACompromise (2),
# affiliationChanged (3),
# superseded (4),
# cessationOfOperation (5),
# certificateHold (6),
# privilegeWithdrawn (7),
# aACompromise (8) }
_REASON_BIT_MAPPING = {
1: ReasonFlags.key_compromise,
2: ReasonFlags.ca_compromise,
3: ReasonFlags.affiliation_changed,
4: ReasonFlags.superseded,
5: ReasonFlags.cessation_of_operation,
6: ReasonFlags.certificate_hold,
7: ReasonFlags.privilege_withdrawn,
8: ReasonFlags.aa_compromise,
}
_CRLREASONFLAGS = {
ReasonFlags.key_compromise: 1,
ReasonFlags.ca_compromise: 2,
ReasonFlags.affiliation_changed: 3,
ReasonFlags.superseded: 4,
ReasonFlags.cessation_of_operation: 5,
ReasonFlags.certificate_hold: 6,
ReasonFlags.privilege_withdrawn: 7,
ReasonFlags.aa_compromise: 8,
}
class PolicyConstraints(ExtensionType):
oid = ExtensionOID.POLICY_CONSTRAINTS
def __init__(
self,
require_explicit_policy: typing.Optional[int],
inhibit_policy_mapping: typing.Optional[int],
) -> None:
if require_explicit_policy is not None and not isinstance(
require_explicit_policy, int
):
raise TypeError(
"require_explicit_policy must be a non-negative integer or "
"None"
)
if inhibit_policy_mapping is not None and not isinstance(
inhibit_policy_mapping, int
):
raise TypeError(
"inhibit_policy_mapping must be a non-negative integer or None"
)
if inhibit_policy_mapping is None and require_explicit_policy is None:
raise ValueError(
"At least one of require_explicit_policy and "
"inhibit_policy_mapping must not be None"
)
self._require_explicit_policy = require_explicit_policy
self._inhibit_policy_mapping = inhibit_policy_mapping
def __repr__(self) -> str:
return (
"<PolicyConstraints(require_explicit_policy={0.require_explicit"
"_policy}, inhibit_policy_mapping={0.inhibit_policy_"
"mapping})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, PolicyConstraints):
return NotImplemented
return (
self.require_explicit_policy == other.require_explicit_policy
and self.inhibit_policy_mapping == other.inhibit_policy_mapping
)
def __hash__(self) -> int:
return hash(
(self.require_explicit_policy, self.inhibit_policy_mapping)
)
@property
def require_explicit_policy(self) -> typing.Optional[int]:
return self._require_explicit_policy
@property
def inhibit_policy_mapping(self) -> typing.Optional[int]:
return self._inhibit_policy_mapping
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class CertificatePolicies(ExtensionType):
oid = ExtensionOID.CERTIFICATE_POLICIES
def __init__(self, policies: typing.Iterable["PolicyInformation"]) -> None:
policies = list(policies)
if not all(isinstance(x, PolicyInformation) for x in policies):
raise TypeError(
"Every item in the policies list must be a "
"PolicyInformation"
)
self._policies = policies
__len__, __iter__, __getitem__ = _make_sequence_methods("_policies")
def __repr__(self) -> str:
return f"<CertificatePolicies({self._policies})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, CertificatePolicies):
return NotImplemented
return self._policies == other._policies
def __hash__(self) -> int:
return hash(tuple(self._policies))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class PolicyInformation:
def __init__(
self,
policy_identifier: ObjectIdentifier,
policy_qualifiers: typing.Optional[
typing.Iterable[typing.Union[str, "UserNotice"]]
],
) -> None:
if not isinstance(policy_identifier, ObjectIdentifier):
raise TypeError("policy_identifier must be an ObjectIdentifier")
self._policy_identifier = policy_identifier
if policy_qualifiers is not None:
policy_qualifiers = list(policy_qualifiers)
if not all(
isinstance(x, (str, UserNotice)) for x in policy_qualifiers
):
raise TypeError(
"policy_qualifiers must be a list of strings and/or "
"UserNotice objects or None"
)
self._policy_qualifiers = policy_qualifiers
def __repr__(self) -> str:
return (
"<PolicyInformation(policy_identifier={0.policy_identifier}, polic"
"y_qualifiers={0.policy_qualifiers})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, PolicyInformation):
return NotImplemented
return (
self.policy_identifier == other.policy_identifier
and self.policy_qualifiers == other.policy_qualifiers
)
def __hash__(self) -> int:
if self.policy_qualifiers is not None:
pq: typing.Optional[
typing.Tuple[typing.Union[str, "UserNotice"], ...]
] = tuple(self.policy_qualifiers)
else:
pq = None
return hash((self.policy_identifier, pq))
@property
def policy_identifier(self) -> ObjectIdentifier:
return self._policy_identifier
@property
def policy_qualifiers(
self,
) -> typing.Optional[typing.List[typing.Union[str, "UserNotice"]]]:
return self._policy_qualifiers
class UserNotice:
def __init__(
self,
notice_reference: typing.Optional["NoticeReference"],
explicit_text: typing.Optional[str],
) -> None:
if notice_reference and not isinstance(
notice_reference, NoticeReference
):
raise TypeError(
"notice_reference must be None or a NoticeReference"
)
self._notice_reference = notice_reference
self._explicit_text = explicit_text
def __repr__(self) -> str:
return (
"<UserNotice(notice_reference={0.notice_reference}, explicit_text="
"{0.explicit_text!r})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, UserNotice):
return NotImplemented
return (
self.notice_reference == other.notice_reference
and self.explicit_text == other.explicit_text
)
def __hash__(self) -> int:
return hash((self.notice_reference, self.explicit_text))
@property
def notice_reference(self) -> typing.Optional["NoticeReference"]:
return self._notice_reference
@property
def explicit_text(self) -> typing.Optional[str]:
return self._explicit_text
class NoticeReference:
def __init__(
self,
organization: typing.Optional[str],
notice_numbers: typing.Iterable[int],
) -> None:
self._organization = organization
notice_numbers = list(notice_numbers)
if not all(isinstance(x, int) for x in notice_numbers):
raise TypeError("notice_numbers must be a list of integers")
self._notice_numbers = notice_numbers
def __repr__(self) -> str:
return (
"<NoticeReference(organization={0.organization!r}, notice_numbers="
"{0.notice_numbers})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, NoticeReference):
return NotImplemented
return (
self.organization == other.organization
and self.notice_numbers == other.notice_numbers
)
def __hash__(self) -> int:
return hash((self.organization, tuple(self.notice_numbers)))
@property
def organization(self) -> typing.Optional[str]:
return self._organization
@property
def notice_numbers(self) -> typing.List[int]:
return self._notice_numbers
class ExtendedKeyUsage(ExtensionType):
oid = ExtensionOID.EXTENDED_KEY_USAGE
def __init__(self, usages: typing.Iterable[ObjectIdentifier]) -> None:
usages = list(usages)
if not all(isinstance(x, ObjectIdentifier) for x in usages):
raise TypeError(
"Every item in the usages list must be an ObjectIdentifier"
)
self._usages = usages
__len__, __iter__, __getitem__ = _make_sequence_methods("_usages")
def __repr__(self) -> str:
return f"<ExtendedKeyUsage({self._usages})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, ExtendedKeyUsage):
return NotImplemented
return self._usages == other._usages
def __hash__(self) -> int:
return hash(tuple(self._usages))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class OCSPNoCheck(ExtensionType):
oid = ExtensionOID.OCSP_NO_CHECK
def __eq__(self, other: object) -> bool:
if not isinstance(other, OCSPNoCheck):
return NotImplemented
return True
def __hash__(self) -> int:
return hash(OCSPNoCheck)
def __repr__(self) -> str:
return "<OCSPNoCheck()>"
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class PrecertPoison(ExtensionType):
oid = ExtensionOID.PRECERT_POISON
def __eq__(self, other: object) -> bool:
if not isinstance(other, PrecertPoison):
return NotImplemented
return True
def __hash__(self) -> int:
return hash(PrecertPoison)
def __repr__(self) -> str:
return "<PrecertPoison()>"
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class TLSFeature(ExtensionType):
oid = ExtensionOID.TLS_FEATURE
def __init__(self, features: typing.Iterable["TLSFeatureType"]) -> None:
features = list(features)
if (
not all(isinstance(x, TLSFeatureType) for x in features)
or len(features) == 0
):
raise TypeError(
"features must be a list of elements from the TLSFeatureType "
"enum"
)
self._features = features
__len__, __iter__, __getitem__ = _make_sequence_methods("_features")
def __repr__(self) -> str:
return f"<TLSFeature(features={self._features})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, TLSFeature):
return NotImplemented
return self._features == other._features
def __hash__(self) -> int:
return hash(tuple(self._features))
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class TLSFeatureType(utils.Enum):
# status_request is defined in RFC 6066 and is used for what is commonly
# called OCSP Must-Staple when present in the TLS Feature extension in an
# X.509 certificate.
status_request = 5
# status_request_v2 is defined in RFC 6961 and allows multiple OCSP
# responses to be provided. It is not currently in use by clients or
# servers.
status_request_v2 = 17
_TLS_FEATURE_TYPE_TO_ENUM = {x.value: x for x in TLSFeatureType}
class InhibitAnyPolicy(ExtensionType):
oid = ExtensionOID.INHIBIT_ANY_POLICY
def __init__(self, skip_certs: int) -> None:
if not isinstance(skip_certs, int):
raise TypeError("skip_certs must be an integer")
if skip_certs < 0:
raise ValueError("skip_certs must be a non-negative integer")
self._skip_certs = skip_certs
def __repr__(self) -> str:
return f"<InhibitAnyPolicy(skip_certs={self.skip_certs})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, InhibitAnyPolicy):
return NotImplemented
return self.skip_certs == other.skip_certs
def __hash__(self) -> int:
return hash(self.skip_certs)
@property
def skip_certs(self) -> int:
return self._skip_certs
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class KeyUsage(ExtensionType):
oid = ExtensionOID.KEY_USAGE
def __init__(
self,
digital_signature: bool,
content_commitment: bool,
key_encipherment: bool,
data_encipherment: bool,
key_agreement: bool,
key_cert_sign: bool,
crl_sign: bool,
encipher_only: bool,
decipher_only: bool,
) -> None:
if not key_agreement and (encipher_only or decipher_only):
raise ValueError(
"encipher_only and decipher_only can only be true when "
"key_agreement is true"
)
self._digital_signature = digital_signature
self._content_commitment = content_commitment
self._key_encipherment = key_encipherment
self._data_encipherment = data_encipherment
self._key_agreement = key_agreement
self._key_cert_sign = key_cert_sign
self._crl_sign = crl_sign
self._encipher_only = encipher_only
self._decipher_only = decipher_only
@property
def digital_signature(self) -> bool:
return self._digital_signature
@property
def content_commitment(self) -> bool:
return self._content_commitment
@property
def key_encipherment(self) -> bool:
return self._key_encipherment
@property
def data_encipherment(self) -> bool:
return self._data_encipherment
@property
def key_agreement(self) -> bool:
return self._key_agreement
@property
def key_cert_sign(self) -> bool:
return self._key_cert_sign
@property
def crl_sign(self) -> bool:
return self._crl_sign
@property
def encipher_only(self) -> bool:
if not self.key_agreement:
raise ValueError(
"encipher_only is undefined unless key_agreement is true"
)
else:
return self._encipher_only
@property
def decipher_only(self) -> bool:
if not self.key_agreement:
raise ValueError(
"decipher_only is undefined unless key_agreement is true"
)
else:
return self._decipher_only
def __repr__(self) -> str:
try:
encipher_only = self.encipher_only
decipher_only = self.decipher_only
except ValueError:
# Users found None confusing because even though encipher/decipher
# have no meaning unless key_agreement is true, to construct an
# instance of the class you still need to pass False.
encipher_only = False
decipher_only = False
return (
"<KeyUsage(digital_signature={0.digital_signature}, "
"content_commitment={0.content_commitment}, "
"key_encipherment={0.key_encipherment}, "
"data_encipherment={0.data_encipherment}, "
"key_agreement={0.key_agreement}, "
"key_cert_sign={0.key_cert_sign}, crl_sign={0.crl_sign}, "
"encipher_only={1}, decipher_only={2})>"
).format(self, encipher_only, decipher_only)
def __eq__(self, other: object) -> bool:
if not isinstance(other, KeyUsage):
return NotImplemented
return (
self.digital_signature == other.digital_signature
and self.content_commitment == other.content_commitment
and self.key_encipherment == other.key_encipherment
and self.data_encipherment == other.data_encipherment
and self.key_agreement == other.key_agreement
and self.key_cert_sign == other.key_cert_sign
and self.crl_sign == other.crl_sign
and self._encipher_only == other._encipher_only
and self._decipher_only == other._decipher_only
)
def __hash__(self) -> int:
return hash(
(
self.digital_signature,
self.content_commitment,
self.key_encipherment,
self.data_encipherment,
self.key_agreement,
self.key_cert_sign,
self.crl_sign,
self._encipher_only,
self._decipher_only,
)
)
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class NameConstraints(ExtensionType):
oid = ExtensionOID.NAME_CONSTRAINTS
def __init__(
self,
permitted_subtrees: typing.Optional[typing.Iterable[GeneralName]],
excluded_subtrees: typing.Optional[typing.Iterable[GeneralName]],
) -> None:
if permitted_subtrees is not None:
permitted_subtrees = list(permitted_subtrees)
if not permitted_subtrees:
raise ValueError(
"permitted_subtrees must be a non-empty list or None"
)
if not all(isinstance(x, GeneralName) for x in permitted_subtrees):
raise TypeError(
"permitted_subtrees must be a list of GeneralName objects "
"or None"
)
self._validate_tree(permitted_subtrees)
if excluded_subtrees is not None:
excluded_subtrees = list(excluded_subtrees)
if not excluded_subtrees:
raise ValueError(
"excluded_subtrees must be a non-empty list or None"
)
if not all(isinstance(x, GeneralName) for x in excluded_subtrees):
raise TypeError(
"excluded_subtrees must be a list of GeneralName objects "
"or None"
)
self._validate_tree(excluded_subtrees)
if permitted_subtrees is None and excluded_subtrees is None:
raise ValueError(
"At least one of permitted_subtrees and excluded_subtrees "
"must not be None"
)
self._permitted_subtrees = permitted_subtrees
self._excluded_subtrees = excluded_subtrees
def __eq__(self, other: object) -> bool:
if not isinstance(other, NameConstraints):
return NotImplemented
return (
self.excluded_subtrees == other.excluded_subtrees
and self.permitted_subtrees == other.permitted_subtrees
)
def _validate_tree(self, tree: typing.Iterable[GeneralName]) -> None:
self._validate_ip_name(tree)
self._validate_dns_name(tree)
def _validate_ip_name(self, tree: typing.Iterable[GeneralName]) -> None:
if any(
isinstance(name, IPAddress)
and not isinstance(
name.value, (ipaddress.IPv4Network, ipaddress.IPv6Network)
)
for name in tree
):
raise TypeError(
"IPAddress name constraints must be an IPv4Network or"
" IPv6Network object"
)
def _validate_dns_name(self, tree: typing.Iterable[GeneralName]) -> None:
if any(
isinstance(name, DNSName) and "*" in name.value for name in tree
):
raise ValueError(
"DNSName name constraints must not contain the '*' wildcard"
" character"
)
def __repr__(self) -> str:
return (
"<NameConstraints(permitted_subtrees={0.permitted_subtrees}, "
"excluded_subtrees={0.excluded_subtrees})>".format(self)
)
def __hash__(self) -> int:
if self.permitted_subtrees is not None:
ps: typing.Optional[typing.Tuple[GeneralName, ...]] = tuple(
self.permitted_subtrees
)
else:
ps = None
if self.excluded_subtrees is not None:
es: typing.Optional[typing.Tuple[GeneralName, ...]] = tuple(
self.excluded_subtrees
)
else:
es = None
return hash((ps, es))
@property
def permitted_subtrees(
self,
) -> typing.Optional[typing.List[GeneralName]]:
return self._permitted_subtrees
@property
def excluded_subtrees(
self,
) -> typing.Optional[typing.List[GeneralName]]:
return self._excluded_subtrees
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class Extension(typing.Generic[ExtensionTypeVar]):
def __init__(
self, oid: ObjectIdentifier, critical: bool, value: ExtensionTypeVar
) -> None:
if not isinstance(oid, ObjectIdentifier):
raise TypeError(
"oid argument must be an ObjectIdentifier instance."
)
if not isinstance(critical, bool):
raise TypeError("critical must be a boolean value")
self._oid = oid
self._critical = critical
self._value = value
@property
def oid(self) -> ObjectIdentifier:
return self._oid
@property
def critical(self) -> bool:
return self._critical
@property
def value(self) -> ExtensionTypeVar:
return self._value
def __repr__(self) -> str:
return (
"<Extension(oid={0.oid}, critical={0.critical}, "
"value={0.value})>"
).format(self)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Extension):
return NotImplemented
return (
self.oid == other.oid
and self.critical == other.critical
and self.value == other.value
)
def __hash__(self) -> int:
return hash((self.oid, self.critical, self.value))
class GeneralNames:
def __init__(self, general_names: typing.Iterable[GeneralName]) -> None:
general_names = list(general_names)
if not all(isinstance(x, GeneralName) for x in general_names):
raise TypeError(
"Every item in the general_names list must be an "
"object conforming to the GeneralName interface"
)
self._general_names = general_names
__len__, __iter__, __getitem__ = _make_sequence_methods("_general_names")
@typing.overload
def get_values_for_type(
self,
type: typing.Union[
typing.Type[DNSName],
typing.Type[UniformResourceIdentifier],
typing.Type[RFC822Name],
],
) -> typing.List[str]:
...
@typing.overload
def get_values_for_type(
self,
type: typing.Type[DirectoryName],
) -> typing.List[Name]:
...
@typing.overload
def get_values_for_type(
self,
type: typing.Type[RegisteredID],
) -> typing.List[ObjectIdentifier]:
...
@typing.overload
def get_values_for_type(
self, type: typing.Type[IPAddress]
) -> typing.List[_IPAddressTypes]:
...
@typing.overload
def get_values_for_type(
self, type: typing.Type[OtherName]
) -> typing.List[OtherName]:
...
def get_values_for_type(
self,
type: typing.Union[
typing.Type[DNSName],
typing.Type[DirectoryName],
typing.Type[IPAddress],
typing.Type[OtherName],
typing.Type[RFC822Name],
typing.Type[RegisteredID],
typing.Type[UniformResourceIdentifier],
],
) -> typing.Union[
typing.List[_IPAddressTypes],
typing.List[str],
typing.List[OtherName],
typing.List[Name],
typing.List[ObjectIdentifier],
]:
# Return the value of each GeneralName, except for OtherName instances
# which we return directly because it has two important properties not
# just one value.
objs = (i for i in self if isinstance(i, type))
if type != OtherName:
return [i.value for i in objs]
return list(objs)
def __repr__(self) -> str:
return f"<GeneralNames({self._general_names})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, GeneralNames):
return NotImplemented
return self._general_names == other._general_names
def __hash__(self) -> int:
return hash(tuple(self._general_names))
class SubjectAlternativeName(ExtensionType):
oid = ExtensionOID.SUBJECT_ALTERNATIVE_NAME
def __init__(self, general_names: typing.Iterable[GeneralName]) -> None:
self._general_names = GeneralNames(general_names)
__len__, __iter__, __getitem__ = _make_sequence_methods("_general_names")
@typing.overload
def get_values_for_type(
self,
type: typing.Union[
typing.Type[DNSName],
typing.Type[UniformResourceIdentifier],
typing.Type[RFC822Name],
],
) -> typing.List[str]:
...
@typing.overload
def get_values_for_type(
self,
type: typing.Type[DirectoryName],
) -> typing.List[Name]:
...
@typing.overload
def get_values_for_type(
self,
type: typing.Type[RegisteredID],
) -> typing.List[ObjectIdentifier]:
...
@typing.overload
def get_values_for_type(
self, type: typing.Type[IPAddress]
) -> typing.List[_IPAddressTypes]:
...
@typing.overload
def get_values_for_type(
self, type: typing.Type[OtherName]
) -> typing.List[OtherName]:
...
def get_values_for_type(
self,
type: typing.Union[
typing.Type[DNSName],
typing.Type[DirectoryName],
typing.Type[IPAddress],
typing.Type[OtherName],
typing.Type[RFC822Name],
typing.Type[RegisteredID],
typing.Type[UniformResourceIdentifier],
],
) -> typing.Union[
typing.List[_IPAddressTypes],
typing.List[str],
typing.List[OtherName],
typing.List[Name],
typing.List[ObjectIdentifier],
]:
return self._general_names.get_values_for_type(type)
def __repr__(self) -> str:
return f"<SubjectAlternativeName({self._general_names})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, SubjectAlternativeName):
return NotImplemented
return self._general_names == other._general_names
def __hash__(self) -> int:
return hash(self._general_names)
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class IssuerAlternativeName(ExtensionType):
oid = ExtensionOID.ISSUER_ALTERNATIVE_NAME
def __init__(self, general_names: typing.Iterable[GeneralName]) -> None:
self._general_names = GeneralNames(general_names)
__len__, __iter__, __getitem__ = _make_sequence_methods("_general_names")
@typing.overload
def get_values_for_type(
self,
type: typing.Union[
typing.Type[DNSName],
typing.Type[UniformResourceIdentifier],
typing.Type[RFC822Name],
],
) -> typing.List[str]:
...
@typing.overload
def get_values_for_type(
self,
type: typing.Type[DirectoryName],
) -> typing.List[Name]:
...
@typing.overload
def get_values_for_type(
self,
type: typing.Type[RegisteredID],
) -> typing.List[ObjectIdentifier]:
...
@typing.overload
def get_values_for_type(
self, type: typing.Type[IPAddress]
) -> typing.List[_IPAddressTypes]:
...
@typing.overload
def get_values_for_type(
self, type: typing.Type[OtherName]
) -> typing.List[OtherName]:
...
def get_values_for_type(
self,
type: typing.Union[
typing.Type[DNSName],
typing.Type[DirectoryName],
typing.Type[IPAddress],
typing.Type[OtherName],
typing.Type[RFC822Name],
typing.Type[RegisteredID],
typing.Type[UniformResourceIdentifier],
],
) -> typing.Union[
typing.List[_IPAddressTypes],
typing.List[str],
typing.List[OtherName],
typing.List[Name],
typing.List[ObjectIdentifier],
]:
return self._general_names.get_values_for_type(type)
def __repr__(self) -> str:
return f"<IssuerAlternativeName({self._general_names})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, IssuerAlternativeName):
return NotImplemented
return self._general_names == other._general_names
def __hash__(self) -> int:
return hash(self._general_names)
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class CertificateIssuer(ExtensionType):
oid = CRLEntryExtensionOID.CERTIFICATE_ISSUER
def __init__(self, general_names: typing.Iterable[GeneralName]) -> None:
self._general_names = GeneralNames(general_names)
__len__, __iter__, __getitem__ = _make_sequence_methods("_general_names")
@typing.overload
def get_values_for_type(
self,
type: typing.Union[
typing.Type[DNSName],
typing.Type[UniformResourceIdentifier],
typing.Type[RFC822Name],
],
) -> typing.List[str]:
...
@typing.overload
def get_values_for_type(
self,
type: typing.Type[DirectoryName],
) -> typing.List[Name]:
...
@typing.overload
def get_values_for_type(
self,
type: typing.Type[RegisteredID],
) -> typing.List[ObjectIdentifier]:
...
@typing.overload
def get_values_for_type(
self, type: typing.Type[IPAddress]
) -> typing.List[_IPAddressTypes]:
...
@typing.overload
def get_values_for_type(
self, type: typing.Type[OtherName]
) -> typing.List[OtherName]:
...
def get_values_for_type(
self,
type: typing.Union[
typing.Type[DNSName],
typing.Type[DirectoryName],
typing.Type[IPAddress],
typing.Type[OtherName],
typing.Type[RFC822Name],
typing.Type[RegisteredID],
typing.Type[UniformResourceIdentifier],
],
) -> typing.Union[
typing.List[_IPAddressTypes],
typing.List[str],
typing.List[OtherName],
typing.List[Name],
typing.List[ObjectIdentifier],
]:
return self._general_names.get_values_for_type(type)
def __repr__(self) -> str:
return f"<CertificateIssuer({self._general_names})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, CertificateIssuer):
return NotImplemented
return self._general_names == other._general_names
def __hash__(self) -> int:
return hash(self._general_names)
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class CRLReason(ExtensionType):
oid = CRLEntryExtensionOID.CRL_REASON
def __init__(self, reason: ReasonFlags) -> None:
if not isinstance(reason, ReasonFlags):
raise TypeError("reason must be an element from ReasonFlags")
self._reason = reason
def __repr__(self) -> str:
return f"<CRLReason(reason={self._reason})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, CRLReason):
return NotImplemented
return self.reason == other.reason
def __hash__(self) -> int:
return hash(self.reason)
@property
def reason(self) -> ReasonFlags:
return self._reason
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class InvalidityDate(ExtensionType):
oid = CRLEntryExtensionOID.INVALIDITY_DATE
def __init__(self, invalidity_date: datetime.datetime) -> None:
if not isinstance(invalidity_date, datetime.datetime):
raise TypeError("invalidity_date must be a datetime.datetime")
self._invalidity_date = invalidity_date
def __repr__(self) -> str:
return "<InvalidityDate(invalidity_date={})>".format(
self._invalidity_date
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, InvalidityDate):
return NotImplemented
return self.invalidity_date == other.invalidity_date
def __hash__(self) -> int:
return hash(self.invalidity_date)
@property
def invalidity_date(self) -> datetime.datetime:
return self._invalidity_date
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class PrecertificateSignedCertificateTimestamps(ExtensionType):
oid = ExtensionOID.PRECERT_SIGNED_CERTIFICATE_TIMESTAMPS
def __init__(
self,
signed_certificate_timestamps: typing.Iterable[
SignedCertificateTimestamp
],
) -> None:
signed_certificate_timestamps = list(signed_certificate_timestamps)
if not all(
isinstance(sct, SignedCertificateTimestamp)
for sct in signed_certificate_timestamps
):
raise TypeError(
"Every item in the signed_certificate_timestamps list must be "
"a SignedCertificateTimestamp"
)
self._signed_certificate_timestamps = signed_certificate_timestamps
__len__, __iter__, __getitem__ = _make_sequence_methods(
"_signed_certificate_timestamps"
)
def __repr__(self) -> str:
return "<PrecertificateSignedCertificateTimestamps({})>".format(
list(self)
)
def __hash__(self) -> int:
return hash(tuple(self._signed_certificate_timestamps))
def __eq__(self, other: object) -> bool:
if not isinstance(other, PrecertificateSignedCertificateTimestamps):
return NotImplemented
return (
self._signed_certificate_timestamps
== other._signed_certificate_timestamps
)
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class SignedCertificateTimestamps(ExtensionType):
oid = ExtensionOID.SIGNED_CERTIFICATE_TIMESTAMPS
def __init__(
self,
signed_certificate_timestamps: typing.Iterable[
SignedCertificateTimestamp
],
) -> None:
signed_certificate_timestamps = list(signed_certificate_timestamps)
if not all(
isinstance(sct, SignedCertificateTimestamp)
for sct in signed_certificate_timestamps
):
raise TypeError(
"Every item in the signed_certificate_timestamps list must be "
"a SignedCertificateTimestamp"
)
self._signed_certificate_timestamps = signed_certificate_timestamps
__len__, __iter__, __getitem__ = _make_sequence_methods(
"_signed_certificate_timestamps"
)
def __repr__(self) -> str:
return f"<SignedCertificateTimestamps({list(self)})>"
def __hash__(self) -> int:
return hash(tuple(self._signed_certificate_timestamps))
def __eq__(self, other: object) -> bool:
if not isinstance(other, SignedCertificateTimestamps):
return NotImplemented
return (
self._signed_certificate_timestamps
== other._signed_certificate_timestamps
)
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class OCSPNonce(ExtensionType):
oid = OCSPExtensionOID.NONCE
def __init__(self, nonce: bytes) -> None:
if not isinstance(nonce, bytes):
raise TypeError("nonce must be bytes")
self._nonce = nonce
def __eq__(self, other: object) -> bool:
if not isinstance(other, OCSPNonce):
return NotImplemented
return self.nonce == other.nonce
def __hash__(self) -> int:
return hash(self.nonce)
def __repr__(self) -> str:
return f"<OCSPNonce(nonce={self.nonce!r})>"
@property
def nonce(self) -> bytes:
return self._nonce
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class IssuingDistributionPoint(ExtensionType):
oid = ExtensionOID.ISSUING_DISTRIBUTION_POINT
def __init__(
self,
full_name: typing.Optional[typing.Iterable[GeneralName]],
relative_name: typing.Optional[RelativeDistinguishedName],
only_contains_user_certs: bool,
only_contains_ca_certs: bool,
only_some_reasons: typing.Optional[typing.FrozenSet[ReasonFlags]],
indirect_crl: bool,
only_contains_attribute_certs: bool,
) -> None:
if full_name is not None:
full_name = list(full_name)
if only_some_reasons and (
not isinstance(only_some_reasons, frozenset)
or not all(isinstance(x, ReasonFlags) for x in only_some_reasons)
):
raise TypeError(
"only_some_reasons must be None or frozenset of ReasonFlags"
)
if only_some_reasons and (
ReasonFlags.unspecified in only_some_reasons
or ReasonFlags.remove_from_crl in only_some_reasons
):
raise ValueError(
"unspecified and remove_from_crl are not valid reasons in an "
"IssuingDistributionPoint"
)
if not (
isinstance(only_contains_user_certs, bool)
and isinstance(only_contains_ca_certs, bool)
and isinstance(indirect_crl, bool)
and isinstance(only_contains_attribute_certs, bool)
):
raise TypeError(
"only_contains_user_certs, only_contains_ca_certs, "
"indirect_crl and only_contains_attribute_certs "
"must all be boolean."
)
crl_constraints = [
only_contains_user_certs,
only_contains_ca_certs,
indirect_crl,
only_contains_attribute_certs,
]
if len([x for x in crl_constraints if x]) > 1:
raise ValueError(
"Only one of the following can be set to True: "
"only_contains_user_certs, only_contains_ca_certs, "
"indirect_crl, only_contains_attribute_certs"
)
if not any(
[
only_contains_user_certs,
only_contains_ca_certs,
indirect_crl,
only_contains_attribute_certs,
full_name,
relative_name,
only_some_reasons,
]
):
raise ValueError(
"Cannot create empty extension: "
"if only_contains_user_certs, only_contains_ca_certs, "
"indirect_crl, and only_contains_attribute_certs are all False"
", then either full_name, relative_name, or only_some_reasons "
"must have a value."
)
self._only_contains_user_certs = only_contains_user_certs
self._only_contains_ca_certs = only_contains_ca_certs
self._indirect_crl = indirect_crl
self._only_contains_attribute_certs = only_contains_attribute_certs
self._only_some_reasons = only_some_reasons
self._full_name = full_name
self._relative_name = relative_name
def __repr__(self) -> str:
return (
"<IssuingDistributionPoint(full_name={0.full_name}, "
"relative_name={0.relative_name}, "
"only_contains_user_certs={0.only_contains_user_certs}, "
"only_contains_ca_certs={0.only_contains_ca_certs}, "
"only_some_reasons={0.only_some_reasons}, "
"indirect_crl={0.indirect_crl}, "
"only_contains_attribute_certs="
"{0.only_contains_attribute_certs})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, IssuingDistributionPoint):
return NotImplemented
return (
self.full_name == other.full_name
and self.relative_name == other.relative_name
and self.only_contains_user_certs == other.only_contains_user_certs
and self.only_contains_ca_certs == other.only_contains_ca_certs
and self.only_some_reasons == other.only_some_reasons
and self.indirect_crl == other.indirect_crl
and self.only_contains_attribute_certs
== other.only_contains_attribute_certs
)
def __hash__(self) -> int:
return hash(
(
self.full_name,
self.relative_name,
self.only_contains_user_certs,
self.only_contains_ca_certs,
self.only_some_reasons,
self.indirect_crl,
self.only_contains_attribute_certs,
)
)
@property
def full_name(self) -> typing.Optional[typing.List[GeneralName]]:
return self._full_name
@property
def relative_name(self) -> typing.Optional[RelativeDistinguishedName]:
return self._relative_name
@property
def only_contains_user_certs(self) -> bool:
return self._only_contains_user_certs
@property
def only_contains_ca_certs(self) -> bool:
return self._only_contains_ca_certs
@property
def only_some_reasons(
self,
) -> typing.Optional[typing.FrozenSet[ReasonFlags]]:
return self._only_some_reasons
@property
def indirect_crl(self) -> bool:
return self._indirect_crl
@property
def only_contains_attribute_certs(self) -> bool:
return self._only_contains_attribute_certs
def public_bytes(self) -> bytes:
return rust_x509.encode_extension_value(self)
class UnrecognizedExtension(ExtensionType):
def __init__(self, oid: ObjectIdentifier, value: bytes) -> None:
if not isinstance(oid, ObjectIdentifier):
raise TypeError("oid must be an ObjectIdentifier")
self._oid = oid
self._value = value
@property
def oid(self) -> ObjectIdentifier: # type: ignore[override]
return self._oid
@property
def value(self) -> bytes:
return self._value
def __repr__(self) -> str:
return (
"<UnrecognizedExtension(oid={0.oid}, "
"value={0.value!r})>".format(self)
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, UnrecognizedExtension):
return NotImplemented
return self.oid == other.oid and self.value == other.value
def __hash__(self) -> int:
return hash((self.oid, self.value))
def public_bytes(self) -> bytes:
return self.value
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/x509/name.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import binascii
import re
import sys
import typing
import warnings
from cryptography import utils
from cryptography.hazmat.bindings._rust import x509 as rust_x509
from cryptography.x509.oid import NameOID, ObjectIdentifier
class _ASN1Type(utils.Enum):
BitString = 3
OctetString = 4
UTF8String = 12
NumericString = 18
PrintableString = 19
T61String = 20
IA5String = 22
UTCTime = 23
GeneralizedTime = 24
VisibleString = 26
UniversalString = 28
BMPString = 30
_ASN1_TYPE_TO_ENUM = {i.value: i for i in _ASN1Type}
_NAMEOID_DEFAULT_TYPE: typing.Dict[ObjectIdentifier, _ASN1Type] = {
NameOID.COUNTRY_NAME: _ASN1Type.PrintableString,
NameOID.JURISDICTION_COUNTRY_NAME: _ASN1Type.PrintableString,
NameOID.SERIAL_NUMBER: _ASN1Type.PrintableString,
NameOID.DN_QUALIFIER: _ASN1Type.PrintableString,
NameOID.EMAIL_ADDRESS: _ASN1Type.IA5String,
NameOID.DOMAIN_COMPONENT: _ASN1Type.IA5String,
}
# Type alias
_OidNameMap = typing.Mapping[ObjectIdentifier, str]
_NameOidMap = typing.Mapping[str, ObjectIdentifier]
#: Short attribute names from RFC 4514:
#: https://tools.ietf.org/html/rfc4514#page-7
_NAMEOID_TO_NAME: _OidNameMap = {
NameOID.COMMON_NAME: "CN",
NameOID.LOCALITY_NAME: "L",
NameOID.STATE_OR_PROVINCE_NAME: "ST",
NameOID.ORGANIZATION_NAME: "O",
NameOID.ORGANIZATIONAL_UNIT_NAME: "OU",
NameOID.COUNTRY_NAME: "C",
NameOID.STREET_ADDRESS: "STREET",
NameOID.DOMAIN_COMPONENT: "DC",
NameOID.USER_ID: "UID",
}
_NAME_TO_NAMEOID = {v: k for k, v in _NAMEOID_TO_NAME.items()}
def _escape_dn_value(val: typing.Union[str, bytes]) -> str:
"""Escape special characters in RFC4514 Distinguished Name value."""
if not val:
return ""
# RFC 4514 Section 2.4 defines the value as being the # (U+0023) character
# followed by the hexadecimal encoding of the octets.
if isinstance(val, bytes):
return "#" + binascii.hexlify(val).decode("utf8")
# See https://tools.ietf.org/html/rfc4514#section-2.4
val = val.replace("\\", "\\\\")
val = val.replace('"', '\\"')
val = val.replace("+", "\\+")
val = val.replace(",", "\\,")
val = val.replace(";", "\\;")
val = val.replace("<", "\\<")
val = val.replace(">", "\\>")
val = val.replace("\0", "\\00")
if val[0] in ("#", " "):
val = "\\" + val
if val[-1] == " ":
val = val[:-1] + "\\ "
return val
def _unescape_dn_value(val: str) -> str:
if not val:
return ""
# See https://tools.ietf.org/html/rfc4514#section-3
# special = escaped / SPACE / SHARP / EQUALS
# escaped = DQUOTE / PLUS / COMMA / SEMI / LANGLE / RANGLE
def sub(m):
val = m.group(1)
# Regular escape
if len(val) == 1:
return val
# Hex-value scape
return chr(int(val, 16))
return _RFC4514NameParser._PAIR_RE.sub(sub, val)
class NameAttribute:
def __init__(
self,
oid: ObjectIdentifier,
value: typing.Union[str, bytes],
_type: typing.Optional[_ASN1Type] = None,
*,
_validate: bool = True,
) -> None:
if not isinstance(oid, ObjectIdentifier):
raise TypeError(
"oid argument must be an ObjectIdentifier instance."
)
if _type == _ASN1Type.BitString:
if oid != NameOID.X500_UNIQUE_IDENTIFIER:
raise TypeError(
"oid must be X500_UNIQUE_IDENTIFIER for BitString type."
)
if not isinstance(value, bytes):
raise TypeError("value must be bytes for BitString")
else:
if not isinstance(value, str):
raise TypeError("value argument must be a str")
if (
oid == NameOID.COUNTRY_NAME
or oid == NameOID.JURISDICTION_COUNTRY_NAME
):
assert isinstance(value, str)
c_len = len(value.encode("utf8"))
if c_len != 2 and _validate is True:
raise ValueError(
"Country name must be a 2 character country code"
)
elif c_len != 2:
warnings.warn(
"Country names should be two characters, but the "
"attribute is {} characters in length.".format(c_len),
stacklevel=2,
)
# The appropriate ASN1 string type varies by OID and is defined across
# multiple RFCs including 2459, 3280, and 5280. In general UTF8String
# is preferred (2459), but 3280 and 5280 specify several OIDs with
# alternate types. This means when we see the sentinel value we need
# to look up whether the OID has a non-UTF8 type. If it does, set it
# to that. Otherwise, UTF8!
if _type is None:
_type = _NAMEOID_DEFAULT_TYPE.get(oid, _ASN1Type.UTF8String)
if not isinstance(_type, _ASN1Type):
raise TypeError("_type must be from the _ASN1Type enum")
self._oid = oid
self._value = value
self._type = _type
@property
def oid(self) -> ObjectIdentifier:
return self._oid
@property
def value(self) -> typing.Union[str, bytes]:
return self._value
@property
def rfc4514_attribute_name(self) -> str:
"""
The short attribute name (for example "CN") if available,
otherwise the OID dotted string.
"""
return _NAMEOID_TO_NAME.get(self.oid, self.oid.dotted_string)
def rfc4514_string(
self, attr_name_overrides: typing.Optional[_OidNameMap] = None
) -> str:
"""
Format as RFC4514 Distinguished Name string.
Use short attribute name if available, otherwise fall back to OID
dotted string.
"""
attr_name = (
attr_name_overrides.get(self.oid) if attr_name_overrides else None
)
if attr_name is None:
attr_name = self.rfc4514_attribute_name
return f"{attr_name}={_escape_dn_value(self.value)}"
def __eq__(self, other: object) -> bool:
if not isinstance(other, NameAttribute):
return NotImplemented
return self.oid == other.oid and self.value == other.value
def __hash__(self) -> int:
return hash((self.oid, self.value))
def __repr__(self) -> str:
return "<NameAttribute(oid={0.oid}, value={0.value!r})>".format(self)
class RelativeDistinguishedName:
def __init__(self, attributes: typing.Iterable[NameAttribute]):
attributes = list(attributes)
if not attributes:
raise ValueError("a relative distinguished name cannot be empty")
if not all(isinstance(x, NameAttribute) for x in attributes):
raise TypeError("attributes must be an iterable of NameAttribute")
# Keep list and frozenset to preserve attribute order where it matters
self._attributes = attributes
self._attribute_set = frozenset(attributes)
if len(self._attribute_set) != len(attributes):
raise ValueError("duplicate attributes are not allowed")
def get_attributes_for_oid(
self, oid: ObjectIdentifier
) -> typing.List[NameAttribute]:
return [i for i in self if i.oid == oid]
def rfc4514_string(
self, attr_name_overrides: typing.Optional[_OidNameMap] = None
) -> str:
"""
Format as RFC4514 Distinguished Name string.
Within each RDN, attributes are joined by '+', although that is rarely
used in certificates.
"""
return "+".join(
attr.rfc4514_string(attr_name_overrides)
for attr in self._attributes
)
def __eq__(self, other: object) -> bool:
if not isinstance(other, RelativeDistinguishedName):
return NotImplemented
return self._attribute_set == other._attribute_set
def __hash__(self) -> int:
return hash(self._attribute_set)
def __iter__(self) -> typing.Iterator[NameAttribute]:
return iter(self._attributes)
def __len__(self) -> int:
return len(self._attributes)
def __repr__(self) -> str:
return f"<RelativeDistinguishedName({self.rfc4514_string()})>"
class Name:
@typing.overload
def __init__(self, attributes: typing.Iterable[NameAttribute]) -> None:
...
@typing.overload
def __init__(
self, attributes: typing.Iterable[RelativeDistinguishedName]
) -> None:
...
def __init__(
self,
attributes: typing.Iterable[
typing.Union[NameAttribute, RelativeDistinguishedName]
],
) -> None:
attributes = list(attributes)
if all(isinstance(x, NameAttribute) for x in attributes):
self._attributes = [
RelativeDistinguishedName([typing.cast(NameAttribute, x)])
for x in attributes
]
elif all(isinstance(x, RelativeDistinguishedName) for x in attributes):
self._attributes = typing.cast(
typing.List[RelativeDistinguishedName], attributes
)
else:
raise TypeError(
"attributes must be a list of NameAttribute"
" or a list RelativeDistinguishedName"
)
@classmethod
def from_rfc4514_string(
cls,
data: str,
attr_name_overrides: typing.Optional[_NameOidMap] = None,
) -> "Name":
return _RFC4514NameParser(data, attr_name_overrides or {}).parse()
def rfc4514_string(
self, attr_name_overrides: typing.Optional[_OidNameMap] = None
) -> str:
"""
Format as RFC4514 Distinguished Name string.
For example 'CN=foobar.com,O=Foo Corp,C=US'
An X.509 name is a two-level structure: a list of sets of attributes.
Each list element is separated by ',' and within each list element, set
elements are separated by '+'. The latter is almost never used in
real world certificates. According to RFC4514 section 2.1 the
RDNSequence must be reversed when converting to string representation.
"""
return ",".join(
attr.rfc4514_string(attr_name_overrides)
for attr in reversed(self._attributes)
)
def get_attributes_for_oid(
self, oid: ObjectIdentifier
) -> typing.List[NameAttribute]:
return [i for i in self if i.oid == oid]
@property
def rdns(self) -> typing.List[RelativeDistinguishedName]:
return self._attributes
def public_bytes(self, backend: typing.Any = None) -> bytes:
return rust_x509.encode_name_bytes(self)
def __eq__(self, other: object) -> bool:
if not isinstance(other, Name):
return NotImplemented
return self._attributes == other._attributes
def __hash__(self) -> int:
# TODO: this is relatively expensive, if this looks like a bottleneck
# for you, consider optimizing!
return hash(tuple(self._attributes))
def __iter__(self) -> typing.Iterator[NameAttribute]:
for rdn in self._attributes:
for ava in rdn:
yield ava
def __len__(self) -> int:
return sum(len(rdn) for rdn in self._attributes)
def __repr__(self) -> str:
rdns = ",".join(attr.rfc4514_string() for attr in self._attributes)
return f"<Name({rdns})>"
class _RFC4514NameParser:
_OID_RE = re.compile(r"(0|([1-9]\d*))(\.(0|([1-9]\d*)))+")
_DESCR_RE = re.compile(r"[a-zA-Z][a-zA-Z\d-]*")
_PAIR = r"\\([\\ #=\"\+,;<>]|[\da-zA-Z]{2})"
_PAIR_RE = re.compile(_PAIR)
_LUTF1 = r"[\x01-\x1f\x21\x24-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]"
_SUTF1 = r"[\x01-\x21\x23-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]"
_TUTF1 = r"[\x01-\x1F\x21\x23-\x2A\x2D-\x3A\x3D\x3F-\x5B\x5D-\x7F]"
_UTFMB = rf"[\x80-{chr(sys.maxunicode)}]"
_LEADCHAR = rf"{_LUTF1}|{_UTFMB}"
_STRINGCHAR = rf"{_SUTF1}|{_UTFMB}"
_TRAILCHAR = rf"{_TUTF1}|{_UTFMB}"
_STRING_RE = re.compile(
rf"""
(
({_LEADCHAR}|{_PAIR})
(
({_STRINGCHAR}|{_PAIR})*
({_TRAILCHAR}|{_PAIR})
)?
)?
""",
re.VERBOSE,
)
_HEXSTRING_RE = re.compile(r"#([\da-zA-Z]{2})+")
def __init__(self, data: str, attr_name_overrides: _NameOidMap) -> None:
self._data = data
self._idx = 0
self._attr_name_overrides = attr_name_overrides
def _has_data(self) -> bool:
return self._idx < len(self._data)
def _peek(self) -> typing.Optional[str]:
if self._has_data():
return self._data[self._idx]
return None
def _read_char(self, ch: str) -> None:
if self._peek() != ch:
raise ValueError
self._idx += 1
def _read_re(self, pat) -> str:
match = pat.match(self._data, pos=self._idx)
if match is None:
raise ValueError
val = match.group()
self._idx += len(val)
return val
def parse(self) -> Name:
"""
Parses the `data` string and converts it to a Name.
According to RFC4514 section 2.1 the RDNSequence must be
reversed when converting to string representation. So, when
we parse it, we need to reverse again to get the RDNs on the
correct order.
"""
rdns = [self._parse_rdn()]
while self._has_data():
self._read_char(",")
rdns.append(self._parse_rdn())
return Name(reversed(rdns))
def _parse_rdn(self) -> RelativeDistinguishedName:
nas = [self._parse_na()]
while self._peek() == "+":
self._read_char("+")
nas.append(self._parse_na())
return RelativeDistinguishedName(nas)
def _parse_na(self) -> NameAttribute:
try:
oid_value = self._read_re(self._OID_RE)
except ValueError:
name = self._read_re(self._DESCR_RE)
oid = self._attr_name_overrides.get(
name, _NAME_TO_NAMEOID.get(name)
)
if oid is None:
raise ValueError
else:
oid = ObjectIdentifier(oid_value)
self._read_char("=")
if self._peek() == "#":
value = self._read_re(self._HEXSTRING_RE)
value = binascii.unhexlify(value[1:]).decode()
else:
raw_value = self._read_re(self._STRING_RE)
value = _unescape_dn_value(raw_value)
return NameAttribute(oid, value)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/x509/base.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import abc
import datetime
import os
import typing
from cryptography import utils
from cryptography.hazmat.bindings._rust import x509 as rust_x509
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import (
dsa,
ec,
ed448,
ed25519,
rsa,
x448,
x25519,
)
from cryptography.hazmat.primitives.asymmetric.types import (
CertificateIssuerPrivateKeyTypes,
CertificateIssuerPublicKeyTypes,
CertificatePublicKeyTypes,
)
from cryptography.x509.extensions import (
Extension,
Extensions,
ExtensionType,
_make_sequence_methods,
)
from cryptography.x509.name import Name, _ASN1Type
from cryptography.x509.oid import ObjectIdentifier
_EARLIEST_UTC_TIME = datetime.datetime(1950, 1, 1)
# This must be kept in sync with sign.rs's list of allowable types in
# identify_hash_type
_AllowedHashTypes = typing.Union[
hashes.SHA224,
hashes.SHA256,
hashes.SHA384,
hashes.SHA512,
hashes.SHA3_224,
hashes.SHA3_256,
hashes.SHA3_384,
hashes.SHA3_512,
]
class AttributeNotFound(Exception):
def __init__(self, msg: str, oid: ObjectIdentifier) -> None:
super().__init__(msg)
self.oid = oid
def _reject_duplicate_extension(
extension: Extension[ExtensionType],
extensions: typing.List[Extension[ExtensionType]],
) -> None:
# This is quadratic in the number of extensions
for e in extensions:
if e.oid == extension.oid:
raise ValueError("This extension has already been set.")
def _reject_duplicate_attribute(
oid: ObjectIdentifier,
attributes: typing.List[
typing.Tuple[ObjectIdentifier, bytes, typing.Optional[int]]
],
) -> None:
# This is quadratic in the number of attributes
for attr_oid, _, _ in attributes:
if attr_oid == oid:
raise ValueError("This attribute has already been set.")
def _convert_to_naive_utc_time(time: datetime.datetime) -> datetime.datetime:
"""Normalizes a datetime to a naive datetime in UTC.
time -- datetime to normalize. Assumed to be in UTC if not timezone
aware.
"""
if time.tzinfo is not None:
offset = time.utcoffset()
offset = offset if offset else datetime.timedelta()
return time.replace(tzinfo=None) - offset
else:
return time
class Attribute:
def __init__(
self,
oid: ObjectIdentifier,
value: bytes,
_type: int = _ASN1Type.UTF8String.value,
) -> None:
self._oid = oid
self._value = value
self._type = _type
@property
def oid(self) -> ObjectIdentifier:
return self._oid
@property
def value(self) -> bytes:
return self._value
def __repr__(self) -> str:
return f"<Attribute(oid={self.oid}, value={self.value!r})>"
def __eq__(self, other: object) -> bool:
if not isinstance(other, Attribute):
return NotImplemented
return (
self.oid == other.oid
and self.value == other.value
and self._type == other._type
)
def __hash__(self) -> int:
return hash((self.oid, self.value, self._type))
class Attributes:
def __init__(
self,
attributes: typing.Iterable[Attribute],
) -> None:
self._attributes = list(attributes)
__len__, __iter__, __getitem__ = _make_sequence_methods("_attributes")
def __repr__(self) -> str:
return f"<Attributes({self._attributes})>"
def get_attribute_for_oid(self, oid: ObjectIdentifier) -> Attribute:
for attr in self:
if attr.oid == oid:
return attr
raise AttributeNotFound(f"No {oid} attribute was found", oid)
class Version(utils.Enum):
v1 = 0
v3 = 2
class InvalidVersion(Exception):
def __init__(self, msg: str, parsed_version: int) -> None:
super().__init__(msg)
self.parsed_version = parsed_version
class Certificate(metaclass=abc.ABCMeta):
@abc.abstractmethod
def fingerprint(self, algorithm: hashes.HashAlgorithm) -> bytes:
"""
Returns bytes using digest passed.
"""
@property
@abc.abstractmethod
def serial_number(self) -> int:
"""
Returns certificate serial number
"""
@property
@abc.abstractmethod
def version(self) -> Version:
"""
Returns the certificate version
"""
@abc.abstractmethod
def public_key(self) -> CertificatePublicKeyTypes:
"""
Returns the public key
"""
@property
@abc.abstractmethod
def not_valid_before(self) -> datetime.datetime:
"""
Not before time (represented as UTC datetime)
"""
@property
@abc.abstractmethod
def not_valid_after(self) -> datetime.datetime:
"""
Not after time (represented as UTC datetime)
"""
@property
@abc.abstractmethod
def issuer(self) -> Name:
"""
Returns the issuer name object.
"""
@property
@abc.abstractmethod
def subject(self) -> Name:
"""
Returns the subject name object.
"""
@property
@abc.abstractmethod
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@property
@abc.abstractmethod
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@property
@abc.abstractmethod
def extensions(self) -> Extensions:
"""
Returns an Extensions object.
"""
@property
@abc.abstractmethod
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@property
@abc.abstractmethod
def tbs_certificate_bytes(self) -> bytes:
"""
Returns the tbsCertificate payload bytes as defined in RFC 5280.
"""
@property
@abc.abstractmethod
def tbs_precertificate_bytes(self) -> bytes:
"""
Returns the tbsCertificate payload bytes with the SCT list extension
stripped.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Computes a hash.
"""
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Serializes the certificate to PEM or DER format.
"""
@abc.abstractmethod
def verify_directly_issued_by(self, issuer: "Certificate") -> None:
"""
This method verifies that certificate issuer name matches the
issuer subject name and that the certificate is signed by the
issuer's private key. No other validation is performed.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
Certificate.register(rust_x509.Certificate)
class RevokedCertificate(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def serial_number(self) -> int:
"""
Returns the serial number of the revoked certificate.
"""
@property
@abc.abstractmethod
def revocation_date(self) -> datetime.datetime:
"""
Returns the date of when this certificate was revoked.
"""
@property
@abc.abstractmethod
def extensions(self) -> Extensions:
"""
Returns an Extensions object containing a list of Revoked extensions.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
RevokedCertificate.register(rust_x509.RevokedCertificate)
class _RawRevokedCertificate(RevokedCertificate):
def __init__(
self,
serial_number: int,
revocation_date: datetime.datetime,
extensions: Extensions,
):
self._serial_number = serial_number
self._revocation_date = revocation_date
self._extensions = extensions
@property
def serial_number(self) -> int:
return self._serial_number
@property
def revocation_date(self) -> datetime.datetime:
return self._revocation_date
@property
def extensions(self) -> Extensions:
return self._extensions
class CertificateRevocationList(metaclass=abc.ABCMeta):
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Serializes the CRL to PEM or DER format.
"""
@abc.abstractmethod
def fingerprint(self, algorithm: hashes.HashAlgorithm) -> bytes:
"""
Returns bytes using digest passed.
"""
@abc.abstractmethod
def get_revoked_certificate_by_serial_number(
self, serial_number: int
) -> typing.Optional[RevokedCertificate]:
"""
Returns an instance of RevokedCertificate or None if the serial_number
is not in the CRL.
"""
@property
@abc.abstractmethod
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@property
@abc.abstractmethod
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@property
@abc.abstractmethod
def issuer(self) -> Name:
"""
Returns the X509Name with the issuer of this CRL.
"""
@property
@abc.abstractmethod
def next_update(self) -> typing.Optional[datetime.datetime]:
"""
Returns the date of next update for this CRL.
"""
@property
@abc.abstractmethod
def last_update(self) -> datetime.datetime:
"""
Returns the date of last update for this CRL.
"""
@property
@abc.abstractmethod
def extensions(self) -> Extensions:
"""
Returns an Extensions object containing a list of CRL extensions.
"""
@property
@abc.abstractmethod
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@property
@abc.abstractmethod
def tbs_certlist_bytes(self) -> bytes:
"""
Returns the tbsCertList payload bytes as defined in RFC 5280.
"""
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __len__(self) -> int:
"""
Number of revoked certificates in the CRL.
"""
@typing.overload
def __getitem__(self, idx: int) -> RevokedCertificate:
...
@typing.overload
def __getitem__(self, idx: slice) -> typing.List[RevokedCertificate]:
...
@abc.abstractmethod
def __getitem__(
self, idx: typing.Union[int, slice]
) -> typing.Union[RevokedCertificate, typing.List[RevokedCertificate]]:
"""
Returns a revoked certificate (or slice of revoked certificates).
"""
@abc.abstractmethod
def __iter__(self) -> typing.Iterator[RevokedCertificate]:
"""
Iterator over the revoked certificates
"""
@abc.abstractmethod
def is_signature_valid(
self, public_key: CertificateIssuerPublicKeyTypes
) -> bool:
"""
Verifies signature of revocation list against given public key.
"""
CertificateRevocationList.register(rust_x509.CertificateRevocationList)
class CertificateSigningRequest(metaclass=abc.ABCMeta):
@abc.abstractmethod
def __eq__(self, other: object) -> bool:
"""
Checks equality.
"""
@abc.abstractmethod
def __hash__(self) -> int:
"""
Computes a hash.
"""
@abc.abstractmethod
def public_key(self) -> CertificatePublicKeyTypes:
"""
Returns the public key
"""
@property
@abc.abstractmethod
def subject(self) -> Name:
"""
Returns the subject name object.
"""
@property
@abc.abstractmethod
def signature_hash_algorithm(
self,
) -> typing.Optional[hashes.HashAlgorithm]:
"""
Returns a HashAlgorithm corresponding to the type of the digest signed
in the certificate.
"""
@property
@abc.abstractmethod
def signature_algorithm_oid(self) -> ObjectIdentifier:
"""
Returns the ObjectIdentifier of the signature algorithm.
"""
@property
@abc.abstractmethod
def extensions(self) -> Extensions:
"""
Returns the extensions in the signing request.
"""
@property
@abc.abstractmethod
def attributes(self) -> Attributes:
"""
Returns an Attributes object.
"""
@abc.abstractmethod
def public_bytes(self, encoding: serialization.Encoding) -> bytes:
"""
Encodes the request to PEM or DER format.
"""
@property
@abc.abstractmethod
def signature(self) -> bytes:
"""
Returns the signature bytes.
"""
@property
@abc.abstractmethod
def tbs_certrequest_bytes(self) -> bytes:
"""
Returns the PKCS#10 CertificationRequestInfo bytes as defined in RFC
2986.
"""
@property
@abc.abstractmethod
def is_signature_valid(self) -> bool:
"""
Verifies signature of signing request.
"""
@abc.abstractmethod
def get_attribute_for_oid(self, oid: ObjectIdentifier) -> bytes:
"""
Get the attribute value for a given OID.
"""
# Runtime isinstance checks need this since the rust class is not a subclass.
CertificateSigningRequest.register(rust_x509.CertificateSigningRequest)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_certificate(
data: bytes, backend: typing.Any = None
) -> Certificate:
return rust_x509.load_pem_x509_certificate(data)
def load_pem_x509_certificates(data: bytes) -> typing.List[Certificate]:
return rust_x509.load_pem_x509_certificates(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_certificate(
data: bytes, backend: typing.Any = None
) -> Certificate:
return rust_x509.load_der_x509_certificate(data)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_csr(
data: bytes, backend: typing.Any = None
) -> CertificateSigningRequest:
return rust_x509.load_pem_x509_csr(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_csr(
data: bytes, backend: typing.Any = None
) -> CertificateSigningRequest:
return rust_x509.load_der_x509_csr(data)
# Backend argument preserved for API compatibility, but ignored.
def load_pem_x509_crl(
data: bytes, backend: typing.Any = None
) -> CertificateRevocationList:
return rust_x509.load_pem_x509_crl(data)
# Backend argument preserved for API compatibility, but ignored.
def load_der_x509_crl(
data: bytes, backend: typing.Any = None
) -> CertificateRevocationList:
return rust_x509.load_der_x509_crl(data)
class CertificateSigningRequestBuilder:
def __init__(
self,
subject_name: typing.Optional[Name] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
attributes: typing.List[
typing.Tuple[ObjectIdentifier, bytes, typing.Optional[int]]
] = [],
):
"""
Creates an empty X.509 certificate request (v1).
"""
self._subject_name = subject_name
self._extensions = extensions
self._attributes = attributes
def subject_name(self, name: Name) -> "CertificateSigningRequestBuilder":
"""
Sets the certificate requestor's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._subject_name is not None:
raise ValueError("The subject name may only be set once.")
return CertificateSigningRequestBuilder(
name, self._extensions, self._attributes
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateSigningRequestBuilder":
"""
Adds an X.509 extension to the certificate request.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateSigningRequestBuilder(
self._subject_name,
self._extensions + [extension],
self._attributes,
)
def add_attribute(
self,
oid: ObjectIdentifier,
value: bytes,
*,
_tag: typing.Optional[_ASN1Type] = None,
) -> "CertificateSigningRequestBuilder":
"""
Adds an X.509 attribute with an OID and associated value.
"""
if not isinstance(oid, ObjectIdentifier):
raise TypeError("oid must be an ObjectIdentifier")
if not isinstance(value, bytes):
raise TypeError("value must be bytes")
if _tag is not None and not isinstance(_tag, _ASN1Type):
raise TypeError("tag must be _ASN1Type")
_reject_duplicate_attribute(oid, self._attributes)
if _tag is not None:
tag = _tag.value
else:
tag = None
return CertificateSigningRequestBuilder(
self._subject_name,
self._extensions,
self._attributes + [(oid, value, tag)],
)
def sign(
self,
private_key: CertificateIssuerPrivateKeyTypes,
algorithm: typing.Optional[_AllowedHashTypes],
backend: typing.Any = None,
) -> CertificateSigningRequest:
"""
Signs the request using the requestor's private key.
"""
if self._subject_name is None:
raise ValueError("A CertificateSigningRequest must have a subject")
return rust_x509.create_x509_csr(self, private_key, algorithm)
class CertificateBuilder:
_extensions: typing.List[Extension[ExtensionType]]
def __init__(
self,
issuer_name: typing.Optional[Name] = None,
subject_name: typing.Optional[Name] = None,
public_key: typing.Optional[CertificatePublicKeyTypes] = None,
serial_number: typing.Optional[int] = None,
not_valid_before: typing.Optional[datetime.datetime] = None,
not_valid_after: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
) -> None:
self._version = Version.v3
self._issuer_name = issuer_name
self._subject_name = subject_name
self._public_key = public_key
self._serial_number = serial_number
self._not_valid_before = not_valid_before
self._not_valid_after = not_valid_after
self._extensions = extensions
def issuer_name(self, name: Name) -> "CertificateBuilder":
"""
Sets the CA's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._issuer_name is not None:
raise ValueError("The issuer name may only be set once.")
return CertificateBuilder(
name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def subject_name(self, name: Name) -> "CertificateBuilder":
"""
Sets the requestor's distinguished name.
"""
if not isinstance(name, Name):
raise TypeError("Expecting x509.Name object.")
if self._subject_name is not None:
raise ValueError("The subject name may only be set once.")
return CertificateBuilder(
self._issuer_name,
name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def public_key(
self,
key: CertificatePublicKeyTypes,
) -> "CertificateBuilder":
"""
Sets the requestor's public key (as found in the signing request).
"""
if not isinstance(
key,
(
dsa.DSAPublicKey,
rsa.RSAPublicKey,
ec.EllipticCurvePublicKey,
ed25519.Ed25519PublicKey,
ed448.Ed448PublicKey,
x25519.X25519PublicKey,
x448.X448PublicKey,
),
):
raise TypeError(
"Expecting one of DSAPublicKey, RSAPublicKey,"
" EllipticCurvePublicKey, Ed25519PublicKey,"
" Ed448PublicKey, X25519PublicKey, or "
"X448PublicKey."
)
if self._public_key is not None:
raise ValueError("The public key may only be set once.")
return CertificateBuilder(
self._issuer_name,
self._subject_name,
key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def serial_number(self, number: int) -> "CertificateBuilder":
"""
Sets the certificate serial number.
"""
if not isinstance(number, int):
raise TypeError("Serial number must be of integral type.")
if self._serial_number is not None:
raise ValueError("The serial number may only be set once.")
if number <= 0:
raise ValueError("The serial number should be positive.")
# ASN.1 integers are always signed, so most significant bit must be
# zero.
if number.bit_length() >= 160: # As defined in RFC 5280
raise ValueError(
"The serial number should not be more than 159 " "bits."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
number,
self._not_valid_before,
self._not_valid_after,
self._extensions,
)
def not_valid_before(
self, time: datetime.datetime
) -> "CertificateBuilder":
"""
Sets the certificate activation time.
"""
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._not_valid_before is not None:
raise ValueError("The not valid before may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The not valid before date must be on or after"
" 1950 January 1)."
)
if self._not_valid_after is not None and time > self._not_valid_after:
raise ValueError(
"The not valid before date must be before the not valid after "
"date."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
time,
self._not_valid_after,
self._extensions,
)
def not_valid_after(self, time: datetime.datetime) -> "CertificateBuilder":
"""
Sets the certificate expiration time.
"""
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._not_valid_after is not None:
raise ValueError("The not valid after may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The not valid after date must be on or after"
" 1950 January 1."
)
if (
self._not_valid_before is not None
and time < self._not_valid_before
):
raise ValueError(
"The not valid after date must be after the not valid before "
"date."
)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
time,
self._extensions,
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateBuilder":
"""
Adds an X.509 extension to the certificate.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateBuilder(
self._issuer_name,
self._subject_name,
self._public_key,
self._serial_number,
self._not_valid_before,
self._not_valid_after,
self._extensions + [extension],
)
def sign(
self,
private_key: CertificateIssuerPrivateKeyTypes,
algorithm: typing.Optional[_AllowedHashTypes],
backend: typing.Any = None,
) -> Certificate:
"""
Signs the certificate using the CA's private key.
"""
if self._subject_name is None:
raise ValueError("A certificate must have a subject name")
if self._issuer_name is None:
raise ValueError("A certificate must have an issuer name")
if self._serial_number is None:
raise ValueError("A certificate must have a serial number")
if self._not_valid_before is None:
raise ValueError("A certificate must have a not valid before time")
if self._not_valid_after is None:
raise ValueError("A certificate must have a not valid after time")
if self._public_key is None:
raise ValueError("A certificate must have a public key")
return rust_x509.create_x509_certificate(self, private_key, algorithm)
class CertificateRevocationListBuilder:
_extensions: typing.List[Extension[ExtensionType]]
_revoked_certificates: typing.List[RevokedCertificate]
def __init__(
self,
issuer_name: typing.Optional[Name] = None,
last_update: typing.Optional[datetime.datetime] = None,
next_update: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
revoked_certificates: typing.List[RevokedCertificate] = [],
):
self._issuer_name = issuer_name
self._last_update = last_update
self._next_update = next_update
self._extensions = extensions
self._revoked_certificates = revoked_certificates
def issuer_name(
self, issuer_name: Name
) -> "CertificateRevocationListBuilder":
if not isinstance(issuer_name, Name):
raise TypeError("Expecting x509.Name object.")
if self._issuer_name is not None:
raise ValueError("The issuer name may only be set once.")
return CertificateRevocationListBuilder(
issuer_name,
self._last_update,
self._next_update,
self._extensions,
self._revoked_certificates,
)
def last_update(
self, last_update: datetime.datetime
) -> "CertificateRevocationListBuilder":
if not isinstance(last_update, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._last_update is not None:
raise ValueError("Last update may only be set once.")
last_update = _convert_to_naive_utc_time(last_update)
if last_update < _EARLIEST_UTC_TIME:
raise ValueError(
"The last update date must be on or after" " 1950 January 1."
)
if self._next_update is not None and last_update > self._next_update:
raise ValueError(
"The last update date must be before the next update date."
)
return CertificateRevocationListBuilder(
self._issuer_name,
last_update,
self._next_update,
self._extensions,
self._revoked_certificates,
)
def next_update(
self, next_update: datetime.datetime
) -> "CertificateRevocationListBuilder":
if not isinstance(next_update, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._next_update is not None:
raise ValueError("Last update may only be set once.")
next_update = _convert_to_naive_utc_time(next_update)
if next_update < _EARLIEST_UTC_TIME:
raise ValueError(
"The last update date must be on or after" " 1950 January 1."
)
if self._last_update is not None and next_update < self._last_update:
raise ValueError(
"The next update date must be after the last update date."
)
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
next_update,
self._extensions,
self._revoked_certificates,
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "CertificateRevocationListBuilder":
"""
Adds an X.509 extension to the certificate revocation list.
"""
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
self._next_update,
self._extensions + [extension],
self._revoked_certificates,
)
def add_revoked_certificate(
self, revoked_certificate: RevokedCertificate
) -> "CertificateRevocationListBuilder":
"""
Adds a revoked certificate to the CRL.
"""
if not isinstance(revoked_certificate, RevokedCertificate):
raise TypeError("Must be an instance of RevokedCertificate")
return CertificateRevocationListBuilder(
self._issuer_name,
self._last_update,
self._next_update,
self._extensions,
self._revoked_certificates + [revoked_certificate],
)
def sign(
self,
private_key: CertificateIssuerPrivateKeyTypes,
algorithm: typing.Optional[_AllowedHashTypes],
backend: typing.Any = None,
) -> CertificateRevocationList:
if self._issuer_name is None:
raise ValueError("A CRL must have an issuer name")
if self._last_update is None:
raise ValueError("A CRL must have a last update time")
if self._next_update is None:
raise ValueError("A CRL must have a next update time")
return rust_x509.create_x509_crl(self, private_key, algorithm)
class RevokedCertificateBuilder:
def __init__(
self,
serial_number: typing.Optional[int] = None,
revocation_date: typing.Optional[datetime.datetime] = None,
extensions: typing.List[Extension[ExtensionType]] = [],
):
self._serial_number = serial_number
self._revocation_date = revocation_date
self._extensions = extensions
def serial_number(self, number: int) -> "RevokedCertificateBuilder":
if not isinstance(number, int):
raise TypeError("Serial number must be of integral type.")
if self._serial_number is not None:
raise ValueError("The serial number may only be set once.")
if number <= 0:
raise ValueError("The serial number should be positive")
# ASN.1 integers are always signed, so most significant bit must be
# zero.
if number.bit_length() >= 160: # As defined in RFC 5280
raise ValueError(
"The serial number should not be more than 159 " "bits."
)
return RevokedCertificateBuilder(
number, self._revocation_date, self._extensions
)
def revocation_date(
self, time: datetime.datetime
) -> "RevokedCertificateBuilder":
if not isinstance(time, datetime.datetime):
raise TypeError("Expecting datetime object.")
if self._revocation_date is not None:
raise ValueError("The revocation date may only be set once.")
time = _convert_to_naive_utc_time(time)
if time < _EARLIEST_UTC_TIME:
raise ValueError(
"The revocation date must be on or after" " 1950 January 1."
)
return RevokedCertificateBuilder(
self._serial_number, time, self._extensions
)
def add_extension(
self, extval: ExtensionType, critical: bool
) -> "RevokedCertificateBuilder":
if not isinstance(extval, ExtensionType):
raise TypeError("extension must be an ExtensionType")
extension = Extension(extval.oid, critical, extval)
_reject_duplicate_extension(extension, self._extensions)
return RevokedCertificateBuilder(
self._serial_number,
self._revocation_date,
self._extensions + [extension],
)
def build(self, backend: typing.Any = None) -> RevokedCertificate:
if self._serial_number is None:
raise ValueError("A revoked certificate must have a serial number")
if self._revocation_date is None:
raise ValueError(
"A revoked certificate must have a revocation date"
)
return _RawRevokedCertificate(
self._serial_number,
self._revocation_date,
Extensions(self._extensions),
)
def random_serial_number() -> int:
return int.from_bytes(os.urandom(20), "big") >> 1
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography/x509/certificate_transparency.py | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import abc
import datetime
from cryptography import utils
from cryptography.hazmat.bindings._rust import x509 as rust_x509
from cryptography.hazmat.primitives.hashes import HashAlgorithm
class LogEntryType(utils.Enum):
X509_CERTIFICATE = 0
PRE_CERTIFICATE = 1
class Version(utils.Enum):
v1 = 0
class SignatureAlgorithm(utils.Enum):
"""
Signature algorithms that are valid for SCTs.
These are exactly the same as SignatureAlgorithm in RFC 5246 (TLS 1.2).
See: <https://datatracker.ietf.org/doc/html/rfc5246#section-7.4.1.4.1>
"""
ANONYMOUS = 0
RSA = 1
DSA = 2
ECDSA = 3
class SignedCertificateTimestamp(metaclass=abc.ABCMeta):
@property
@abc.abstractmethod
def version(self) -> Version:
"""
Returns the SCT version.
"""
@property
@abc.abstractmethod
def log_id(self) -> bytes:
"""
Returns an identifier indicating which log this SCT is for.
"""
@property
@abc.abstractmethod
def timestamp(self) -> datetime.datetime:
"""
Returns the timestamp for this SCT.
"""
@property
@abc.abstractmethod
def entry_type(self) -> LogEntryType:
"""
Returns whether this is an SCT for a certificate or pre-certificate.
"""
@property
@abc.abstractmethod
def signature_hash_algorithm(self) -> HashAlgorithm:
"""
Returns the hash algorithm used for the SCT's signature.
"""
@property
@abc.abstractmethod
def signature_algorithm(self) -> SignatureAlgorithm:
"""
Returns the signing algorithm used for the SCT's signature.
"""
@property
@abc.abstractmethod
def signature(self) -> bytes:
"""
Returns the signature for this SCT.
"""
@property
@abc.abstractmethod
def extension_bytes(self) -> bytes:
"""
Returns the raw bytes of any extensions for this SCT.
"""
SignedCertificateTimestamp.register(rust_x509.Sct)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/inflection-0.5.1.dist-info/RECORD | __pycache__/inflection.cpython-311.pyc,,
inflection-0.5.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
inflection-0.5.1.dist-info/LICENSE,sha256=np-bSy51Q60UhvEyi_nsUOWl4rucrOtZZSSW3WHmQTM,1062
inflection-0.5.1.dist-info/METADATA,sha256=HzhEOEIN1xKH0DD-z9mPJ2_4ZXu_mYk4DVDxmcHiDCU,1730
inflection-0.5.1.dist-info/RECORD,,
inflection-0.5.1.dist-info/WHEEL,sha256=ADKeyaGyKF5DwBNE0sRE5pvW-bSkFMJfBuhzZ3rceP4,110
inflection-0.5.1.dist-info/top_level.txt,sha256=AT2vYRZGMGYnasNdj4oJbpSddltYSZ3cZGH8Bjo7xQo,11
inflection.py,sha256=kJmjhziRjCeORghR4iWBJQqmXLOpFRwseGVW6gFheas,11437
inflection/__init__.py,sha256=Py387trh0P9zmcI45w2gLrDAplji9kmtGr5s7DY3TD8,11437
inflection/__pycache__/__init__.cpython-311.pyc,,
inflection/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/inflection-0.5.1.dist-info/LICENSE | Copyright (C) 2012-2020 Janne Vanhala
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/inflection-0.5.1.dist-info/WHEEL | Wheel-Version: 1.0
Generator: bdist_wheel (0.35.1)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/inflection-0.5.1.dist-info/top_level.txt | inflection
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/inflection-0.5.1.dist-info/INSTALLER | pip
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/inflection-0.5.1.dist-info/METADATA | Metadata-Version: 2.1
Name: inflection
Version: 0.5.1
Summary: A port of Ruby on Rails inflector to Python
Home-page: https://github.com/jpvanhal/inflection
Author: Janne Vanhala
Author-email: janne.vanhala@gmail.com
License: MIT
Platform: UNKNOWN
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: Natural Language :: English
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.5
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Requires-Python: >=3.5
Inflection
==========
|build status|_
.. |build status| image:: https://travis-ci.org/jpvanhal/inflection.svg?branch=master
:alt: Build Status
.. _build status: http://travis-ci.org/jpvanhal/inflection
Inflection is a string transformation library. It singularizes and pluralizes
English words, and transforms strings from CamelCase to underscored string.
Inflection is a port of `Ruby on Rails`_' `inflector`_ to Python.
.. _Ruby on Rails: http://rubyonrails.org
.. _inflector: http://api.rubyonrails.org/classes/ActiveSupport/Inflector.html
Resources
---------
- `Documentation <https://inflection.readthedocs.io/>`_
- `Issue Tracker <http://github.com/jpvanhal/inflection/issues>`_
- `Code <http://github.com/jpvanhal/inflection>`_
- `Development Version
<http://github.com/jpvanhal/inflection/zipball/master#egg=Inflection-dev>`_
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/RECORD | cryptography-40.0.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
cryptography-40.0.2.dist-info/LICENSE,sha256=Q9rSzHUqtyHNmp827OcPtTq3cTVR8tPYaU2OjFoG1uI,323
cryptography-40.0.2.dist-info/LICENSE.APACHE,sha256=qsc7MUj20dcRHbyjIJn2jSbGRMaBOuHk8F9leaomY_4,11360
cryptography-40.0.2.dist-info/LICENSE.BSD,sha256=YCxMdILeZHndLpeTzaJ15eY9dz2s0eymiSMqtwCPtPs,1532
cryptography-40.0.2.dist-info/LICENSE.PSF,sha256=aT7ApmKzn5laTyUrA6YiKUVHDBtvEsoCkY5O_g32S58,2415
cryptography-40.0.2.dist-info/METADATA,sha256=4vr_Ul75nFZ67oS_cZnlG8AlL_7Vf22dXzFkq5RyGI4,5429
cryptography-40.0.2.dist-info/RECORD,,
cryptography-40.0.2.dist-info/WHEEL,sha256=z0IicLszzw49wRGUWpQ-tg9c2xoB_vTQrgq52nbV4V0,114
cryptography-40.0.2.dist-info/top_level.txt,sha256=KNaT-Sn2K4uxNaEbe6mYdDn3qWDMlp4y-MtWfB73nJc,13
cryptography/__about__.py,sha256=JBoadGQdL7H57fU7gVI4SwOFyL2HeX-BG9g5NeifhL0,409
cryptography/__init__.py,sha256=v4pF_XcZ6sp_b7YlfP2eJA4lNCckeH2NCzN6WYXNnEc,759
cryptography/__pycache__/__about__.cpython-311.pyc,,
cryptography/__pycache__/__init__.cpython-311.pyc,,
cryptography/__pycache__/exceptions.cpython-311.pyc,,
cryptography/__pycache__/fernet.cpython-311.pyc,,
cryptography/__pycache__/utils.cpython-311.pyc,,
cryptography/exceptions.py,sha256=GNQJUZ9hpKs2ISYLQfA3FlwGjuWRTdhpsSXLmOex6j4,1405
cryptography/fernet.py,sha256=qO4sQurx79k-5yOh4UnUZGm51zod0wRXJchz0l063To,6851
cryptography/hazmat/__init__.py,sha256=OYlvgprzULzZlsf3yYTsd6VUVyQmpsbHjgJdNnsyRwE,418
cryptography/hazmat/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/__pycache__/_oid.cpython-311.pyc,,
cryptography/hazmat/_oid.py,sha256=rCvnwb0z0VCKn7Y92IEQAoPErrANWREydYflZSNRrao,14155
cryptography/hazmat/backends/__init__.py,sha256=bgrjB1SX2vXX-rmfG7A4PqGkq-isqQVXGaZtjWHAgj0,324
cryptography/hazmat/backends/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__init__.py,sha256=oCa7eZbqvHsQ1pBeD_OOfnGxVaZbCfWnAKnHqOyPf1c,270
cryptography/hazmat/backends/openssl/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/aead.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/backend.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/ciphers.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/cmac.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/decode_asn1.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/dh.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/dsa.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/ec.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/ed25519.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/ed448.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/hashes.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/hmac.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/poly1305.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/rsa.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/utils.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/__pycache__/x448.cpython-311.pyc,,
cryptography/hazmat/backends/openssl/aead.py,sha256=wzIsASMPfFuTEoCPSbfbuN67BglLdvENTft8j-fQDOM,10025
cryptography/hazmat/backends/openssl/backend.py,sha256=ENYxxus4QtRhQFJBN5ezgKgris87MGj8nzVhcuE3Eyg,91919
cryptography/hazmat/backends/openssl/ciphers.py,sha256=uCd2tiwF_-wdjgr2GSMHB6o6EKutHFh053feOjJkKWg,10346
cryptography/hazmat/backends/openssl/cmac.py,sha256=cFZtDpqN5PNzo1X9tm8N8WDV5X81GRFXuXRUsjyFtF4,3005
cryptography/hazmat/backends/openssl/decode_asn1.py,sha256=nSqtgO5MJVf_UUkvw9tez10zhGnsGHq24OP1X2GKOe4,1113
cryptography/hazmat/backends/openssl/dh.py,sha256=1CNiNiqxAhzwfzaJcFT0M1vta_iEKPkiyO36ercd1dw,12186
cryptography/hazmat/backends/openssl/dsa.py,sha256=SQwoCTiNHrWjDQOFag3GznWG5K9CWM1AizqJ4usTRbY,8927
cryptography/hazmat/backends/openssl/ec.py,sha256=Wh83LtvxRfPHC-ZIxSUuCOFqIx1KT_jeC9gwCtLr1fY,11197
cryptography/hazmat/backends/openssl/ed25519.py,sha256=adWaawleloe9T0BctejcclybE51dwb-CmL_b0f6zBiU,5921
cryptography/hazmat/backends/openssl/ed448.py,sha256=Ja_GMzDBcs_8N2PpmU2dd6sszbJh3xP-TrN88MkQLBI,5875
cryptography/hazmat/backends/openssl/hashes.py,sha256=yFuHeO8qDPRbH2B9JJtW51wEVfhu11SFs3lhHBHGyPA,3240
cryptography/hazmat/backends/openssl/hmac.py,sha256=mN7irlzO6Rbc3UIDqlySwaW5KoCn28N8gKS3lh9WEUg,3094
cryptography/hazmat/backends/openssl/poly1305.py,sha256=Oivx5k9DcAU_BSySxEQiw5tE1pcz-ljmFpmXAPZqJrI,2513
cryptography/hazmat/backends/openssl/rsa.py,sha256=zrFVhttn-pc8HHmRZjR42z-XinFRvBZTftGLrPjqMMA,21580
cryptography/hazmat/backends/openssl/utils.py,sha256=VZHD8U8p3G00LyeS0ImY36iu7TC0RW7nx9f2BCOAyQs,2156
cryptography/hazmat/backends/openssl/x448.py,sha256=6tZgh44ipS_UWJ6amueXxc8xIXdIfFtdpvnhri-oxXs,4339
cryptography/hazmat/bindings/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
cryptography/hazmat/bindings/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/bindings/_rust.abi3.so,sha256=Gt2626Ns8ejVh0VX-WYqD4asaaKKbvDvsrdiwH0ftf8,15340458
cryptography/hazmat/bindings/_rust/__init__.pyi,sha256=IumK7zP9Ko3HjLLb5hwZiY2rbfmfsuyTZLLcHOMvSdk,981
cryptography/hazmat/bindings/_rust/_openssl.pyi,sha256=mpNJLuYLbCVrd5i33FBTmWwL_55Dw7JPkSLlSX9Q7oI,230
cryptography/hazmat/bindings/_rust/asn1.pyi,sha256=9CyI-grOsLQB_hfnhJPoG9dNOdJ7Zg6B0iUpzCowh44,592
cryptography/hazmat/bindings/_rust/ocsp.pyi,sha256=RzVaLkY0y9L8W8opAL_uVD8bySKxP23pSQtEbLOStXI,905
cryptography/hazmat/bindings/_rust/openssl/__init__.pyi,sha256=sSz-RQXVQZ5EDbmEr0e5Km4OqrBKxHXUQwUQmRRkfdw,701
cryptography/hazmat/bindings/_rust/openssl/x25519.pyi,sha256=-1F5QDZfrdhmDLKTeSERuuDUHBTV-EhxIYk9mjpwcG4,616
cryptography/hazmat/bindings/_rust/pkcs7.pyi,sha256=VkTC78wjJgb_qrboOYIFPuFZ3W46zsr6zsxnlrOMwao,460
cryptography/hazmat/bindings/_rust/x509.pyi,sha256=RaSbjBtObgnM66n1IudB34cFXrXamNpk_b2agiT99qE,1743
cryptography/hazmat/bindings/openssl/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
cryptography/hazmat/bindings/openssl/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/bindings/openssl/__pycache__/_conditional.cpython-311.pyc,,
cryptography/hazmat/bindings/openssl/__pycache__/binding.cpython-311.pyc,,
cryptography/hazmat/bindings/openssl/_conditional.py,sha256=uruUpaLLjgG5z2sckNFxS5TyJRhLcQ4zacklfdiEo8A,9165
cryptography/hazmat/bindings/openssl/binding.py,sha256=2tiCAZziG2bMsa9Ke05hYY8EAiyczxOrvoCMFS_Ly38,7893
cryptography/hazmat/primitives/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
cryptography/hazmat/primitives/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/_asymmetric.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/_cipheralgorithm.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/_serialization.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/cmac.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/constant_time.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/hashes.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/hmac.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/keywrap.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/padding.cpython-311.pyc,,
cryptography/hazmat/primitives/__pycache__/poly1305.cpython-311.pyc,,
cryptography/hazmat/primitives/_asymmetric.py,sha256=QacvnyA1fcXWbSAASCiodHVcTYwkaMdzq6KUIlaO7H0,496
cryptography/hazmat/primitives/_cipheralgorithm.py,sha256=TAlnDCAdYaa23-mb0TTbFLFhWwfdBF1DtXQdY9Koqf0,1057
cryptography/hazmat/primitives/_serialization.py,sha256=r2ECtWEJ3JEgSpGkpaZrMfGhoQWdTHIn4gyLCB71fMg,5188
cryptography/hazmat/primitives/asymmetric/__init__.py,sha256=s9oKCQ2ycFdXoERdS1imafueSkBsL9kvbyfghaauZ9Y,180
cryptography/hazmat/primitives/asymmetric/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/dh.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/dsa.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/ec.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/ed25519.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/ed448.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/padding.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/rsa.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/types.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/utils.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/x25519.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/__pycache__/x448.cpython-311.pyc,,
cryptography/hazmat/primitives/asymmetric/dh.py,sha256=qm9VWIMPIaWUlCxlscb5bAM2aCe11fu-y85Y6nYjC7I,6619
cryptography/hazmat/primitives/asymmetric/dsa.py,sha256=JufsxrrxeJQlsiWMmx_44l90FNRw19o9kcKtk4rO8TU,7885
cryptography/hazmat/primitives/asymmetric/ec.py,sha256=CdxppDV1lV2QlrQ0EhniqvFi8wp8PDYsvFWdpzyyVIY,12725
cryptography/hazmat/primitives/asymmetric/ed25519.py,sha256=MqgOJFdMOXcMBJ-b84tJYOOkORL9xmEUHwCpVpa1k2o,3344
cryptography/hazmat/primitives/asymmetric/ed448.py,sha256=6XjKKEvLQUzZgOFyLZCRkK4Tl0sKMrxfL8CYx8_omxM,3264
cryptography/hazmat/primitives/asymmetric/padding.py,sha256=EkKuY9e6UFqSuQ0LvyKYKl_L19tOfNCTlHWEiKgHeUc,2690
cryptography/hazmat/primitives/asymmetric/rsa.py,sha256=njFky5AkSrsBh47PeVLjj81SOLOiZaxAUSzGWD2Znxw,11479
cryptography/hazmat/primitives/asymmetric/types.py,sha256=_etLWzFIYf01_NHTi3lg5q593wckK2LXxAK_SF94Dpk,2960
cryptography/hazmat/primitives/asymmetric/utils.py,sha256=p6nF7EzF0sp5GYFTw1HEhPYYjuTik53WTUkvuPIfDRk,755
cryptography/hazmat/primitives/asymmetric/x25519.py,sha256=H9gXtrvoO8qJutrDJ-rQNW1kjdbydkp6MD3PWxDWDiQ,3289
cryptography/hazmat/primitives/asymmetric/x448.py,sha256=u9Ma5viyGMVjil4tv9GKsBxcT0rikom9MigjoJ3OgQ4,3189
cryptography/hazmat/primitives/ciphers/__init__.py,sha256=2K5I_haxK0BLNqSZcQUqcjf8FmHY8xV1U-XjfgUmkM8,645
cryptography/hazmat/primitives/ciphers/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/primitives/ciphers/__pycache__/aead.cpython-311.pyc,,
cryptography/hazmat/primitives/ciphers/__pycache__/algorithms.cpython-311.pyc,,
cryptography/hazmat/primitives/ciphers/__pycache__/base.cpython-311.pyc,,
cryptography/hazmat/primitives/ciphers/__pycache__/modes.cpython-311.pyc,,
cryptography/hazmat/primitives/ciphers/aead.py,sha256=c1wfOJ5alX-pDkWxz9cSq5M7ug2CtNc6gCovyMoy_bY,12032
cryptography/hazmat/primitives/ciphers/algorithms.py,sha256=vR1tcKRpaYbQhv3dLIiQNmaZpq7x9kLCdUvOgrWIG0I,4965
cryptography/hazmat/primitives/ciphers/base.py,sha256=RYTyXd8gXgBggdEl0cWcEnx6mkd1YsdRT_3tLEV4iGU,8269
cryptography/hazmat/primitives/ciphers/modes.py,sha256=U_flGFnHih452GO9X66BSN8U9xUmYh12wB6FFEIk6Xc,8326
cryptography/hazmat/primitives/cmac.py,sha256=ZbpwI87EhO3maiwqzttN1z0ObsAO1ufnl2Px5b9uJ1c,2036
cryptography/hazmat/primitives/constant_time.py,sha256=6bkW00QjhKusdgsQbexXhMlGX0XRN59XNmxWS2W38NA,387
cryptography/hazmat/primitives/hashes.py,sha256=cwMQYC0An0UOVTFWqeDIXiokSBorSh4BwHKSWxz8HB0,6041
cryptography/hazmat/primitives/hmac.py,sha256=pKiyxmJVcixW7Xk7w4ofde6Z7F8UohqGZa01PoxRotc,2122
cryptography/hazmat/primitives/kdf/__init__.py,sha256=DcZhzfLG8d8IYBH771lGTVU5S87OQDpu3nrfOwZnsmA,715
cryptography/hazmat/primitives/kdf/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/primitives/kdf/__pycache__/concatkdf.cpython-311.pyc,,
cryptography/hazmat/primitives/kdf/__pycache__/hkdf.cpython-311.pyc,,
cryptography/hazmat/primitives/kdf/__pycache__/kbkdf.cpython-311.pyc,,
cryptography/hazmat/primitives/kdf/__pycache__/pbkdf2.cpython-311.pyc,,
cryptography/hazmat/primitives/kdf/__pycache__/scrypt.cpython-311.pyc,,
cryptography/hazmat/primitives/kdf/__pycache__/x963kdf.cpython-311.pyc,,
cryptography/hazmat/primitives/kdf/concatkdf.py,sha256=giEvBnD5eAB6ymUcYKSgd_2D_qRznOUSaplS1CQoE_A,3691
cryptography/hazmat/primitives/kdf/hkdf.py,sha256=cKBjtCVaKVRhUzbNK99HDEyWUxvPwSfOIygKAMyyb3g,3010
cryptography/hazmat/primitives/kdf/kbkdf.py,sha256=Ys2ITSbEw49V1v_DagQBd17owQr2A2iyPue4mot4Z_g,9196
cryptography/hazmat/primitives/kdf/pbkdf2.py,sha256=wEMH4CJfPccCg9apQLXyWUWBrZLTpYLLnoZEnzvaHQo,2032
cryptography/hazmat/primitives/kdf/scrypt.py,sha256=Wt7jj51vsedNtQX-LZI41geqUZnBFYnrhOXpoheLsOM,2227
cryptography/hazmat/primitives/kdf/x963kdf.py,sha256=bDhxg0gllboQ--a9gdDaiTbO6XMemZPlu3TW91dRp8o,1967
cryptography/hazmat/primitives/keywrap.py,sha256=TWqyG9K7k-Ymq4kcIw7u3NIKUPVDtv6bimwxIJYTe20,5643
cryptography/hazmat/primitives/padding.py,sha256=xruasOE5Cd8KEQ-yp9W6v9WKPvKH-GudHCPKQ7A8HfI,6207
cryptography/hazmat/primitives/poly1305.py,sha256=QvxPMrqjgKJt0mOZSeZKk4NcxsNCd2kgfI-X1CmyUW4,1837
cryptography/hazmat/primitives/serialization/__init__.py,sha256=G-BRfGpQzYrRf5r9QS9BN7QdgpF1k5cLCfdlMH_Z0yw,1618
cryptography/hazmat/primitives/serialization/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/primitives/serialization/__pycache__/base.cpython-311.pyc,,
cryptography/hazmat/primitives/serialization/__pycache__/pkcs12.cpython-311.pyc,,
cryptography/hazmat/primitives/serialization/__pycache__/pkcs7.cpython-311.pyc,,
cryptography/hazmat/primitives/serialization/__pycache__/ssh.cpython-311.pyc,,
cryptography/hazmat/primitives/serialization/base.py,sha256=5ebkgRqVHpVOST3OkcWkdoqo-0sVfdIuZUoyL8tj0es,1955
cryptography/hazmat/primitives/serialization/pkcs12.py,sha256=p8ZWAxNEat-MRov2JaRR3TUAtcp8MAE6v4DDodNQURA,6731
cryptography/hazmat/primitives/serialization/pkcs7.py,sha256=AeyFKpvoll-AUHkLKMGh5lZg7xGwVWl9Y6fXyxdhXFs,7362
cryptography/hazmat/primitives/serialization/ssh.py,sha256=Tgt8fK1F4OyZngYPINW2zor8iUosv2yrQ7qLlJrztlo,48438
cryptography/hazmat/primitives/twofactor/__init__.py,sha256=ZHo4zwWidFP2RWFl8luiNuYkVMZPghzx54izPNSCtD4,222
cryptography/hazmat/primitives/twofactor/__pycache__/__init__.cpython-311.pyc,,
cryptography/hazmat/primitives/twofactor/__pycache__/hotp.cpython-311.pyc,,
cryptography/hazmat/primitives/twofactor/__pycache__/totp.cpython-311.pyc,,
cryptography/hazmat/primitives/twofactor/hotp.py,sha256=LLMBoHczZen-hfUd6NmECPjGIc5kyys7pvbHiZOpkKE,2977
cryptography/hazmat/primitives/twofactor/totp.py,sha256=hEsH9rd8AdTEjVjswPrB9HzRiZZSV9qfpJDmyEPppg4,1437
cryptography/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
cryptography/utils.py,sha256=y3KjdVAv2vW_42r3TJs4YGq9IMPWAjQhamV9UzPU31k,3981
cryptography/x509/__init__.py,sha256=4WL6dxMLEWM9Wa9f_SpNbwcQNg76dut5zigRcHpRoTA,7719
cryptography/x509/__pycache__/__init__.cpython-311.pyc,,
cryptography/x509/__pycache__/base.cpython-311.pyc,,
cryptography/x509/__pycache__/certificate_transparency.cpython-311.pyc,,
cryptography/x509/__pycache__/extensions.cpython-311.pyc,,
cryptography/x509/__pycache__/general_name.cpython-311.pyc,,
cryptography/x509/__pycache__/name.cpython-311.pyc,,
cryptography/x509/__pycache__/ocsp.cpython-311.pyc,,
cryptography/x509/__pycache__/oid.cpython-311.pyc,,
cryptography/x509/base.py,sha256=N24nEkrizgtWi5Acd1M9gCbyp8mGdw5srTq_wgTkEzo,34966
cryptography/x509/certificate_transparency.py,sha256=jkjOvVu8bS5ljHov2AWdWScENQxylmDgESk01koC0Rs,2226
cryptography/x509/extensions.py,sha256=XWx_WnFCu4s_GA_aalW3OZwhjkyttW6OGJsBaz3IDbs,65516
cryptography/x509/general_name.py,sha256=EExe3dR0lBj6V8i4R_nEhj-Vj1B0UIRmRil3wqMBaDA,7853
cryptography/x509/name.py,sha256=krFYM8XyZrEHMDFChMwVDd3D-5cq40VmSWrZF2lqXZc,14821
cryptography/x509/ocsp.py,sha256=gfVQzFPPmUh8SYzVX000GeWNSLka6EYq3AUBvANTk8c,18513
cryptography/x509/oid.py,sha256=dAllMplMi_Kc_lEiQKnSM-rTN5w--a1UZucV-HvQOb0,793
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/LICENSE | This software is made available under the terms of *either* of the licenses
found in LICENSE.APACHE or LICENSE.BSD. Contributions to cryptography are made
under the terms of *both* these licenses.
The code used in the OS random engine is derived from CPython, and is licensed
under the terms of the PSF License Agreement.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/LICENSE.PSF | 1. This LICENSE AGREEMENT is between the Python Software Foundation ("PSF"), and
the Individual or Organization ("Licensee") accessing and otherwise using Python
2.7.12 software in source or binary form and its associated documentation.
2. Subject to the terms and conditions of this License Agreement, PSF hereby
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
analyze, test, perform and/or display publicly, prepare derivative works,
distribute, and otherwise use Python 2.7.12 alone or in any derivative
version, provided, however, that PSF's License Agreement and PSF's notice of
copyright, i.e., "Copyright © 2001-2016 Python Software Foundation; All Rights
Reserved" are retained in Python 2.7.12 alone or in any derivative version
prepared by Licensee.
3. In the event Licensee prepares a derivative work that is based on or
incorporates Python 2.7.12 or any part thereof, and wants to make the
derivative work available to others as provided herein, then Licensee hereby
agrees to include in any such work a brief summary of the changes made to Python
2.7.12.
4. PSF is making Python 2.7.12 available to Licensee on an "AS IS" basis.
PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR IMPLIED. BY WAY OF
EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND DISCLAIMS ANY REPRESENTATION OR
WARRANTY OF MERCHANTABILITY OR FITNESS FOR ANY PARTICULAR PURPOSE OR THAT THE
USE OF PYTHON 2.7.12 WILL NOT INFRINGE ANY THIRD PARTY RIGHTS.
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON 2.7.12
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS A RESULT OF
MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON 2.7.12, OR ANY DERIVATIVE
THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
6. This License Agreement will automatically terminate upon a material breach of
its terms and conditions.
7. Nothing in this License Agreement shall be deemed to create any relationship
of agency, partnership, or joint venture between PSF and Licensee. This License
Agreement does not grant permission to use PSF trademarks or trade name in a
trademark sense to endorse or promote products or services of Licensee, or any
third party.
8. By copying, installing or otherwise using Python 2.7.12, Licensee agrees
to be bound by the terms and conditions of this License Agreement.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/LICENSE.APACHE |
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/WHEEL | Wheel-Version: 1.0
Generator: bdist_wheel (0.40.0)
Root-Is-Purelib: false
Tag: cp36-abi3-macosx_10_12_universal2
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/LICENSE.BSD | Copyright (c) Individual contributors.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. Neither the name of PyCA Cryptography nor the names of its contributors
may be used to endorse or promote products derived from this software
without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/top_level.txt | cryptography
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/INSTALLER | pip
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/cryptography-40.0.2.dist-info/METADATA | Metadata-Version: 2.1
Name: cryptography
Version: 40.0.2
Summary: cryptography is a package which provides cryptographic recipes and primitives to Python developers.
Home-page: https://github.com/pyca/cryptography
Author: The Python Cryptographic Authority and individual contributors
Author-email: cryptography-dev@python.org
License: (Apache-2.0 OR BSD-3-Clause) AND PSF-2.0
Project-URL: Documentation, https://cryptography.io/
Project-URL: Source, https://github.com/pyca/cryptography/
Project-URL: Issues, https://github.com/pyca/cryptography/issues
Project-URL: Changelog, https://cryptography.io/en/latest/changelog/
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: License :: OSI Approved :: BSD License
Classifier: Natural Language :: English
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Operating System :: POSIX
Classifier: Operating System :: POSIX :: BSD
Classifier: Operating System :: POSIX :: Linux
Classifier: Operating System :: Microsoft :: Windows
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.6
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Security :: Cryptography
Requires-Python: >=3.6
Description-Content-Type: text/x-rst
License-File: LICENSE
License-File: LICENSE.APACHE
License-File: LICENSE.BSD
License-File: LICENSE.PSF
Requires-Dist: cffi (>=1.12)
Provides-Extra: docs
Requires-Dist: sphinx (>=5.3.0) ; extra == 'docs'
Requires-Dist: sphinx-rtd-theme (>=1.1.1) ; extra == 'docs'
Provides-Extra: docstest
Requires-Dist: pyenchant (>=1.6.11) ; extra == 'docstest'
Requires-Dist: twine (>=1.12.0) ; extra == 'docstest'
Requires-Dist: sphinxcontrib-spelling (>=4.0.1) ; extra == 'docstest'
Provides-Extra: pep8test
Requires-Dist: black ; extra == 'pep8test'
Requires-Dist: ruff ; extra == 'pep8test'
Requires-Dist: mypy ; extra == 'pep8test'
Requires-Dist: check-manifest ; extra == 'pep8test'
Provides-Extra: sdist
Requires-Dist: setuptools-rust (>=0.11.4) ; extra == 'sdist'
Provides-Extra: ssh
Requires-Dist: bcrypt (>=3.1.5) ; extra == 'ssh'
Provides-Extra: test
Requires-Dist: pytest (>=6.2.0) ; extra == 'test'
Requires-Dist: pytest-shard (>=0.1.2) ; extra == 'test'
Requires-Dist: pytest-benchmark ; extra == 'test'
Requires-Dist: pytest-cov ; extra == 'test'
Requires-Dist: pytest-subtests ; extra == 'test'
Requires-Dist: pytest-xdist ; extra == 'test'
Requires-Dist: pretend ; extra == 'test'
Requires-Dist: iso8601 ; extra == 'test'
Provides-Extra: test-randomorder
Requires-Dist: pytest-randomly ; extra == 'test-randomorder'
Provides-Extra: tox
Requires-Dist: tox ; extra == 'tox'
pyca/cryptography
=================
.. image:: https://img.shields.io/pypi/v/cryptography.svg
:target: https://pypi.org/project/cryptography/
:alt: Latest Version
.. image:: https://readthedocs.org/projects/cryptography/badge/?version=latest
:target: https://cryptography.io
:alt: Latest Docs
.. image:: https://github.com/pyca/cryptography/workflows/CI/badge.svg?branch=main
:target: https://github.com/pyca/cryptography/actions?query=workflow%3ACI+branch%3Amain
``cryptography`` is a package which provides cryptographic recipes and
primitives to Python developers. Our goal is for it to be your "cryptographic
standard library". It supports Python 3.6+ and PyPy3 7.3.10+.
``cryptography`` includes both high level recipes and low level interfaces to
common cryptographic algorithms such as symmetric ciphers, message digests, and
key derivation functions. For example, to encrypt something with
``cryptography``'s high level symmetric encryption recipe:
.. code-block:: pycon
>>> from cryptography.fernet import Fernet
>>> # Put this somewhere safe!
>>> key = Fernet.generate_key()
>>> f = Fernet(key)
>>> token = f.encrypt(b"A really secret message. Not for prying eyes.")
>>> token
b'...'
>>> f.decrypt(token)
b'A really secret message. Not for prying eyes.'
You can find more information in the `documentation`_.
You can install ``cryptography`` with:
.. code-block:: console
$ pip install cryptography
For full details see `the installation documentation`_.
Discussion
~~~~~~~~~~
If you run into bugs, you can file them in our `issue tracker`_.
We maintain a `cryptography-dev`_ mailing list for development discussion.
You can also join ``#pyca`` on ``irc.libera.chat`` to ask questions or get
involved.
Security
~~~~~~~~
Need to report a security issue? Please consult our `security reporting`_
documentation.
.. _`documentation`: https://cryptography.io/
.. _`the installation documentation`: https://cryptography.io/en/latest/installation/
.. _`issue tracker`: https://github.com/pyca/cryptography/issues
.. _`cryptography-dev`: https://mail.python.org/mailman/listinfo/cryptography-dev
.. _`security reporting`: https://cryptography.io/en/latest/security/
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/six-1.16.0.dist-info/RECORD | __pycache__/six.cpython-311.pyc,,
six-1.16.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
six-1.16.0.dist-info/LICENSE,sha256=i7hQxWWqOJ_cFvOkaWWtI9gq3_YPI5P8J2K2MYXo5sk,1066
six-1.16.0.dist-info/METADATA,sha256=VQcGIFCAEmfZcl77E5riPCN4v2TIsc_qtacnjxKHJoI,1795
six-1.16.0.dist-info/RECORD,,
six-1.16.0.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
six-1.16.0.dist-info/top_level.txt,sha256=_iVH_iYEtEXnD8nYGQYpYFUvkUW9sEO1GYbkeKSAais,4
six.py,sha256=TOOfQi7nFGfMrIvtdr6wX4wyHH8M7aknmuLfo2cBBrM,34549
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/six-1.16.0.dist-info/LICENSE | Copyright (c) 2010-2020 Benjamin Peterson
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/six-1.16.0.dist-info/WHEEL | Wheel-Version: 1.0
Generator: bdist_wheel (0.36.2)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/six-1.16.0.dist-info/top_level.txt | six
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/six-1.16.0.dist-info/INSTALLER | pip
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/six-1.16.0.dist-info/METADATA | Metadata-Version: 2.1
Name: six
Version: 1.16.0
Summary: Python 2 and 3 compatibility utilities
Home-page: https://github.com/benjaminp/six
Author: Benjamin Peterson
Author-email: benjamin@python.org
License: MIT
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 3
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Topic :: Software Development :: Libraries
Classifier: Topic :: Utilities
Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*
.. image:: https://img.shields.io/pypi/v/six.svg
:target: https://pypi.org/project/six/
:alt: six on PyPI
.. image:: https://travis-ci.org/benjaminp/six.svg?branch=master
:target: https://travis-ci.org/benjaminp/six
:alt: six on TravisCI
.. image:: https://readthedocs.org/projects/six/badge/?version=latest
:target: https://six.readthedocs.io/
:alt: six's documentation on Read the Docs
.. image:: https://img.shields.io/badge/license-MIT-green.svg
:target: https://github.com/benjaminp/six/blob/master/LICENSE
:alt: MIT License badge
Six is a Python 2 and 3 compatibility library. It provides utility functions
for smoothing over the differences between the Python versions with the goal of
writing Python code that is compatible on both Python versions. See the
documentation for more information on what is provided.
Six supports Python 2.7 and 3.3+. It is contained in only one Python
file, so it can be easily copied into your project. (The copyright and license
notice must be retained.)
Online documentation is at https://six.readthedocs.io/.
Bugs can be reported to https://github.com/benjaminp/six. The code can also
be found there.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/__init__.py | """
Cross-specification, implementation-agnostic JSON referencing.
"""
from referencing._core import Anchor, Registry, Resource, Specification
__all__ = ["Anchor", "Registry", "Resource", "Specification"]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/_core.py | from __future__ import annotations
from collections.abc import Iterable, Iterator, Sequence
from enum import Enum
from typing import Any, Callable, ClassVar, Generic, Protocol, TypeVar
from urllib.parse import unquote, urldefrag, urljoin
from attrs import evolve, field
from rpds import HashTrieMap, HashTrieSet, List
from referencing import exceptions
from referencing._attrs import frozen
from referencing.typing import URI, Anchor as AnchorType, D, Mapping, Retrieve
EMPTY_UNCRAWLED: HashTrieSet[URI] = HashTrieSet()
EMPTY_PREVIOUS_RESOLVERS: List[URI] = List()
class _Unset(Enum):
"""
What sillyness...
"""
SENTINEL = 1
_UNSET = _Unset.SENTINEL
class _MaybeInSubresource(Protocol[D]):
def __call__(
self,
segments: Sequence[int | str],
resolver: Resolver[D],
subresource: Resource[D],
) -> Resolver[D]:
...
def _detect_or_error(contents: D) -> Specification[D]:
if not isinstance(contents, Mapping):
raise exceptions.CannotDetermineSpecification(contents)
jsonschema_dialect_id = contents.get("$schema") # type: ignore[reportUnknownMemberType]
if jsonschema_dialect_id is None:
raise exceptions.CannotDetermineSpecification(contents)
from referencing.jsonschema import specification_with
return specification_with(
jsonschema_dialect_id, # type: ignore[reportUnknownArgumentType]
)
def _detect_or_default(
default: Specification[D],
) -> Callable[[D], Specification[D]]:
def _detect(contents: D) -> Specification[D]:
if not isinstance(contents, Mapping):
return default
jsonschema_dialect_id = contents.get("$schema") # type: ignore[reportUnknownMemberType]
if jsonschema_dialect_id is None:
return default
from referencing.jsonschema import specification_with
return specification_with(
jsonschema_dialect_id, # type: ignore[reportUnknownArgumentType]
default=default,
)
return _detect
class _SpecificationDetector:
def __get__(
self,
instance: Specification[D] | None,
cls: type[Specification[D]],
) -> Callable[[D], Specification[D]]:
if instance is None:
return _detect_or_error
else:
return _detect_or_default(instance)
@frozen
class Specification(Generic[D]):
"""
A specification which defines referencing behavior.
The various methods of a `Specification` allow for varying referencing
behavior across JSON Schema specification versions, etc.
"""
#: A short human-readable name for the specification, used for debugging.
name: str
#: Find the ID of a given document.
id_of: Callable[[D], URI | None]
#: Retrieve the subresources of the given document (without traversing into
#: the subresources themselves).
subresources_of: Callable[[D], Iterable[D]]
#: While resolving a JSON pointer, conditionally enter a subresource
#: (if e.g. we have just entered a keyword whose value is a subresource)
maybe_in_subresource: _MaybeInSubresource[D]
#: Retrieve the anchors contained in the given document.
_anchors_in: Callable[
[Specification[D], D],
Iterable[AnchorType[D]],
] = field(alias="anchors_in")
#: An opaque specification where resources have no subresources
#: nor internal identifiers.
OPAQUE: ClassVar[Specification[Any]]
#: Attempt to discern which specification applies to the given contents.
#:
#: May be called either as an instance method or as a class method, with
#: slightly different behavior in the following case:
#:
#: Recall that not all contents contains enough internal information about
#: which specification it is written for -- the JSON Schema ``{}``,
#: for instance, is valid under many different dialects and may be
#: interpreted as any one of them.
#:
#: When this method is used as an instance method (i.e. called on a
#: specific specification), that specification is used as the default
#: if the given contents are unidentifiable.
#:
#: On the other hand when called as a class method, an error is raised.
#:
#: To reiterate, ``DRAFT202012.detect({})`` will return ``DRAFT202012``
#: whereas the class method ``Specification.detect({})`` will raise an
#: error.
#:
#: (Note that of course ``DRAFT202012.detect(...)`` may return some other
#: specification when given a schema which *does* identify as being for
#: another version).
#:
#: Raises:
#:
#: `CannotDetermineSpecification`
#:
#: if the given contents don't have any discernible
#: information which could be used to guess which
#: specification they identify as
detect = _SpecificationDetector()
def __repr__(self) -> str:
return f"<Specification name={self.name!r}>"
def anchors_in(self, contents: D):
"""
Retrieve the anchors contained in the given document.
"""
return self._anchors_in(self, contents)
def create_resource(self, contents: D) -> Resource[D]:
"""
Create a resource which is interpreted using this specification.
"""
return Resource(contents=contents, specification=self)
Specification.OPAQUE = Specification(
name="opaque",
id_of=lambda contents: None,
subresources_of=lambda contents: [],
anchors_in=lambda specification, contents: [],
maybe_in_subresource=lambda segments, resolver, subresource: resolver,
)
@frozen
class Resource(Generic[D]):
r"""
A document (deserialized JSON) with a concrete interpretation under a spec.
In other words, a Python object, along with an instance of `Specification`
which describes how the document interacts with referencing -- both
internally (how it refers to other `Resource`\ s) and externally (how it
should be identified such that it is referenceable by other documents).
"""
contents: D
_specification: Specification[D] = field(alias="specification")
@classmethod
def from_contents(
cls,
contents: D,
default_specification: type[Specification[D]]
| Specification[D] = Specification,
) -> Resource[D]:
"""
Create a resource guessing which specification applies to the contents.
Raises:
`CannotDetermineSpecification`
if the given contents don't have any discernible
information which could be used to guess which
specification they identify as
"""
specification = default_specification.detect(contents)
return specification.create_resource(contents=contents)
@classmethod
def opaque(cls, contents: D) -> Resource[D]:
"""
Create an opaque `Resource` -- i.e. one with opaque specification.
See `Specification.OPAQUE` for details.
"""
return Specification.OPAQUE.create_resource(contents=contents)
def id(self) -> URI | None:
"""
Retrieve this resource's (specification-specific) identifier.
"""
id = self._specification.id_of(self.contents)
if id is None:
return
return id.rstrip("#")
def subresources(self) -> Iterable[Resource[D]]:
"""
Retrieve this resource's subresources.
"""
return (
Resource.from_contents(
each,
default_specification=self._specification,
)
for each in self._specification.subresources_of(self.contents)
)
def anchors(self) -> Iterable[AnchorType[D]]:
"""
Retrieve this resource's (specification-specific) identifier.
"""
return self._specification.anchors_in(self.contents)
def pointer(self, pointer: str, resolver: Resolver[D]) -> Resolved[D]:
"""
Resolve the given JSON pointer.
Raises:
`exceptions.PointerToNowhere`
if the pointer points to a location not present in the document
"""
contents = self.contents
segments: list[int | str] = []
for segment in unquote(pointer[1:]).split("/"):
if isinstance(contents, Sequence):
segment = int(segment)
else:
segment = segment.replace("~1", "/").replace("~0", "~")
try:
contents = contents[segment] # type: ignore[reportUnknownArgumentType]
except LookupError:
raise exceptions.PointerToNowhere(ref=pointer, resource=self)
segments.append(segment)
last = resolver
resolver = self._specification.maybe_in_subresource(
segments=segments,
resolver=resolver,
subresource=self._specification.create_resource(contents),
)
if resolver is not last:
segments = []
return Resolved(contents=contents, resolver=resolver) # type: ignore[reportUnknownArgumentType]
def _fail_to_retrieve(uri: URI):
raise exceptions.NoSuchResource(ref=uri)
@frozen
class Registry(Mapping[URI, Resource[D]]):
r"""
A registry of `Resource`\ s, each identified by their canonical URIs.
Registries store a collection of in-memory resources, and optionally
enable additional resources which may be stored elsewhere (e.g. in a
database, a separate set of files, over the network, etc.).
They also lazily walk their known resources, looking for subresources
within them. In other words, subresources contained within any added
resources will be retrievable via their own IDs (though this discovery of
subresources will be delayed until necessary).
Registries are immutable, and their methods return new instances of the
registry with the additional resources added to them.
The ``retrieve`` argument can be used to configure retrieval of resources
dynamically, either over the network, from a database, or the like.
Pass it a callable which will be called if any URI not present in the
registry is accessed. It must either return a `Resource` or else raise a
`NoSuchResource` exception indicating that the resource does not exist
even according to the retrieval logic.
"""
_resources: HashTrieMap[URI, Resource[D]] = field(
default=HashTrieMap(),
converter=HashTrieMap.convert, # type: ignore[reportGeneralTypeIssues]
alias="resources",
)
_anchors: HashTrieMap[tuple[URI, str], AnchorType[D]] = HashTrieMap()
_uncrawled: HashTrieSet[URI] = EMPTY_UNCRAWLED
_retrieve: Retrieve[D] = field(default=_fail_to_retrieve, alias="retrieve")
def __getitem__(self, uri: URI) -> Resource[D]:
"""
Return the (already crawled) `Resource` identified by the given URI.
"""
try:
return self._resources[uri.rstrip("#")]
except KeyError:
raise exceptions.NoSuchResource(ref=uri)
def __iter__(self) -> Iterator[URI]:
"""
Iterate over all crawled URIs in the registry.
"""
return iter(self._resources)
def __len__(self) -> int:
"""
Count the total number of fully crawled resources in this registry.
"""
return len(self._resources)
def __rmatmul__(
self,
new: Resource[D] | Iterable[Resource[D]],
) -> Registry[D]:
"""
Create a new registry with resource(s) added using their internal IDs.
Resources must have a internal IDs (e.g. the :kw:`$id` keyword in
modern JSON Schema versions), otherwise an error will be raised.
Both a single resource as well as an iterable of resources works, i.e.:
* ``resource @ registry`` or
* ``[iterable, of, multiple, resources] @ registry``
which -- again, assuming the resources have internal IDs -- is
equivalent to calling `Registry.with_resources` as such:
.. code:: python
registry.with_resources(
(resource.id(), resource) for resource in new_resources
)
Raises:
`NoInternalID`
if the resource(s) in fact do not have IDs
"""
if isinstance(new, Resource):
new = (new,)
resources = self._resources
uncrawled = self._uncrawled
for resource in new:
id = resource.id()
if id is None:
raise exceptions.NoInternalID(resource=resource)
uncrawled = uncrawled.insert(id)
resources = resources.insert(id, resource)
return evolve(self, resources=resources, uncrawled=uncrawled)
def __repr__(self) -> str:
size = len(self)
pluralized = "resource" if size == 1 else "resources"
if self._uncrawled:
uncrawled = len(self._uncrawled)
if uncrawled == size:
summary = f"uncrawled {pluralized}"
else:
summary = f"{pluralized}, {uncrawled} uncrawled"
else:
summary = f"{pluralized}"
return f"<Registry ({size} {summary})>"
def get_or_retrieve(self, uri: URI) -> Retrieved[D, Resource[D]]:
"""
Get a resource from the registry, crawling or retrieving if necessary.
May involve crawling to find the given URI if it is not already known,
so the returned object is a `Retrieved` object which contains both the
resource value as well as the registry which ultimately contained it.
"""
resource = self._resources.get(uri)
if resource is not None:
return Retrieved(registry=self, value=resource)
registry = self.crawl()
resource = registry._resources.get(uri)
if resource is not None:
return Retrieved(registry=registry, value=resource)
try:
resource = registry._retrieve(uri)
except (
exceptions.CannotDetermineSpecification,
exceptions.NoSuchResource,
):
raise
except Exception:
raise exceptions.Unretrievable(ref=uri)
else:
registry = registry.with_resource(uri, resource)
return Retrieved(registry=registry, value=resource)
def remove(self, uri: URI):
"""
Return a registry with the resource identified by a given URI removed.
"""
if uri not in self._resources:
raise exceptions.NoSuchResource(ref=uri)
return evolve(
self,
resources=self._resources.remove(uri),
uncrawled=self._uncrawled.discard(uri),
anchors=HashTrieMap(
(k, v) for k, v in self._anchors.items() if k[0] != uri
),
)
def anchor(self, uri: URI, name: str):
"""
Retrieve a given anchor from a resource which must already be crawled.
"""
value = self._anchors.get((uri, name))
if value is not None:
return Retrieved(value=value, registry=self)
registry = self.crawl()
value = registry._anchors.get((uri, name))
if value is not None:
return Retrieved(value=value, registry=registry)
resource = self[uri]
canonical_uri = resource.id()
if canonical_uri is not None:
value = registry._anchors.get((canonical_uri, name))
if value is not None:
return Retrieved(value=value, registry=registry)
if "/" in name:
raise exceptions.InvalidAnchor(
ref=uri,
resource=resource,
anchor=name,
)
raise exceptions.NoSuchAnchor(ref=uri, resource=resource, anchor=name)
def contents(self, uri: URI) -> D:
"""
Retrieve the (already crawled) contents identified by the given URI.
"""
# Empty fragment URIs are equivalent to URIs without the fragment.
# TODO: Is this true for non JSON Schema resources? Probably not.
return self._resources[uri.rstrip("#")].contents
def crawl(self) -> Registry[D]:
"""
Crawl all added resources, discovering subresources.
"""
resources = self._resources
anchors = self._anchors
uncrawled = [(uri, resources[uri]) for uri in self._uncrawled]
while uncrawled:
uri, resource = uncrawled.pop()
id = resource.id()
if id is not None:
uri = urljoin(uri, id)
resources = resources.insert(uri, resource)
for each in resource.anchors():
anchors = anchors.insert((uri, each.name), each)
uncrawled.extend((uri, each) for each in resource.subresources())
return evolve(
self,
resources=resources,
anchors=anchors,
uncrawled=EMPTY_UNCRAWLED,
)
def with_resource(self, uri: URI, resource: Resource[D]):
"""
Add the given `Resource` to the registry, without crawling it.
"""
return self.with_resources([(uri, resource)])
def with_resources(
self,
pairs: Iterable[tuple[URI, Resource[D]]],
) -> Registry[D]:
r"""
Add the given `Resource`\ s to the registry, without crawling them.
"""
resources = self._resources
uncrawled = self._uncrawled
for uri, resource in pairs:
# Empty fragment URIs are equivalent to URIs without the fragment.
# TODO: Is this true for non JSON Schema resources? Probably not.
uri = uri.rstrip("#")
uncrawled = uncrawled.insert(uri)
resources = resources.insert(uri, resource)
return evolve(self, resources=resources, uncrawled=uncrawled)
def with_contents(
self,
pairs: Iterable[tuple[URI, D]],
**kwargs: Any,
) -> Registry[D]:
r"""
Add the given contents to the registry, autodetecting when necessary.
"""
return self.with_resources(
(uri, Resource.from_contents(each, **kwargs))
for uri, each in pairs
)
def combine(self, *registries: Registry[D]) -> Registry[D]:
"""
Combine together one or more other registries, producing a unified one.
"""
if registries == (self,):
return self
resources = self._resources
anchors = self._anchors
uncrawled = self._uncrawled
retrieve = self._retrieve
for registry in registries:
resources = resources.update(registry._resources) # type: ignore[reportUnknownMemberType]
anchors = anchors.update(registry._anchors) # type: ignore[reportUnknownMemberType]
uncrawled = uncrawled.update(registry._uncrawled)
if registry._retrieve is not _fail_to_retrieve:
if registry._retrieve is not retrieve is not _fail_to_retrieve:
raise ValueError(
"Cannot combine registries with conflicting retrieval "
"functions.",
)
retrieve = registry._retrieve
return evolve(
self,
anchors=anchors,
resources=resources,
uncrawled=uncrawled,
retrieve=retrieve,
)
def resolver(self, base_uri: URI = "") -> Resolver[D]:
"""
Return a `Resolver` which resolves references against this registry.
"""
return Resolver(base_uri=base_uri, registry=self)
def resolver_with_root(self, resource: Resource[D]) -> Resolver[D]:
"""
Return a `Resolver` with a specific root resource.
"""
uri = resource.id() or ""
return Resolver(
base_uri=uri,
registry=self.with_resource(uri, resource),
)
#: An anchor or resource.
AnchorOrResource = TypeVar("AnchorOrResource", AnchorType[Any], Resource[Any])
@frozen
class Retrieved(Generic[D, AnchorOrResource]):
"""
A value retrieved from a `Registry`.
"""
value: AnchorOrResource
registry: Registry[D]
@frozen
class Resolved(Generic[D]):
"""
A reference resolved to its contents by a `Resolver`.
"""
contents: D
resolver: Resolver[D]
@frozen
class Resolver(Generic[D]):
"""
A reference resolver.
Resolvers help resolve references (including relative ones) by
pairing a fixed base URI with a `Registry`.
This object, under normal circumstances, is expected to be used by
*implementers of libraries* built on top of `referencing` (e.g. JSON Schema
implementations or other libraries resolving JSON references),
not directly by end-users populating registries or while writing
schemas or other resources.
References are resolved against the base URI, and the combined URI
is then looked up within the registry.
The process of resolving a reference may itself involve calculating
a *new* base URI for future reference resolution (e.g. if an
intermediate resource sets a new base URI), or may involve encountering
additional subresources and adding them to a new registry.
"""
_base_uri: URI = field(alias="base_uri")
_registry: Registry[D] = field(alias="registry")
_previous: List[URI] = field(default=List(), repr=False, alias="previous")
def lookup(self, ref: URI) -> Resolved[D]:
"""
Resolve the given reference to the resource it points to.
Raises:
`exceptions.Unresolvable`
or a subclass thereof (see below) if the reference isn't
resolvable
`exceptions.NoSuchAnchor`
if the reference is to a URI where a resource exists but
contains a plain name fragment which does not exist within
the resource
`exceptions.PointerToNowhere`
if the reference is to a URI where a resource exists but
contains a JSON pointer to a location within the resource
that does not exist
"""
if ref.startswith("#"):
uri, fragment = self._base_uri, ref[1:]
else:
uri, fragment = urldefrag(urljoin(self._base_uri, ref))
try:
retrieved = self._registry.get_or_retrieve(uri)
except exceptions.NoSuchResource:
raise exceptions.Unresolvable(ref=ref) from None
except exceptions.Unretrievable:
raise exceptions.Unresolvable(ref=ref)
if fragment.startswith("/"):
resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
return retrieved.value.pointer(pointer=fragment, resolver=resolver)
if fragment:
retrieved = retrieved.registry.anchor(uri, fragment)
resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
return retrieved.value.resolve(resolver=resolver)
resolver = self._evolve(registry=retrieved.registry, base_uri=uri)
return Resolved(contents=retrieved.value.contents, resolver=resolver)
def in_subresource(self, subresource: Resource[D]) -> Resolver[D]:
"""
Create a resolver for a subresource (which may have a new base URI).
"""
id = subresource.id()
if id is None:
return self
return evolve(self, base_uri=urljoin(self._base_uri, id))
def dynamic_scope(self) -> Iterable[tuple[URI, Registry[D]]]:
"""
In specs with such a notion, return the URIs in the dynamic scope.
"""
for uri in self._previous:
yield uri, self._registry
def _evolve(self, base_uri: URI, **kwargs: Any):
"""
Evolve, appending to the dynamic scope.
"""
previous = self._previous
if self._base_uri and (not previous or base_uri != self._base_uri):
previous = previous.push_front(self._base_uri)
return evolve(self, base_uri=base_uri, previous=previous, **kwargs)
@frozen
class Anchor(Generic[D]):
"""
A simple anchor in a `Resource`.
"""
name: str
resource: Resource[D]
def resolve(self, resolver: Resolver[D]):
"""
Return the resource for this anchor.
"""
return Resolved(contents=self.resource.contents, resolver=resolver)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/retrieval.py | """
Helpers related to (dynamic) resource retrieval.
"""
from __future__ import annotations
from functools import lru_cache
from typing import TYPE_CHECKING, Callable, TypeVar
import json
from referencing import Resource
if TYPE_CHECKING:
from referencing.typing import URI, D, Retrieve
#: A serialized document (e.g. a JSON string)
_T = TypeVar("_T")
def to_cached_resource(
cache: Callable[[Retrieve[D]], Retrieve[D]] | None = None,
loads: Callable[[_T], D] = json.loads,
from_contents: Callable[[D], Resource[D]] = Resource.from_contents,
) -> Callable[[Callable[[URI], _T]], Retrieve[D]]:
"""
Create a retriever which caches its return values from a simpler callable.
Takes a function which returns things like serialized JSON (strings) and
returns something suitable for passing to `Registry` as a retrieve
function.
This decorator both reduces a small bit of boilerplate for a common case
(deserializing JSON from strings and creating `Resource` objects from the
result) as well as makes the probable need for caching a bit easier.
Retrievers which otherwise do expensive operations (like hitting the
network) might otherwise be called repeatedly.
Examples
--------
.. testcode::
from referencing import Registry
from referencing.typing import URI
import referencing.retrieval
@referencing.retrieval.to_cached_resource()
def retrieve(uri: URI):
print(f"Retrieved {uri}")
# Normally, go get some expensive JSON from the network, a file ...
return '''
{
"$schema": "https://json-schema.org/draft/2020-12/schema",
"foo": "bar"
}
'''
one = Registry(retrieve=retrieve).get_or_retrieve("urn:example:foo")
print(one.value.contents["foo"])
# Retrieving the same URI again reuses the same value (and thus doesn't
# print another retrieval message here)
two = Registry(retrieve=retrieve).get_or_retrieve("urn:example:foo")
print(two.value.contents["foo"])
.. testoutput::
Retrieved urn:example:foo
bar
bar
"""
if cache is None:
cache = lru_cache(maxsize=None)
def decorator(retrieve: Callable[[URI], _T]):
@cache
def cached_retrieve(uri: URI):
response = retrieve(uri)
contents = loads(response)
return from_contents(contents)
return cached_retrieve
return decorator
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/_attrs.py | from __future__ import annotations
from typing import NoReturn, TypeVar
from attrs import define as _define, frozen as _frozen
_T = TypeVar("_T")
def define(cls: type[_T]) -> type[_T]: # pragma: no cover
cls.__init_subclass__ = _do_not_subclass
return _define(cls)
def frozen(cls: type[_T]) -> type[_T]:
cls.__init_subclass__ = _do_not_subclass
return _frozen(cls)
class UnsupportedSubclassing(Exception):
pass
@staticmethod
def _do_not_subclass() -> NoReturn: # pragma: no cover
raise UnsupportedSubclassing(
"Subclassing is not part of referencing's public API. "
"If no other suitable API exists for what you're trying to do, "
"feel free to file an issue asking for one.",
)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/exceptions.py | """
Errors, oh no!
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Any
import attrs
from referencing._attrs import frozen
if TYPE_CHECKING:
from referencing import Resource
from referencing.typing import URI
@frozen
class NoSuchResource(KeyError):
"""
The given URI is not present in a registry.
Unlike most exceptions, this class *is* intended to be publicly
instantiable and *is* part of the public API of the package.
"""
ref: URI
def __eq__(self, other: Any) -> bool:
if self.__class__ is not other.__class__:
return NotImplemented
return attrs.astuple(self) == attrs.astuple(other)
def __hash__(self) -> int:
return hash(attrs.astuple(self))
@frozen
class NoInternalID(Exception):
"""
A resource has no internal ID, but one is needed.
E.g. in modern JSON Schema drafts, this is the :kw:`$id` keyword.
One might be needed if a resource was to-be added to a registry but no
other URI is available, and the resource doesn't declare its canonical URI.
"""
resource: Resource[Any]
def __eq__(self, other: Any) -> bool:
if self.__class__ is not other.__class__:
return NotImplemented
return attrs.astuple(self) == attrs.astuple(other)
def __hash__(self) -> int:
return hash(attrs.astuple(self))
@frozen
class Unretrievable(KeyError):
"""
The given URI is not present in a registry, and retrieving it failed.
"""
ref: URI
def __eq__(self, other: Any) -> bool:
if self.__class__ is not other.__class__:
return NotImplemented
return attrs.astuple(self) == attrs.astuple(other)
def __hash__(self) -> int:
return hash(attrs.astuple(self))
@frozen
class CannotDetermineSpecification(Exception):
"""
Attempting to detect the appropriate `Specification` failed.
This happens if no discernible information is found in the contents of the
new resource which would help identify it.
"""
contents: Any
def __eq__(self, other: Any) -> bool:
if self.__class__ is not other.__class__:
return NotImplemented
return attrs.astuple(self) == attrs.astuple(other)
def __hash__(self) -> int:
return hash(attrs.astuple(self))
@attrs.frozen # Because here we allow subclassing below.
class Unresolvable(Exception):
"""
A reference was unresolvable.
"""
ref: URI
def __eq__(self, other: Any) -> bool:
if self.__class__ is not other.__class__:
return NotImplemented
return attrs.astuple(self) == attrs.astuple(other)
def __hash__(self) -> int:
return hash(attrs.astuple(self))
@frozen
class PointerToNowhere(Unresolvable):
"""
A JSON Pointer leads to a part of a document that does not exist.
"""
resource: Resource[Any]
def __str__(self) -> str:
msg = f"{self.ref!r} does not exist within {self.resource.contents!r}"
if self.ref == "/":
msg += (
". The pointer '/' is a valid JSON Pointer but it points to "
"an empty string property ''. If you intended to point "
"to the entire resource, you should use '#'."
)
return msg
@frozen
class NoSuchAnchor(Unresolvable):
"""
An anchor does not exist within a particular resource.
"""
resource: Resource[Any]
anchor: str
def __str__(self) -> str:
return (
f"{self.anchor!r} does not exist within {self.resource.contents!r}"
)
@frozen
class InvalidAnchor(Unresolvable):
"""
An anchor which could never exist in a resource was dereferenced.
It is somehow syntactically invalid.
"""
resource: Resource[Any]
anchor: str
def __str__(self) -> str:
return (
f"'#{self.anchor}' is not a valid anchor, neither as a "
"plain name anchor nor as a JSON Pointer. You may have intended "
f"to use '#/{self.anchor}', as the slash is required *before each "
"segment* of a JSON pointer."
)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/typing.py | """
Type-annotation related support for the referencing library.
"""
from __future__ import annotations
from typing import TYPE_CHECKING, Protocol, TypeVar
try:
from collections.abc import Mapping as Mapping
Mapping[str, str]
except TypeError: # pragma: no cover
from typing import Mapping as Mapping
if TYPE_CHECKING:
from referencing._core import Resolved, Resolver, Resource
#: A URI which identifies a `Resource`.
URI = str
#: The type of documents within a registry.
D = TypeVar("D")
class Retrieve(Protocol[D]):
"""
A retrieval callable, usable within a `Registry` for resource retrieval.
Does not make assumptions about where the resource might be coming from.
"""
def __call__(self, uri: URI) -> Resource[D]:
"""
Retrieve the resource with the given URI.
Raise `referencing.exceptions.NoSuchResource` if you wish to indicate
the retriever cannot lookup the given URI.
"""
...
class Anchor(Protocol[D]):
"""
An anchor within a `Resource`.
Beyond "simple" anchors, some specifications like JSON Schema's 2020
version have dynamic anchors.
"""
@property
def name(self) -> str:
"""
Return the name of this anchor.
"""
...
def resolve(self, resolver: Resolver[D]) -> Resolved[D]:
"""
Return the resource for this anchor.
"""
...
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/jsonschema.py | """
Referencing implementations for JSON Schema specs (historic & current).
"""
from __future__ import annotations
from collections.abc import Sequence, Set
from typing import Any, Iterable, Union
from referencing import Anchor, Registry, Resource, Specification, exceptions
from referencing._attrs import frozen
from referencing._core import _UNSET # type: ignore[reportPrivateUsage]
from referencing._core import _Unset # type: ignore[reportPrivateUsage]
from referencing._core import Resolved as _Resolved, Resolver as _Resolver
from referencing.typing import URI, Anchor as AnchorType, Mapping
#: A JSON Schema which is a JSON object
ObjectSchema = Mapping[str, Any]
#: A JSON Schema of any kind
Schema = Union[bool, ObjectSchema]
#: A JSON Schema Registry
SchemaRegistry = Registry[Schema]
#: The empty JSON Schema Registry
EMPTY_REGISTRY: SchemaRegistry = Registry()
@frozen
class UnknownDialect(Exception):
"""
A dialect identifier was found for a dialect unknown by this library.
If it's a custom ("unofficial") dialect, be sure you've registered it.
"""
uri: URI
def _dollar_id(contents: Schema) -> URI | None:
if isinstance(contents, bool):
return
return contents.get("$id")
def _legacy_dollar_id(contents: Schema) -> URI | None:
if isinstance(contents, bool) or "$ref" in contents:
return
id = contents.get("$id")
if id is not None and not id.startswith("#"):
return id
def _legacy_id(contents: ObjectSchema) -> URI | None:
if "$ref" in contents:
return
id = contents.get("id")
if id is not None and not id.startswith("#"):
return id
def _anchor(
specification: Specification[Schema],
contents: Schema,
) -> Iterable[AnchorType[Schema]]:
if isinstance(contents, bool):
return
anchor = contents.get("$anchor")
if anchor is not None:
yield Anchor(
name=anchor,
resource=specification.create_resource(contents),
)
dynamic_anchor = contents.get("$dynamicAnchor")
if dynamic_anchor is not None:
yield DynamicAnchor(
name=dynamic_anchor,
resource=specification.create_resource(contents),
)
def _anchor_2019(
specification: Specification[Schema],
contents: Schema,
) -> Iterable[Anchor[Schema]]:
if isinstance(contents, bool):
return []
anchor = contents.get("$anchor")
if anchor is None:
return []
return [
Anchor(
name=anchor,
resource=specification.create_resource(contents),
),
]
def _legacy_anchor_in_dollar_id(
specification: Specification[Schema],
contents: Schema,
) -> Iterable[Anchor[Schema]]:
if isinstance(contents, bool):
return []
id = contents.get("$id", "")
if not id.startswith("#"):
return []
return [
Anchor(
name=id[1:],
resource=specification.create_resource(contents),
),
]
def _legacy_anchor_in_id(
specification: Specification[ObjectSchema],
contents: ObjectSchema,
) -> Iterable[Anchor[ObjectSchema]]:
id = contents.get("id", "")
if not id.startswith("#"):
return []
return [
Anchor(
name=id[1:],
resource=specification.create_resource(contents),
),
]
def _subresources_of(
in_value: Set[str] = frozenset(),
in_subvalues: Set[str] = frozenset(),
in_subarray: Set[str] = frozenset(),
):
"""
Create a callable returning JSON Schema specification-style subschemas.
Relies on specifying the set of keywords containing subschemas in their
values, in a subobject's values, or in a subarray.
"""
def subresources_of(contents: Schema) -> Iterable[ObjectSchema]:
if isinstance(contents, bool):
return
for each in in_value:
if each in contents:
yield contents[each]
for each in in_subarray:
if each in contents:
yield from contents[each]
for each in in_subvalues:
if each in contents:
yield from contents[each].values()
return subresources_of
def _subresources_of_with_crazy_items(
in_value: Set[str] = frozenset(),
in_subvalues: Set[str] = frozenset(),
in_subarray: Set[str] = frozenset(),
):
"""
Specifically handle older drafts where there are some funky keywords.
"""
def subresources_of(contents: Schema) -> Iterable[ObjectSchema]:
if isinstance(contents, bool):
return
for each in in_value:
if each in contents:
yield contents[each]
for each in in_subarray:
if each in contents:
yield from contents[each]
for each in in_subvalues:
if each in contents:
yield from contents[each].values()
items = contents.get("items")
if items is not None:
if isinstance(items, Sequence):
yield from items
else:
yield items
return subresources_of
def _subresources_of_with_crazy_items_dependencies(
in_value: Set[str] = frozenset(),
in_subvalues: Set[str] = frozenset(),
in_subarray: Set[str] = frozenset(),
):
"""
Specifically handle older drafts where there are some funky keywords.
"""
def subresources_of(contents: Schema) -> Iterable[ObjectSchema]:
if isinstance(contents, bool):
return
for each in in_value:
if each in contents:
yield contents[each]
for each in in_subarray:
if each in contents:
yield from contents[each]
for each in in_subvalues:
if each in contents:
yield from contents[each].values()
items = contents.get("items")
if items is not None:
if isinstance(items, Sequence):
yield from items
else:
yield items
dependencies = contents.get("dependencies")
if dependencies is not None:
values = iter(dependencies.values())
value = next(values, None)
if isinstance(value, Mapping):
yield value
yield from values
return subresources_of
def _subresources_of_with_crazy_aP_items_dependencies(
in_value: Set[str] = frozenset(),
in_subvalues: Set[str] = frozenset(),
in_subarray: Set[str] = frozenset(),
):
"""
Specifically handle even older drafts where there are some funky keywords.
"""
def subresources_of(contents: ObjectSchema) -> Iterable[ObjectSchema]:
for each in in_value:
if each in contents:
yield contents[each]
for each in in_subarray:
if each in contents:
yield from contents[each]
for each in in_subvalues:
if each in contents:
yield from contents[each].values()
items = contents.get("items")
if items is not None:
if isinstance(items, Sequence):
yield from items
else:
yield items
dependencies = contents.get("dependencies")
if dependencies is not None:
values = iter(dependencies.values())
value = next(values, None)
if isinstance(value, Mapping):
yield value
yield from values
for each in "additionalItems", "additionalProperties":
value = contents.get(each)
if isinstance(value, Mapping):
yield value
return subresources_of
def _maybe_in_subresource(
in_value: Set[str] = frozenset(),
in_subvalues: Set[str] = frozenset(),
in_subarray: Set[str] = frozenset(),
):
in_child = in_subvalues | in_subarray
def maybe_in_subresource(
segments: Sequence[int | str],
resolver: _Resolver[Any],
subresource: Resource[Any],
) -> _Resolver[Any]:
_segments = iter(segments)
for segment in _segments:
if segment not in in_value and (
segment not in in_child or next(_segments, None) is None
):
return resolver
return resolver.in_subresource(subresource)
return maybe_in_subresource
def _maybe_in_subresource_crazy_items(
in_value: Set[str] = frozenset(),
in_subvalues: Set[str] = frozenset(),
in_subarray: Set[str] = frozenset(),
):
in_child = in_subvalues | in_subarray
def maybe_in_subresource(
segments: Sequence[int | str],
resolver: _Resolver[Any],
subresource: Resource[Any],
) -> _Resolver[Any]:
_segments = iter(segments)
for segment in _segments:
if segment == "items" and isinstance(
subresource.contents,
Mapping,
):
return resolver.in_subresource(subresource)
if segment not in in_value and (
segment not in in_child or next(_segments, None) is None
):
return resolver
return resolver.in_subresource(subresource)
return maybe_in_subresource
def _maybe_in_subresource_crazy_items_dependencies(
in_value: Set[str] = frozenset(),
in_subvalues: Set[str] = frozenset(),
in_subarray: Set[str] = frozenset(),
):
in_child = in_subvalues | in_subarray
def maybe_in_subresource(
segments: Sequence[int | str],
resolver: _Resolver[Any],
subresource: Resource[Any],
) -> _Resolver[Any]:
_segments = iter(segments)
for segment in _segments:
if (
segment == "items" or segment == "dependencies"
) and isinstance(subresource.contents, Mapping):
return resolver.in_subresource(subresource)
if segment not in in_value and (
segment not in in_child or next(_segments, None) is None
):
return resolver
return resolver.in_subresource(subresource)
return maybe_in_subresource
#: JSON Schema draft 2020-12
DRAFT202012 = Specification(
name="draft2020-12",
id_of=_dollar_id,
subresources_of=_subresources_of(
in_value={
"additionalProperties",
"contains",
"contentSchema",
"else",
"if",
"items",
"not",
"propertyNames",
"then",
"unevaluatedItems",
"unevaluatedProperties",
},
in_subarray={"allOf", "anyOf", "oneOf", "prefixItems"},
in_subvalues={
"$defs",
"dependentSchemas",
"patternProperties",
"properties",
},
),
anchors_in=_anchor,
maybe_in_subresource=_maybe_in_subresource(
in_value={
"additionalProperties",
"contains",
"contentSchema",
"else",
"if",
"items",
"not",
"propertyNames",
"then",
"unevaluatedItems",
"unevaluatedProperties",
},
in_subarray={"allOf", "anyOf", "oneOf", "prefixItems"},
in_subvalues={
"$defs",
"dependentSchemas",
"patternProperties",
"properties",
},
),
)
#: JSON Schema draft 2019-09
DRAFT201909 = Specification(
name="draft2019-09",
id_of=_dollar_id,
subresources_of=_subresources_of_with_crazy_items(
in_value={
"additionalItems",
"additionalProperties",
"contains",
"contentSchema",
"else",
"if",
"not",
"propertyNames",
"then",
"unevaluatedItems",
"unevaluatedProperties",
},
in_subarray={"allOf", "anyOf", "oneOf"},
in_subvalues={
"$defs",
"dependentSchemas",
"patternProperties",
"properties",
},
),
anchors_in=_anchor_2019, # type: ignore[reportGeneralTypeIssues] TODO: check whether this is real
maybe_in_subresource=_maybe_in_subresource_crazy_items(
in_value={
"additionalItems",
"additionalProperties",
"contains",
"contentSchema",
"else",
"if",
"not",
"propertyNames",
"then",
"unevaluatedItems",
"unevaluatedProperties",
},
in_subarray={"allOf", "anyOf", "oneOf"},
in_subvalues={
"$defs",
"dependentSchemas",
"patternProperties",
"properties",
},
),
)
#: JSON Schema draft 7
DRAFT7 = Specification(
name="draft-07",
id_of=_legacy_dollar_id,
subresources_of=_subresources_of_with_crazy_items_dependencies(
in_value={
"additionalItems",
"additionalProperties",
"contains",
"else",
"if",
"not",
"propertyNames",
"then",
},
in_subarray={"allOf", "anyOf", "oneOf"},
in_subvalues={"definitions", "patternProperties", "properties"},
),
anchors_in=_legacy_anchor_in_dollar_id, # type: ignore[reportGeneralTypeIssues] TODO: check whether this is real
maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies(
in_value={
"additionalItems",
"additionalProperties",
"contains",
"else",
"if",
"not",
"propertyNames",
"then",
},
in_subarray={"allOf", "anyOf", "oneOf"},
in_subvalues={"definitions", "patternProperties", "properties"},
),
)
#: JSON Schema draft 6
DRAFT6 = Specification(
name="draft-06",
id_of=_legacy_dollar_id,
subresources_of=_subresources_of_with_crazy_items_dependencies(
in_value={
"additionalItems",
"additionalProperties",
"contains",
"not",
"propertyNames",
},
in_subarray={"allOf", "anyOf", "oneOf"},
in_subvalues={"definitions", "patternProperties", "properties"},
),
anchors_in=_legacy_anchor_in_dollar_id, # type: ignore[reportGeneralTypeIssues] TODO: check whether this is real
maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies(
in_value={
"additionalItems",
"additionalProperties",
"contains",
"not",
"propertyNames",
},
in_subarray={"allOf", "anyOf", "oneOf"},
in_subvalues={"definitions", "patternProperties", "properties"},
),
)
#: JSON Schema draft 4
DRAFT4 = Specification(
name="draft-04",
id_of=_legacy_id,
subresources_of=_subresources_of_with_crazy_aP_items_dependencies(
in_value={"not"},
in_subarray={"allOf", "anyOf", "oneOf"},
in_subvalues={"definitions", "patternProperties", "properties"},
),
anchors_in=_legacy_anchor_in_id,
maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies(
in_value={"additionalItems", "additionalProperties", "not"},
in_subarray={"allOf", "anyOf", "oneOf"},
in_subvalues={"definitions", "patternProperties", "properties"},
),
)
#: JSON Schema draft 3
DRAFT3 = Specification(
name="draft-03",
id_of=_legacy_id,
subresources_of=_subresources_of_with_crazy_aP_items_dependencies(
in_subarray={"extends"},
in_subvalues={"definitions", "patternProperties", "properties"},
),
anchors_in=_legacy_anchor_in_id,
maybe_in_subresource=_maybe_in_subresource_crazy_items_dependencies(
in_value={"additionalItems", "additionalProperties"},
in_subarray={"extends"},
in_subvalues={"definitions", "patternProperties", "properties"},
),
)
_SPECIFICATIONS: Registry[Specification[Schema]] = Registry(
{ # type: ignore[reportGeneralTypeIssues] # :/ internal vs external types
dialect_id: Resource.opaque(specification)
for dialect_id, specification in [
("https://json-schema.org/draft/2020-12/schema", DRAFT202012),
("https://json-schema.org/draft/2019-09/schema", DRAFT201909),
("http://json-schema.org/draft-07/schema", DRAFT7),
("http://json-schema.org/draft-06/schema", DRAFT6),
("http://json-schema.org/draft-04/schema", DRAFT4),
("http://json-schema.org/draft-03/schema", DRAFT3),
]
},
)
def specification_with(
dialect_id: URI,
default: Specification[Any] | _Unset = _UNSET,
) -> Specification[Any]:
"""
Retrieve the `Specification` with the given dialect identifier.
Raises:
`UnknownDialect`
if the given ``dialect_id`` isn't known
"""
resource = _SPECIFICATIONS.get(dialect_id.rstrip("#"))
if resource is not None:
return resource.contents
if default is _UNSET:
raise UnknownDialect(dialect_id)
return default
@frozen
class DynamicAnchor:
"""
Dynamic anchors, introduced in draft 2020.
"""
name: str
resource: Resource[Schema]
def resolve(self, resolver: _Resolver[Schema]) -> _Resolved[Schema]:
"""
Resolve this anchor dynamically.
"""
last = self.resource
for uri, registry in resolver.dynamic_scope():
try:
anchor = registry.anchor(uri, self.name).value
except exceptions.NoSuchAnchor:
continue
if isinstance(anchor, DynamicAnchor):
last = anchor.resource
return _Resolved(
contents=last.contents,
resolver=resolver.in_subresource(last),
)
def lookup_recursive_ref(resolver: _Resolver[Schema]) -> _Resolved[Schema]:
"""
Recursive references (via recursive anchors), present only in draft 2019.
As per the 2019 specification (§ 8.2.4.2.1), only the ``#`` recursive
reference is supported (and is therefore assumed to be the relevant
reference).
"""
resolved = resolver.lookup("#")
if isinstance(resolved.contents, Mapping) and resolved.contents.get(
"$recursiveAnchor",
):
for uri, _ in resolver.dynamic_scope():
next_resolved = resolver.lookup(uri)
if not isinstance(
next_resolved.contents,
Mapping,
) or not next_resolved.contents.get("$recursiveAnchor"):
break
resolved = next_resolved
return resolved
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/_attrs.pyi | from typing import Any, Callable, TypeVar, Union
from attr import attrib, field
class UnsupportedSubclassing(Exception): ...
_T = TypeVar("_T")
def __dataclass_transform__(
*,
frozen_default: bool = False,
field_descriptors: tuple[Union[type, Callable[..., Any]], ...] = ...,
) -> Callable[[_T], _T]: ...
@__dataclass_transform__(field_descriptors=(attrib, field))
def define(cls: type[_T]) -> type[_T]: ...
@__dataclass_transform__(
frozen_default=True,
field_descriptors=(attrib, field),
)
def frozen(cls: type[_T]) -> type[_T]: ...
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/tests/test_jsonschema.py | import pytest
from referencing import Registry, Resource, Specification
import referencing.jsonschema
@pytest.mark.parametrize(
"uri, expected",
[
(
"https://json-schema.org/draft/2020-12/schema",
referencing.jsonschema.DRAFT202012,
),
(
"https://json-schema.org/draft/2019-09/schema",
referencing.jsonschema.DRAFT201909,
),
(
"http://json-schema.org/draft-07/schema#",
referencing.jsonschema.DRAFT7,
),
(
"http://json-schema.org/draft-06/schema#",
referencing.jsonschema.DRAFT6,
),
(
"http://json-schema.org/draft-04/schema#",
referencing.jsonschema.DRAFT4,
),
(
"http://json-schema.org/draft-03/schema#",
referencing.jsonschema.DRAFT3,
),
],
)
def test_schemas_with_explicit_schema_keywords_are_detected(uri, expected):
"""
The $schema keyword in JSON Schema is a dialect identifier.
"""
contents = {"$schema": uri}
resource = Resource.from_contents(contents)
assert resource == Resource(contents=contents, specification=expected)
def test_unknown_dialect():
dialect_id = "http://example.com/unknown-json-schema-dialect-id"
with pytest.raises(referencing.jsonschema.UnknownDialect) as excinfo:
Resource.from_contents({"$schema": dialect_id})
assert excinfo.value.uri == dialect_id
@pytest.mark.parametrize(
"id, specification",
[
("$id", referencing.jsonschema.DRAFT202012),
("$id", referencing.jsonschema.DRAFT201909),
("$id", referencing.jsonschema.DRAFT7),
("$id", referencing.jsonschema.DRAFT6),
("id", referencing.jsonschema.DRAFT4),
("id", referencing.jsonschema.DRAFT3),
],
)
def test_id_of_mapping(id, specification):
uri = "http://example.com/some-schema"
assert specification.id_of({id: uri}) == uri
@pytest.mark.parametrize(
"specification",
[
referencing.jsonschema.DRAFT202012,
referencing.jsonschema.DRAFT201909,
referencing.jsonschema.DRAFT7,
referencing.jsonschema.DRAFT6,
],
)
@pytest.mark.parametrize("value", [True, False])
def test_id_of_bool(specification, value):
assert specification.id_of(value) is None
@pytest.mark.parametrize(
"specification",
[
referencing.jsonschema.DRAFT202012,
referencing.jsonschema.DRAFT201909,
referencing.jsonschema.DRAFT7,
referencing.jsonschema.DRAFT6,
],
)
@pytest.mark.parametrize("value", [True, False])
def test_anchors_in_bool(specification, value):
assert list(specification.anchors_in(value)) == []
@pytest.mark.parametrize(
"specification",
[
referencing.jsonschema.DRAFT202012,
referencing.jsonschema.DRAFT201909,
referencing.jsonschema.DRAFT7,
referencing.jsonschema.DRAFT6,
],
)
@pytest.mark.parametrize("value", [True, False])
def test_subresources_of_bool(specification, value):
assert list(specification.subresources_of(value)) == []
@pytest.mark.parametrize(
"uri, expected",
[
(
"https://json-schema.org/draft/2020-12/schema",
referencing.jsonschema.DRAFT202012,
),
(
"https://json-schema.org/draft/2019-09/schema",
referencing.jsonschema.DRAFT201909,
),
(
"http://json-schema.org/draft-07/schema#",
referencing.jsonschema.DRAFT7,
),
(
"http://json-schema.org/draft-06/schema#",
referencing.jsonschema.DRAFT6,
),
(
"http://json-schema.org/draft-04/schema#",
referencing.jsonschema.DRAFT4,
),
(
"http://json-schema.org/draft-03/schema#",
referencing.jsonschema.DRAFT3,
),
],
)
def test_specification_with(uri, expected):
assert referencing.jsonschema.specification_with(uri) == expected
@pytest.mark.parametrize(
"uri, expected",
[
(
"http://json-schema.org/draft-07/schema",
referencing.jsonschema.DRAFT7,
),
(
"http://json-schema.org/draft-06/schema",
referencing.jsonschema.DRAFT6,
),
(
"http://json-schema.org/draft-04/schema",
referencing.jsonschema.DRAFT4,
),
(
"http://json-schema.org/draft-03/schema",
referencing.jsonschema.DRAFT3,
),
],
)
def test_specification_with_no_empty_fragment(uri, expected):
assert referencing.jsonschema.specification_with(uri) == expected
def test_specification_with_unknown_dialect():
dialect_id = "http://example.com/unknown-json-schema-dialect-id"
with pytest.raises(referencing.jsonschema.UnknownDialect) as excinfo:
referencing.jsonschema.specification_with(dialect_id)
assert excinfo.value.uri == dialect_id
def test_specification_with_default():
dialect_id = "http://example.com/unknown-json-schema-dialect-id"
specification = referencing.jsonschema.specification_with(
dialect_id,
default=Specification.OPAQUE,
)
assert specification is Specification.OPAQUE
# FIXME: The tests below should move to the referencing suite but I haven't yet
# figured out how to represent dynamic (& recursive) ref lookups in it.
def test_lookup_trivial_dynamic_ref():
one = referencing.jsonschema.DRAFT202012.create_resource(
{"$dynamicAnchor": "foo"},
)
resolver = Registry().with_resource("http://example.com", one).resolver()
resolved = resolver.lookup("http://example.com#foo")
assert resolved.contents == one.contents
def test_multiple_lookup_trivial_dynamic_ref():
TRUE = referencing.jsonschema.DRAFT202012.create_resource(True)
root = referencing.jsonschema.DRAFT202012.create_resource(
{
"$id": "http://example.com",
"$dynamicAnchor": "fooAnchor",
"$defs": {
"foo": {
"$id": "foo",
"$dynamicAnchor": "fooAnchor",
"$defs": {
"bar": True,
"baz": {
"$dynamicAnchor": "fooAnchor",
},
},
},
},
},
)
resolver = (
Registry()
.with_resources(
[
("http://example.com", root),
("http://example.com/foo/", TRUE),
("http://example.com/foo/bar", root),
],
)
.resolver()
)
first = resolver.lookup("http://example.com")
second = first.resolver.lookup("foo/")
resolver = second.resolver.lookup("bar").resolver
fourth = resolver.lookup("#fooAnchor")
assert fourth.contents == root.contents
def test_multiple_lookup_dynamic_ref_to_nondynamic_ref():
one = referencing.jsonschema.DRAFT202012.create_resource(
{"$anchor": "fooAnchor"},
)
two = referencing.jsonschema.DRAFT202012.create_resource(
{
"$id": "http://example.com",
"$dynamicAnchor": "fooAnchor",
"$defs": {
"foo": {
"$id": "foo",
"$dynamicAnchor": "fooAnchor",
"$defs": {
"bar": True,
"baz": {
"$dynamicAnchor": "fooAnchor",
},
},
},
},
},
)
resolver = (
Registry()
.with_resources(
[
("http://example.com", two),
("http://example.com/foo/", one),
("http://example.com/foo/bar", two),
],
)
.resolver()
)
first = resolver.lookup("http://example.com")
second = first.resolver.lookup("foo/")
resolver = second.resolver.lookup("bar").resolver
fourth = resolver.lookup("#fooAnchor")
assert fourth.contents == two.contents
def test_lookup_trivial_recursive_ref():
one = referencing.jsonschema.DRAFT201909.create_resource(
{"$recursiveAnchor": True},
)
resolver = Registry().with_resource("http://example.com", one).resolver()
first = resolver.lookup("http://example.com")
resolved = referencing.jsonschema.lookup_recursive_ref(
resolver=first.resolver,
)
assert resolved.contents == one.contents
def test_lookup_recursive_ref_to_bool():
TRUE = referencing.jsonschema.DRAFT201909.create_resource(True)
registry = Registry({"http://example.com": TRUE})
resolved = referencing.jsonschema.lookup_recursive_ref(
resolver=registry.resolver(base_uri="http://example.com"),
)
assert resolved.contents == TRUE.contents
def test_multiple_lookup_recursive_ref_to_bool():
TRUE = referencing.jsonschema.DRAFT201909.create_resource(True)
root = referencing.jsonschema.DRAFT201909.create_resource(
{
"$id": "http://example.com",
"$recursiveAnchor": True,
"$defs": {
"foo": {
"$id": "foo",
"$recursiveAnchor": True,
"$defs": {
"bar": True,
"baz": {
"$recursiveAnchor": True,
"$anchor": "fooAnchor",
},
},
},
},
},
)
resolver = (
Registry()
.with_resources(
[
("http://example.com", root),
("http://example.com/foo/", TRUE),
("http://example.com/foo/bar", root),
],
)
.resolver()
)
first = resolver.lookup("http://example.com")
second = first.resolver.lookup("foo/")
resolver = second.resolver.lookup("bar").resolver
fourth = referencing.jsonschema.lookup_recursive_ref(resolver=resolver)
assert fourth.contents == root.contents
def test_multiple_lookup_recursive_ref_with_nonrecursive_ref():
one = referencing.jsonschema.DRAFT201909.create_resource(
{"$recursiveAnchor": True},
)
two = referencing.jsonschema.DRAFT201909.create_resource(
{
"$id": "http://example.com",
"$recursiveAnchor": True,
"$defs": {
"foo": {
"$id": "foo",
"$recursiveAnchor": True,
"$defs": {
"bar": True,
"baz": {
"$recursiveAnchor": True,
"$anchor": "fooAnchor",
},
},
},
},
},
)
three = referencing.jsonschema.DRAFT201909.create_resource(
{"$recursiveAnchor": False},
)
resolver = (
Registry()
.with_resources(
[
("http://example.com", three),
("http://example.com/foo/", two),
("http://example.com/foo/bar", one),
],
)
.resolver()
)
first = resolver.lookup("http://example.com")
second = first.resolver.lookup("foo/")
resolver = second.resolver.lookup("bar").resolver
fourth = referencing.jsonschema.lookup_recursive_ref(resolver=resolver)
assert fourth.contents == two.contents
def test_empty_registry():
assert referencing.jsonschema.EMPTY_REGISTRY == Registry()
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/tests/test_retrieval.py | from functools import lru_cache
import json
import pytest
from referencing import Registry, Resource, exceptions
from referencing.jsonschema import DRAFT202012
from referencing.retrieval import to_cached_resource
class TestToCachedResource:
def test_it_caches_retrieved_resources(self):
contents = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
stack = [json.dumps(contents)]
@to_cached_resource()
def retrieve(uri):
return stack.pop()
registry = Registry(retrieve=retrieve)
expected = Resource.from_contents(contents)
got = registry.get_or_retrieve("urn:example:schema")
assert got.value == expected
# And a second time we get the same value.
again = registry.get_or_retrieve("urn:example:schema")
assert again.value is got.value
def test_custom_loader(self):
contents = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
stack = [json.dumps(contents)[::-1]]
@to_cached_resource(loads=lambda s: json.loads(s[::-1]))
def retrieve(uri):
return stack.pop()
registry = Registry(retrieve=retrieve)
expected = Resource.from_contents(contents)
got = registry.get_or_retrieve("urn:example:schema")
assert got.value == expected
# And a second time we get the same value.
again = registry.get_or_retrieve("urn:example:schema")
assert again.value is got.value
def test_custom_from_contents(self):
contents = {}
stack = [json.dumps(contents)]
@to_cached_resource(from_contents=DRAFT202012.create_resource)
def retrieve(uri):
return stack.pop()
registry = Registry(retrieve=retrieve)
expected = DRAFT202012.create_resource(contents)
got = registry.get_or_retrieve("urn:example:schema")
assert got.value == expected
# And a second time we get the same value.
again = registry.get_or_retrieve("urn:example:schema")
assert again.value is got.value
def test_custom_cache(self):
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
mapping = {
"urn:example:1": dict(schema, foo=1),
"urn:example:2": dict(schema, foo=2),
"urn:example:3": dict(schema, foo=3),
}
resources = {
uri: Resource.from_contents(contents)
for uri, contents in mapping.items()
}
@to_cached_resource(cache=lru_cache(maxsize=2))
def retrieve(uri):
return json.dumps(mapping.pop(uri))
registry = Registry(retrieve=retrieve)
got = registry.get_or_retrieve("urn:example:1")
assert got.value == resources["urn:example:1"]
assert registry.get_or_retrieve("urn:example:1").value is got.value
assert registry.get_or_retrieve("urn:example:1").value is got.value
got = registry.get_or_retrieve("urn:example:2")
assert got.value == resources["urn:example:2"]
assert registry.get_or_retrieve("urn:example:2").value is got.value
assert registry.get_or_retrieve("urn:example:2").value is got.value
# This still succeeds, but evicts the first URI
got = registry.get_or_retrieve("urn:example:3")
assert got.value == resources["urn:example:3"]
assert registry.get_or_retrieve("urn:example:3").value is got.value
assert registry.get_or_retrieve("urn:example:3").value is got.value
# And now this fails (as we popped the value out of `mapping`)
with pytest.raises(exceptions.Unretrievable):
registry.get_or_retrieve("urn:example:1")
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/tests/test_core.py | from rpds import HashTrieMap
import pytest
from referencing import Anchor, Registry, Resource, Specification, exceptions
from referencing.jsonschema import DRAFT202012
ID_AND_CHILDREN = Specification(
name="id-and-children",
id_of=lambda contents: contents.get("ID"),
subresources_of=lambda contents: contents.get("children", []),
anchors_in=lambda specification, contents: [
Anchor(
name=name,
resource=specification.create_resource(contents=each),
)
for name, each in contents.get("anchors", {}).items()
],
maybe_in_subresource=lambda segments, resolver, subresource: (
resolver.in_subresource(subresource)
if not len(segments) % 2
and all(each == "children" for each in segments[::2])
else resolver
),
)
def blow_up(uri): # pragma: no cover
"""
A retriever suitable for use in tests which expect it never to be used.
"""
raise RuntimeError("This retrieve function expects to never be called!")
class TestRegistry:
def test_with_resource(self):
"""
Adding a resource to the registry then allows re-retrieving it.
"""
resource = Resource.opaque(contents={"foo": "bar"})
uri = "urn:example"
registry = Registry().with_resource(uri=uri, resource=resource)
assert registry[uri] is resource
def test_with_resources(self):
"""
Adding multiple resources to the registry is like adding each one.
"""
one = Resource.opaque(contents={})
two = Resource(contents={"foo": "bar"}, specification=ID_AND_CHILDREN)
registry = Registry().with_resources(
[
("http://example.com/1", one),
("http://example.com/foo/bar", two),
],
)
assert registry == Registry().with_resource(
uri="http://example.com/1",
resource=one,
).with_resource(
uri="http://example.com/foo/bar",
resource=two,
)
def test_matmul_resource(self):
uri = "urn:example:resource"
resource = ID_AND_CHILDREN.create_resource({"ID": uri, "foo": 12})
registry = resource @ Registry()
assert registry == Registry().with_resource(uri, resource)
def test_matmul_many_resources(self):
one_uri = "urn:example:one"
one = ID_AND_CHILDREN.create_resource({"ID": one_uri, "foo": 12})
two_uri = "urn:example:two"
two = ID_AND_CHILDREN.create_resource({"ID": two_uri, "foo": 12})
registry = [one, two] @ Registry()
assert registry == Registry().with_resources(
[(one_uri, one), (two_uri, two)],
)
def test_matmul_resource_without_id(self):
resource = Resource.opaque(contents={"foo": "bar"})
with pytest.raises(exceptions.NoInternalID) as e:
resource @ Registry()
assert e.value == exceptions.NoInternalID(resource=resource)
def test_with_contents_from_json_schema(self):
uri = "urn:example"
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
registry = Registry().with_contents([(uri, schema)])
expected = Resource(contents=schema, specification=DRAFT202012)
assert registry[uri] == expected
def test_with_contents_and_default_specification(self):
uri = "urn:example"
registry = Registry().with_contents(
[(uri, {"foo": "bar"})],
default_specification=Specification.OPAQUE,
)
assert registry[uri] == Resource.opaque({"foo": "bar"})
def test_len(self):
total = 5
registry = Registry().with_contents(
[(str(i), {"foo": "bar"}) for i in range(total)],
default_specification=Specification.OPAQUE,
)
assert len(registry) == total
def test_bool_empty(self):
assert not Registry()
def test_bool_not_empty(self):
registry = Registry().with_contents(
[(str(i), {"foo": "bar"}) for i in range(3)],
default_specification=Specification.OPAQUE,
)
assert registry
def test_iter(self):
registry = Registry().with_contents(
[(str(i), {"foo": "bar"}) for i in range(8)],
default_specification=Specification.OPAQUE,
)
assert set(registry) == {str(i) for i in range(8)}
def test_crawl_still_has_top_level_resource(self):
resource = Resource.opaque({"foo": "bar"})
uri = "urn:example"
registry = Registry({uri: resource}).crawl()
assert registry[uri] is resource
def test_crawl_finds_a_subresource(self):
child_id = "urn:child"
root = ID_AND_CHILDREN.create_resource(
{"ID": "urn:root", "children": [{"ID": child_id, "foo": 12}]},
)
registry = root @ Registry()
with pytest.raises(LookupError):
registry[child_id]
expected = ID_AND_CHILDREN.create_resource({"ID": child_id, "foo": 12})
assert registry.crawl()[child_id] == expected
def test_crawl_finds_anchors_with_id(self):
resource = ID_AND_CHILDREN.create_resource(
{"ID": "urn:bar", "anchors": {"foo": 12}},
)
registry = resource @ Registry()
assert registry.crawl().anchor(resource.id(), "foo").value == Anchor(
name="foo",
resource=ID_AND_CHILDREN.create_resource(12),
)
def test_crawl_finds_anchors_no_id(self):
resource = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}})
registry = Registry().with_resource("urn:root", resource)
assert registry.crawl().anchor("urn:root", "foo").value == Anchor(
name="foo",
resource=ID_AND_CHILDREN.create_resource(12),
)
def test_contents(self):
resource = Resource.opaque({"foo": "bar"})
uri = "urn:example"
registry = Registry().with_resource(uri, resource)
assert registry.contents(uri) == {"foo": "bar"}
def test_getitem_strips_empty_fragments(self):
uri = "http://example.com/"
resource = ID_AND_CHILDREN.create_resource({"ID": uri + "#"})
registry = resource @ Registry()
assert registry[uri] == registry[uri + "#"] == resource
def test_contents_strips_empty_fragments(self):
uri = "http://example.com/"
resource = ID_AND_CHILDREN.create_resource({"ID": uri + "#"})
registry = resource @ Registry()
assert (
registry.contents(uri)
== registry.contents(uri + "#")
== {"ID": uri + "#"}
)
def test_crawled_anchor(self):
resource = ID_AND_CHILDREN.create_resource({"anchors": {"foo": "bar"}})
registry = Registry().with_resource("urn:example", resource)
retrieved = registry.anchor("urn:example", "foo")
assert retrieved.value == Anchor(
name="foo",
resource=ID_AND_CHILDREN.create_resource("bar"),
)
assert retrieved.registry == registry.crawl()
def test_anchor_in_nonexistent_resource(self):
registry = Registry()
with pytest.raises(exceptions.NoSuchResource) as e:
registry.anchor("urn:example", "foo")
assert e.value == exceptions.NoSuchResource(ref="urn:example")
def test_init(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
registry = Registry(
{
"http://example.com/1": one,
"http://example.com/foo/bar": two,
},
)
assert (
registry
== Registry()
.with_resources(
[
("http://example.com/1", one),
("http://example.com/foo/bar", two),
],
)
.crawl()
)
def test_dict_conversion(self):
"""
Passing a `dict` to `Registry` gets converted to a `HashTrieMap`.
So continuing to use the registry works.
"""
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
registry = Registry(
{"http://example.com/1": one},
).with_resource("http://example.com/foo/bar", two)
assert (
registry.crawl()
== Registry()
.with_resources(
[
("http://example.com/1", one),
("http://example.com/foo/bar", two),
],
)
.crawl()
)
def test_no_such_resource(self):
registry = Registry()
with pytest.raises(exceptions.NoSuchResource) as e:
registry["urn:bigboom"]
assert e.value == exceptions.NoSuchResource(ref="urn:bigboom")
def test_combine(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
four = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}})
first = Registry({"http://example.com/1": one})
second = Registry().with_resource("http://example.com/foo/bar", two)
third = Registry(
{
"http://example.com/1": one,
"http://example.com/baz": three,
},
)
fourth = (
Registry()
.with_resource(
"http://example.com/foo/quux",
four,
)
.crawl()
)
assert first.combine(second, third, fourth) == Registry(
[
("http://example.com/1", one),
("http://example.com/baz", three),
("http://example.com/foo/quux", four),
],
anchors=HashTrieMap(
{
("http://example.com/foo/quux", "foo"): Anchor(
name="foo",
resource=ID_AND_CHILDREN.create_resource(12),
),
},
),
).with_resource("http://example.com/foo/bar", two)
def test_combine_self(self):
"""
Combining a registry with itself short-circuits.
This is a performance optimization -- otherwise we do lots more work
(in jsonschema this seems to correspond to making the test suite take
*3x* longer).
"""
registry = Registry({"urn:foo": "bar"})
assert registry.combine(registry) is registry
def test_combine_with_uncrawled_resources(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
first = Registry().with_resource("http://example.com/1", one)
second = Registry().with_resource("http://example.com/foo/bar", two)
third = Registry(
{
"http://example.com/1": one,
"http://example.com/baz": three,
},
)
expected = Registry(
[
("http://example.com/1", one),
("http://example.com/foo/bar", two),
("http://example.com/baz", three),
],
)
combined = first.combine(second, third)
assert combined != expected
assert combined.crawl() == expected
def test_combine_with_single_retrieve(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
def retrieve(uri): # pragma: no cover
pass
first = Registry().with_resource("http://example.com/1", one)
second = Registry(
retrieve=retrieve,
).with_resource("http://example.com/2", two)
third = Registry().with_resource("http://example.com/3", three)
assert first.combine(second, third) == Registry(
retrieve=retrieve,
).with_resources(
[
("http://example.com/1", one),
("http://example.com/2", two),
("http://example.com/3", three),
],
)
assert second.combine(first, third) == Registry(
retrieve=retrieve,
).with_resources(
[
("http://example.com/1", one),
("http://example.com/2", two),
("http://example.com/3", three),
],
)
def test_combine_with_common_retrieve(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
def retrieve(uri): # pragma: no cover
pass
first = Registry(retrieve=retrieve).with_resource(
"http://example.com/1",
one,
)
second = Registry(
retrieve=retrieve,
).with_resource("http://example.com/2", two)
third = Registry(retrieve=retrieve).with_resource(
"http://example.com/3",
three,
)
assert first.combine(second, third) == Registry(
retrieve=retrieve,
).with_resources(
[
("http://example.com/1", one),
("http://example.com/2", two),
("http://example.com/3", three),
],
)
assert second.combine(first, third) == Registry(
retrieve=retrieve,
).with_resources(
[
("http://example.com/1", one),
("http://example.com/2", two),
("http://example.com/3", three),
],
)
def test_combine_conflicting_retrieve(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
three = ID_AND_CHILDREN.create_resource({"baz": "quux"})
def foo_retrieve(uri): # pragma: no cover
pass
def bar_retrieve(uri): # pragma: no cover
pass
first = Registry(retrieve=foo_retrieve).with_resource(
"http://example.com/1",
one,
)
second = Registry().with_resource("http://example.com/2", two)
third = Registry(retrieve=bar_retrieve).with_resource(
"http://example.com/3",
three,
)
with pytest.raises(Exception, match="conflict.*retriev"):
first.combine(second, third)
def test_remove(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
registry = Registry({"urn:foo": one, "urn:bar": two})
assert registry.remove("urn:foo") == Registry({"urn:bar": two})
def test_remove_uncrawled(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
registry = Registry().with_resources(
[("urn:foo", one), ("urn:bar", two)],
)
assert registry.remove("urn:foo") == Registry().with_resource(
"urn:bar",
two,
)
def test_remove_with_anchors(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"anchors": {"foo": "bar"}})
registry = (
Registry()
.with_resources(
[("urn:foo", one), ("urn:bar", two)],
)
.crawl()
)
assert (
registry.remove("urn:bar")
== Registry()
.with_resource(
"urn:foo",
one,
)
.crawl()
)
def test_remove_nonexistent_uri(self):
with pytest.raises(exceptions.NoSuchResource) as e:
Registry().remove("urn:doesNotExist")
assert e.value == exceptions.NoSuchResource(ref="urn:doesNotExist")
def test_retrieve(self):
foo = Resource.opaque({"foo": "bar"})
registry = Registry(retrieve=lambda uri: foo)
assert registry.get_or_retrieve("urn:example").value == foo
def test_retrieve_arbitrary_exception(self):
foo = Resource.opaque({"foo": "bar"})
def retrieve(uri):
if uri == "urn:succeed":
return foo
raise Exception("Oh no!")
registry = Registry(retrieve=retrieve)
assert registry.get_or_retrieve("urn:succeed").value == foo
with pytest.raises(exceptions.Unretrievable):
registry.get_or_retrieve("urn:uhoh")
def test_retrieve_no_such_resource(self):
foo = Resource.opaque({"foo": "bar"})
def retrieve(uri):
if uri == "urn:succeed":
return foo
raise exceptions.NoSuchResource(ref=uri)
registry = Registry(retrieve=retrieve)
assert registry.get_or_retrieve("urn:succeed").value == foo
with pytest.raises(exceptions.NoSuchResource):
registry.get_or_retrieve("urn:uhoh")
def test_retrieve_cannot_determine_specification(self):
def retrieve(uri):
return Resource.from_contents({})
registry = Registry(retrieve=retrieve)
with pytest.raises(exceptions.CannotDetermineSpecification):
registry.get_or_retrieve("urn:uhoh")
def test_retrieve_already_available_resource(self):
foo = Resource.opaque({"foo": "bar"})
registry = Registry({"urn:example": foo}, retrieve=blow_up)
assert registry["urn:example"] == foo
assert registry.get_or_retrieve("urn:example").value == foo
def test_retrieve_first_checks_crawlable_resource(self):
child = ID_AND_CHILDREN.create_resource({"ID": "urn:child", "foo": 12})
root = ID_AND_CHILDREN.create_resource({"children": [child.contents]})
registry = Registry(retrieve=blow_up).with_resource("urn:root", root)
assert registry.crawl()["urn:child"] == child
def test_resolver(self):
one = Resource.opaque(contents={})
registry = Registry({"http://example.com": one})
resolver = registry.resolver(base_uri="http://example.com")
assert resolver.lookup("#").contents == {}
def test_resolver_with_root_identified(self):
root = ID_AND_CHILDREN.create_resource({"ID": "http://example.com"})
resolver = Registry().resolver_with_root(root)
assert resolver.lookup("http://example.com").contents == root.contents
assert resolver.lookup("#").contents == root.contents
def test_resolver_with_root_unidentified(self):
root = Resource.opaque(contents={})
resolver = Registry().resolver_with_root(root)
assert resolver.lookup("#").contents == root.contents
def test_repr(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
registry = Registry().with_resources(
[
("http://example.com/1", one),
("http://example.com/foo/bar", two),
],
)
assert repr(registry) == "<Registry (2 uncrawled resources)>"
assert repr(registry.crawl()) == "<Registry (2 resources)>"
def test_repr_mixed_crawled(self):
one = Resource.opaque(contents={})
two = ID_AND_CHILDREN.create_resource({"foo": "bar"})
registry = (
Registry(
{"http://example.com/1": one},
)
.crawl()
.with_resource(uri="http://example.com/foo/bar", resource=two)
)
assert repr(registry) == "<Registry (2 resources, 1 uncrawled)>"
def test_repr_one_resource(self):
registry = Registry().with_resource(
uri="http://example.com/1",
resource=Resource.opaque(contents={}),
)
assert repr(registry) == "<Registry (1 uncrawled resource)>"
def test_repr_empty(self):
assert repr(Registry()) == "<Registry (0 resources)>"
class TestResource:
def test_from_contents_from_json_schema(self):
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
resource = Resource.from_contents(schema)
assert resource == Resource(contents=schema, specification=DRAFT202012)
def test_from_contents_with_no_discernible_information(self):
"""
Creating a resource with no discernible way to see what
specification it belongs to (e.g. no ``$schema`` keyword for JSON
Schema) raises an error.
"""
with pytest.raises(exceptions.CannotDetermineSpecification):
Resource.from_contents({"foo": "bar"})
def test_from_contents_with_no_discernible_information_and_default(self):
resource = Resource.from_contents(
{"foo": "bar"},
default_specification=Specification.OPAQUE,
)
assert resource == Resource.opaque(contents={"foo": "bar"})
def test_from_contents_unneeded_default(self):
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
resource = Resource.from_contents(
schema,
default_specification=Specification.OPAQUE,
)
assert resource == Resource(
contents=schema,
specification=DRAFT202012,
)
def test_non_mapping_from_contents(self):
resource = Resource.from_contents(
True,
default_specification=ID_AND_CHILDREN,
)
assert resource == Resource(
contents=True,
specification=ID_AND_CHILDREN,
)
def test_from_contents_with_fallback(self):
resource = Resource.from_contents(
{"foo": "bar"},
default_specification=Specification.OPAQUE,
)
assert resource == Resource.opaque(contents={"foo": "bar"})
def test_id_delegates_to_specification(self):
specification = Specification(
name="",
id_of=lambda contents: "urn:fixedID",
subresources_of=lambda contents: [],
anchors_in=lambda specification, contents: [],
maybe_in_subresource=(
lambda segments, resolver, subresource: resolver
),
)
resource = Resource(
contents={"foo": "baz"},
specification=specification,
)
assert resource.id() == "urn:fixedID"
def test_id_strips_empty_fragment(self):
uri = "http://example.com/"
root = ID_AND_CHILDREN.create_resource({"ID": uri + "#"})
assert root.id() == uri
def test_subresources_delegates_to_specification(self):
resource = ID_AND_CHILDREN.create_resource({"children": [{}, 12]})
assert list(resource.subresources()) == [
ID_AND_CHILDREN.create_resource(each) for each in [{}, 12]
]
def test_subresource_with_different_specification(self):
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
resource = ID_AND_CHILDREN.create_resource({"children": [schema]})
assert list(resource.subresources()) == [
DRAFT202012.create_resource(schema),
]
def test_anchors_delegates_to_specification(self):
resource = ID_AND_CHILDREN.create_resource(
{"anchors": {"foo": {}, "bar": 1, "baz": ""}},
)
assert list(resource.anchors()) == [
Anchor(name="foo", resource=ID_AND_CHILDREN.create_resource({})),
Anchor(name="bar", resource=ID_AND_CHILDREN.create_resource(1)),
Anchor(name="baz", resource=ID_AND_CHILDREN.create_resource("")),
]
def test_pointer_to_mapping(self):
resource = Resource.opaque(contents={"foo": "baz"})
resolver = Registry().resolver()
assert resource.pointer("/foo", resolver=resolver).contents == "baz"
def test_pointer_to_array(self):
resource = Resource.opaque(contents={"foo": {"bar": [3]}})
resolver = Registry().resolver()
assert resource.pointer("/foo/bar/0", resolver=resolver).contents == 3
def test_opaque(self):
contents = {"foo": "bar"}
assert Resource.opaque(contents) == Resource(
contents=contents,
specification=Specification.OPAQUE,
)
class TestResolver:
def test_lookup_exact_uri(self):
resource = Resource.opaque(contents={"foo": "baz"})
resolver = Registry({"http://example.com/1": resource}).resolver()
resolved = resolver.lookup("http://example.com/1")
assert resolved.contents == resource.contents
def test_lookup_subresource(self):
root = ID_AND_CHILDREN.create_resource(
{
"ID": "http://example.com/",
"children": [
{"ID": "http://example.com/a", "foo": 12},
],
},
)
registry = root @ Registry()
resolved = registry.resolver().lookup("http://example.com/a")
assert resolved.contents == {"ID": "http://example.com/a", "foo": 12}
def test_lookup_anchor_with_id(self):
root = ID_AND_CHILDREN.create_resource(
{
"ID": "http://example.com/",
"anchors": {"foo": 12},
},
)
registry = root @ Registry()
resolved = registry.resolver().lookup("http://example.com/#foo")
assert resolved.contents == 12
def test_lookup_anchor_without_id(self):
root = ID_AND_CHILDREN.create_resource({"anchors": {"foo": 12}})
resolver = Registry().with_resource("urn:example", root).resolver()
resolved = resolver.lookup("urn:example#foo")
assert resolved.contents == 12
def test_lookup_unknown_reference(self):
resolver = Registry().resolver()
ref = "http://example.com/does/not/exist"
with pytest.raises(exceptions.Unresolvable) as e:
resolver.lookup(ref)
assert e.value == exceptions.Unresolvable(ref=ref)
def test_lookup_non_existent_pointer(self):
resource = Resource.opaque({"foo": {}})
resolver = Registry({"http://example.com/1": resource}).resolver()
ref = "http://example.com/1#/foo/bar"
with pytest.raises(exceptions.Unresolvable) as e:
resolver.lookup(ref)
assert e.value == exceptions.PointerToNowhere(
ref="/foo/bar",
resource=resource,
)
assert str(e.value) == "'/foo/bar' does not exist within {'foo': {}}"
def test_lookup_non_existent_pointer_to_array_index(self):
resource = Resource.opaque([1, 2, 4, 8])
resolver = Registry({"http://example.com/1": resource}).resolver()
ref = "http://example.com/1#/10"
with pytest.raises(exceptions.Unresolvable) as e:
resolver.lookup(ref)
assert e.value == exceptions.PointerToNowhere(
ref="/10",
resource=resource,
)
def test_lookup_pointer_to_empty_string(self):
resolver = Registry().resolver_with_root(Resource.opaque({"": {}}))
assert resolver.lookup("#/").contents == {}
def test_lookup_non_existent_pointer_to_empty_string(self):
resource = Resource.opaque({"foo": {}})
resolver = Registry().resolver_with_root(resource)
with pytest.raises(
exceptions.Unresolvable,
match="^'/' does not exist within {'foo': {}}.*'#'",
) as e:
resolver.lookup("#/")
assert e.value == exceptions.PointerToNowhere(
ref="/",
resource=resource,
)
def test_lookup_non_existent_anchor(self):
root = ID_AND_CHILDREN.create_resource({"anchors": {}})
resolver = Registry().with_resource("urn:example", root).resolver()
resolved = resolver.lookup("urn:example")
assert resolved.contents == root.contents
ref = "urn:example#noSuchAnchor"
with pytest.raises(exceptions.Unresolvable) as e:
resolver.lookup(ref)
assert "'noSuchAnchor' does not exist" in str(e.value)
assert e.value == exceptions.NoSuchAnchor(
ref="urn:example",
resource=root,
anchor="noSuchAnchor",
)
def test_lookup_invalid_JSON_pointerish_anchor(self):
resolver = Registry().resolver_with_root(
ID_AND_CHILDREN.create_resource(
{
"ID": "http://example.com/",
"foo": {"bar": 12},
},
),
)
valid = resolver.lookup("#/foo/bar")
assert valid.contents == 12
with pytest.raises(exceptions.InvalidAnchor) as e:
resolver.lookup("#foo/bar")
assert " '#/foo/bar'" in str(e.value)
def test_lookup_retrieved_resource(self):
resource = Resource.opaque(contents={"foo": "baz"})
resolver = Registry(retrieve=lambda uri: resource).resolver()
resolved = resolver.lookup("http://example.com/")
assert resolved.contents == resource.contents
def test_lookup_failed_retrieved_resource(self):
"""
Unretrievable exceptions are also wrapped in Unresolvable.
"""
uri = "http://example.com/"
registry = Registry(retrieve=blow_up)
with pytest.raises(exceptions.Unretrievable):
registry.get_or_retrieve(uri)
resolver = registry.resolver()
with pytest.raises(exceptions.Unresolvable):
resolver.lookup(uri)
def test_repeated_lookup_from_retrieved_resource(self):
"""
A (custom-)retrieved resource is added to the registry returned by
looking it up.
"""
resource = Resource.opaque(contents={"foo": "baz"})
once = [resource]
def retrieve(uri):
return once.pop()
resolver = Registry(retrieve=retrieve).resolver()
resolved = resolver.lookup("http://example.com/")
assert resolved.contents == resource.contents
resolved = resolved.resolver.lookup("http://example.com/")
assert resolved.contents == resource.contents
def test_repeated_anchor_lookup_from_retrieved_resource(self):
resource = Resource.opaque(contents={"foo": "baz"})
once = [resource]
def retrieve(uri):
return once.pop()
resolver = Registry(retrieve=retrieve).resolver()
resolved = resolver.lookup("http://example.com/")
assert resolved.contents == resource.contents
resolved = resolved.resolver.lookup("#")
assert resolved.contents == resource.contents
# FIXME: The tests below aren't really representable in the current
# suite, though we should probably think of ways to do so.
def test_in_subresource(self):
root = ID_AND_CHILDREN.create_resource(
{
"ID": "http://example.com/",
"children": [
{
"ID": "child/",
"children": [{"ID": "grandchild"}],
},
],
},
)
registry = root @ Registry()
resolver = registry.resolver()
first = resolver.lookup("http://example.com/")
assert first.contents == root.contents
with pytest.raises(exceptions.Unresolvable):
first.resolver.lookup("grandchild")
sub = first.resolver.in_subresource(
ID_AND_CHILDREN.create_resource(first.contents["children"][0]),
)
second = sub.lookup("grandchild")
assert second.contents == {"ID": "grandchild"}
def test_in_pointer_subresource(self):
root = ID_AND_CHILDREN.create_resource(
{
"ID": "http://example.com/",
"children": [
{
"ID": "child/",
"children": [{"ID": "grandchild"}],
},
],
},
)
registry = root @ Registry()
resolver = registry.resolver()
first = resolver.lookup("http://example.com/")
assert first.contents == root.contents
with pytest.raises(exceptions.Unresolvable):
first.resolver.lookup("grandchild")
second = first.resolver.lookup("#/children/0")
third = second.resolver.lookup("grandchild")
assert third.contents == {"ID": "grandchild"}
def test_dynamic_scope(self):
one = ID_AND_CHILDREN.create_resource(
{
"ID": "http://example.com/",
"children": [
{
"ID": "child/",
"children": [{"ID": "grandchild"}],
},
],
},
)
two = ID_AND_CHILDREN.create_resource(
{
"ID": "http://example.com/two",
"children": [{"ID": "two-child/"}],
},
)
registry = [one, two] @ Registry()
resolver = registry.resolver()
first = resolver.lookup("http://example.com/")
second = first.resolver.lookup("#/children/0")
third = second.resolver.lookup("grandchild")
fourth = third.resolver.lookup("http://example.com/two")
assert list(fourth.resolver.dynamic_scope()) == [
("http://example.com/child/grandchild", fourth.resolver._registry),
("http://example.com/child/", fourth.resolver._registry),
("http://example.com/", fourth.resolver._registry),
]
assert list(third.resolver.dynamic_scope()) == [
("http://example.com/child/", third.resolver._registry),
("http://example.com/", third.resolver._registry),
]
assert list(second.resolver.dynamic_scope()) == [
("http://example.com/", second.resolver._registry),
]
assert list(first.resolver.dynamic_scope()) == []
class TestSpecification:
def test_create_resource(self):
specification = Specification(
name="",
id_of=lambda contents: "urn:fixedID",
subresources_of=lambda contents: [],
anchors_in=lambda specification, contents: [],
maybe_in_subresource=(
lambda segments, resolver, subresource: resolver
),
)
resource = specification.create_resource(contents={"foo": "baz"})
assert resource == Resource(
contents={"foo": "baz"},
specification=specification,
)
assert resource.id() == "urn:fixedID"
def test_detect_from_json_schema(self):
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
specification = Specification.detect(schema)
assert specification == DRAFT202012
def test_detect_with_no_discernible_information(self):
with pytest.raises(exceptions.CannotDetermineSpecification):
Specification.detect({"foo": "bar"})
def test_detect_with_no_discernible_information_and_default(self):
specification = Specification.OPAQUE.detect({"foo": "bar"})
assert specification is Specification.OPAQUE
def test_detect_unneeded_default(self):
schema = {"$schema": "https://json-schema.org/draft/2020-12/schema"}
specification = Specification.OPAQUE.detect(schema)
assert specification == DRAFT202012
def test_non_mapping_detect(self):
with pytest.raises(exceptions.CannotDetermineSpecification):
Specification.detect(True)
def test_non_mapping_detect_with_default(self):
specification = ID_AND_CHILDREN.detect(True)
assert specification is ID_AND_CHILDREN
def test_detect_with_fallback(self):
specification = Specification.OPAQUE.detect({"foo": "bar"})
assert specification is Specification.OPAQUE
def test_repr(self):
assert (
repr(ID_AND_CHILDREN) == "<Specification name='id-and-children'>"
)
class TestOpaqueSpecification:
THINGS = [{"foo": "bar"}, True, 37, "foo", object()]
@pytest.mark.parametrize("thing", THINGS)
def test_no_id(self, thing):
"""
An arbitrary thing has no ID.
"""
assert Specification.OPAQUE.id_of(thing) is None
@pytest.mark.parametrize("thing", THINGS)
def test_no_subresources(self, thing):
"""
An arbitrary thing has no subresources.
"""
assert list(Specification.OPAQUE.subresources_of(thing)) == []
@pytest.mark.parametrize("thing", THINGS)
def test_no_anchors(self, thing):
"""
An arbitrary thing has no anchors.
"""
assert list(Specification.OPAQUE.anchors_in(thing)) == []
@pytest.mark.parametrize(
"cls",
[Anchor, Registry, Resource, Specification, exceptions.PointerToNowhere],
)
def test_nonsubclassable(cls):
with pytest.raises(Exception, match="(?i)subclassing"):
class Boom(cls): # pragma: no cover
pass
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/tests/test_exceptions.py | import itertools
import pytest
from referencing import Resource, exceptions
def pairs(choices):
return itertools.combinations(choices, 2)
TRUE = Resource.opaque(True)
thunks = (
lambda: exceptions.CannotDetermineSpecification(TRUE),
lambda: exceptions.NoSuchResource("urn:example:foo"),
lambda: exceptions.NoInternalID(TRUE),
lambda: exceptions.InvalidAnchor(resource=TRUE, anchor="foo", ref="a#b"),
lambda: exceptions.NoSuchAnchor(resource=TRUE, anchor="foo", ref="a#b"),
lambda: exceptions.PointerToNowhere(resource=TRUE, ref="urn:example:foo"),
lambda: exceptions.Unresolvable("urn:example:foo"),
lambda: exceptions.Unretrievable("urn:example:foo"),
)
@pytest.mark.parametrize("one, two", pairs(each() for each in thunks))
def test_eq_incompatible_types(one, two):
assert one != two
@pytest.mark.parametrize("thunk", thunks)
def test_hash(thunk):
assert thunk() in {thunk()}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/referencing/tests/test_referencing_suite.py | from pathlib import Path
import json
import os
import pytest
from referencing import Registry
from referencing.exceptions import Unresolvable
import referencing.jsonschema
class SuiteNotFound(Exception):
def __str__(self): # pragma: no cover
return (
"Cannot find the referencing suite. "
"Set the REFERENCING_SUITE environment variable to the path to "
"the suite, or run the test suite from alongside a full checkout "
"of the git repository."
)
if "REFERENCING_SUITE" in os.environ: # pragma: no cover
SUITE = Path(os.environ["REFERENCING_SUITE"]) / "tests"
else:
SUITE = Path(__file__).parent.parent.parent / "suite/tests"
if not SUITE.is_dir(): # pragma: no cover
raise SuiteNotFound()
DIALECT_IDS = json.loads(SUITE.joinpath("specifications.json").read_text())
@pytest.mark.parametrize(
"test_path",
[
pytest.param(each, id=f"{each.parent.name}-{each.stem}")
for each in SUITE.glob("*/**/*.json")
],
)
def test_referencing_suite(test_path, subtests):
dialect_id = DIALECT_IDS[test_path.relative_to(SUITE).parts[0]]
specification = referencing.jsonschema.specification_with(dialect_id)
loaded = json.loads(test_path.read_text())
registry = loaded["registry"]
registry = Registry().with_resources(
(uri, specification.create_resource(contents))
for uri, contents in loaded["registry"].items()
)
for test in loaded["tests"]:
with subtests.test(test=test):
resolver = registry.resolver(base_uri=test.get("base_uri", ""))
if test.get("error"):
with pytest.raises(Unresolvable):
resolver.lookup(test["ref"])
else:
resolved = resolver.lookup(test["ref"])
assert resolved.contents == test["target"]
then = test.get("then")
while then: # pragma: no cover
with subtests.test(test=test, then=then):
resolved = resolved.resolver.lookup(then["ref"])
assert resolved.contents == then["target"]
then = then.get("then")
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/authenticators.py | """Classes to assist in authenticating to APIs."""
from __future__ import annotations
import base64
import math
from datetime import datetime, timedelta
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, Mapping
from urllib.parse import parse_qs, urlencode, urlsplit, urlunsplit
import jwt
import requests
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from singer_sdk.helpers._util import utc_now
if TYPE_CHECKING:
import logging
from singer_sdk.streams import Stream as RESTStreamBase
def _add_parameters(initial_url: str, extra_parameters: dict) -> str:
"""Add parameters to an URL and return the new URL.
Args:
initial_url: The URL to add parameters to.
extra_parameters: The parameters to add.
Returns:
The new URL with the parameters added.
"""
scheme, netloc, path, query_string, fragment = urlsplit(initial_url)
query_params = parse_qs(query_string)
query_params.update(
{
parameter_name: [parameter_value]
for parameter_name, parameter_value in extra_parameters.items()
},
)
new_query_string = urlencode(query_params, doseq=True)
return urlunsplit((scheme, netloc, path, new_query_string, fragment))
class SingletonMeta(type):
"""A general purpose singleton metaclass."""
def __init__(cls, name: str, bases: tuple[type], dic: dict) -> None:
"""Init metaclass.
The single instance is saved as an attribute of the the metaclass.
Args:
name: Name of the derived class.
bases: Base types of the derived class.
dic: Class dictionary of the derived class.
"""
cls.__single_instance = None
super().__init__(name, bases, dic)
def __call__(cls, *args: Any, **kwargs: Any) -> Any: # noqa: ANN401
"""Create or reuse the singleton.
Args:
args: Class constructor positional arguments.
kwargs: Class constructor keyword arguments.
Returns:
A singleton instance of the derived class.
"""
if cls.__single_instance:
return cls.__single_instance
single_obj = cls.__new__(cls, None) # type: ignore[call-overload]
single_obj.__init__(*args, **kwargs)
cls.__single_instance = single_obj
return single_obj
class APIAuthenticatorBase:
"""Base class for offloading API auth."""
def __init__(self, stream: RESTStreamBase) -> None:
"""Init authenticator.
Args:
stream: A stream for a RESTful endpoint.
"""
self.tap_name: str = stream.tap_name
self._config: dict[str, Any] = dict(stream.config)
self._auth_headers: dict[str, Any] = {}
self._auth_params: dict[str, Any] = {}
self.logger: logging.Logger = stream.logger
@property
def config(self) -> Mapping[str, Any]:
"""Get stream or tap config.
Returns:
A frozen (read-only) config dictionary map.
"""
return MappingProxyType(self._config)
@property
def auth_headers(self) -> dict:
"""Get headers.
Returns:
HTTP headers for authentication.
"""
return self._auth_headers or {}
@property
def auth_params(self) -> dict:
"""Get query parameters.
Returns:
URL query parameters for authentication.
"""
return self._auth_params or {}
def authenticate_request(
self,
request: requests.PreparedRequest,
) -> requests.PreparedRequest:
"""Authenticate a request.
Args:
request: A `request object`_.
Returns:
The authenticated request object.
.. _request object:
https://requests.readthedocs.io/en/latest/api/#requests.PreparedRequest
"""
request.headers.update(self.auth_headers)
if request.url:
request.url = _add_parameters(request.url, self.auth_params)
return request
def __call__(self, r: requests.PreparedRequest) -> requests.PreparedRequest:
"""Authenticate a request.
Calls
:meth:`~singer_sdk.authenticators.APIAuthenticatorBase.authenticate_request`
and returns the result.
Args:
r: A `request object`_.
Returns:
The authenticated request object.
.. _request object:
https://requests.readthedocs.io/en/latest/api/#requests.PreparedRequest
"""
return self.authenticate_request(r)
class SimpleAuthenticator(APIAuthenticatorBase):
"""DEPRECATED: Please use a more specific authenticator.
This authenticator will merge a key-value pair to the stream
in either the request headers or query parameters.
"""
def __init__(
self,
stream: RESTStreamBase,
auth_headers: dict | None = None,
) -> None:
"""Create a new authenticator.
If auth_headers is provided, it will be merged with http_headers specified on
the stream.
Args:
stream: The stream instance to use with this authenticator.
auth_headers: Authentication headers.
"""
super().__init__(stream=stream)
if self._auth_headers is None:
self._auth_headers = {}
if auth_headers:
self._auth_headers.update(auth_headers)
class APIKeyAuthenticator(APIAuthenticatorBase):
"""Implements API key authentication for REST Streams.
This authenticator will merge a key-value pair with either the
HTTP headers or query parameters specified on the stream. Common
examples of key names are "x-api-key" and "Authorization" but
any key-value pair may be used for this authenticator.
"""
def __init__(
self,
stream: RESTStreamBase,
key: str,
value: str,
location: str = "header",
) -> None:
"""Create a new authenticator.
Args:
stream: The stream instance to use with this authenticator.
key: API key parameter name.
value: API key value.
location: Where the API key is to be added. Either 'header' or 'params'.
Raises:
ValueError: If the location value is not 'header' or 'params'.
"""
super().__init__(stream=stream)
auth_credentials = {key: value}
if location not in ["header", "params"]:
raise ValueError("`type` must be one of 'header' or 'params'.")
if location == "header":
if self._auth_headers is None:
self._auth_headers = {}
self._auth_headers.update(auth_credentials)
elif location == "params":
if self._auth_params is None:
self._auth_params = {}
self._auth_params.update(auth_credentials)
@classmethod
def create_for_stream(
cls: type[APIKeyAuthenticator],
stream: RESTStreamBase,
key: str,
value: str,
location: str,
) -> APIKeyAuthenticator:
"""Create an Authenticator object specific to the Stream class.
Args:
stream: The stream instance to use with this authenticator.
key: API key parameter name.
value: API key value.
location: Where the API key is to be added. Either 'header' or 'params'.
Returns:
APIKeyAuthenticator: A new
:class:`singer_sdk.authenticators.APIKeyAuthenticator` instance.
"""
return cls(stream=stream, key=key, value=value, location=location)
class BearerTokenAuthenticator(APIAuthenticatorBase):
"""Implements bearer token authentication for REST Streams.
This Authenticator implements Bearer Token authentication. The token
is a text string, included in the request header and prefixed with
'Bearer '. The token will be merged with HTTP headers on the stream.
"""
def __init__(self, stream: RESTStreamBase, token: str) -> None:
"""Create a new authenticator.
Args:
stream: The stream instance to use with this authenticator.
token: Authentication token.
"""
super().__init__(stream=stream)
auth_credentials = {"Authorization": f"Bearer {token}"}
if self._auth_headers is None:
self._auth_headers = {}
self._auth_headers.update(auth_credentials)
@classmethod
def create_for_stream(
cls: type[BearerTokenAuthenticator],
stream: RESTStreamBase,
token: str,
) -> BearerTokenAuthenticator:
"""Create an Authenticator object specific to the Stream class.
Args:
stream: The stream instance to use with this authenticator.
token: Authentication token.
Returns:
BearerTokenAuthenticator: A new
:class:`singer_sdk.authenticators.BearerTokenAuthenticator` instance.
"""
return cls(stream=stream, token=token)
class BasicAuthenticator(APIAuthenticatorBase):
"""Implements basic authentication for REST Streams.
This Authenticator implements basic authentication by concatinating a
username and password then base64 encoding the string. The resulting
token will be merged with any HTTP headers specified on the stream.
"""
def __init__(
self,
stream: RESTStreamBase,
username: str,
password: str,
) -> None:
"""Create a new authenticator.
Args:
stream: The stream instance to use with this authenticator.
username: API username.
password: API password.
"""
super().__init__(stream=stream)
credentials = f"{username}:{password}".encode()
auth_token = base64.b64encode(credentials).decode("ascii")
auth_credentials = {"Authorization": f"Basic {auth_token}"}
if self._auth_headers is None:
self._auth_headers = {}
self._auth_headers.update(auth_credentials)
@classmethod
def create_for_stream(
cls: type[BasicAuthenticator],
stream: RESTStreamBase,
username: str,
password: str,
) -> BasicAuthenticator:
"""Create an Authenticator object specific to the Stream class.
Args:
stream: The stream instance to use with this authenticator.
username: API username.
password: API password.
Returns:
BasicAuthenticator: A new
:class:`singer_sdk.authenticators.BasicAuthenticator` instance.
"""
return cls(stream=stream, username=username, password=password)
class OAuthAuthenticator(APIAuthenticatorBase):
"""API Authenticator for OAuth 2.0 flows."""
def __init__(
self,
stream: RESTStreamBase,
auth_endpoint: str | None = None,
oauth_scopes: str | None = None,
default_expiration: int | None = None,
) -> None:
"""Create a new authenticator.
Args:
stream: The stream instance to use with this authenticator.
auth_endpoint: The OAuth 2.0 authorization endpoint.
oauth_scopes: A comma-separated list of OAuth scopes.
default_expiration: Default token expiry in seconds.
"""
super().__init__(stream=stream)
self._auth_endpoint = auth_endpoint
self._default_expiration = default_expiration
self._oauth_scopes = oauth_scopes
# Initialize internal tracking attributes
self.access_token: str | None = None
self.refresh_token: str | None = None
self.last_refreshed: datetime | None = None
self.expires_in: int | None = None
@property
def auth_headers(self) -> dict:
"""Return a dictionary of auth headers to be applied.
These will be merged with any `http_headers` specified in the stream.
Returns:
HTTP headers for authentication.
"""
if not self.is_token_valid():
self.update_access_token()
result = super().auth_headers
result["Authorization"] = f"Bearer {self.access_token}"
return result
@property
def auth_endpoint(self) -> str:
"""Get the authorization endpoint.
Returns:
The API authorization endpoint if it is set.
Raises:
ValueError: If the endpoint is not set.
"""
if not self._auth_endpoint:
raise ValueError("Authorization endpoint not set.")
return self._auth_endpoint
@property
def oauth_scopes(self) -> str | None:
"""Get OAuth scopes.
Returns:
String of OAuth scopes, or None if not set.
"""
return self._oauth_scopes
@property
def oauth_request_payload(self) -> dict:
"""Get request body.
Returns:
A plain (OAuth) or encrypted (JWT) request body.
"""
return self.oauth_request_body
@property
def oauth_request_body(self) -> dict:
"""Get formatted body of the OAuth authorization request.
Sample implementation:
.. highlight:: python
.. code-block:: python
@property
def oauth_request_body(self) -> dict:
return {
'grant_type': 'password',
'scope': 'https://api.powerbi.com',
'resource': 'https://analysis.windows.net/powerbi/api',
'client_id': self.config["client_id"],
'username': self.config.get("username", self.config["client_id"]),
'password': self.config["password"],
}
Raises:
NotImplementedError: If derived class does not override this method.
"""
raise NotImplementedError(
"The `oauth_request_body` property was not defined in the subclass.",
)
@property
def client_id(self) -> str | None:
"""Get client ID string to be used in authentication.
Returns:
Optional client secret from stream config if it has been set.
"""
if self.config:
return self.config.get("client_id")
return None
@property
def client_secret(self) -> str | None:
"""Get client secret to be used in authentication.
Returns:
Optional client secret from stream config if it has been set.
"""
if self.config:
return self.config.get("client_secret")
return None
def is_token_valid(self) -> bool:
"""Check if token is valid.
Returns:
True if the token is valid (fresh).
"""
if self.last_refreshed is None:
return False
if not self.expires_in:
return True
if self.expires_in > (utc_now() - self.last_refreshed).total_seconds():
return True
return False
# Authentication and refresh
def update_access_token(self) -> None:
"""Update `access_token` along with: `last_refreshed` and `expires_in`.
Raises:
RuntimeError: When OAuth login fails.
"""
request_time = utc_now()
auth_request_payload = self.oauth_request_payload
token_response = requests.post(
self.auth_endpoint,
data=auth_request_payload,
timeout=60,
)
try:
token_response.raise_for_status()
except requests.HTTPError as ex:
raise RuntimeError(
f"Failed OAuth login, response was '{token_response.json()}'. {ex}",
) from ex
self.logger.info("OAuth authorization attempt was successful.")
token_json = token_response.json()
self.access_token = token_json["access_token"]
self.expires_in = token_json.get("expires_in", self._default_expiration)
if self.expires_in is None:
self.logger.debug(
"No expires_in receied in OAuth response and no "
"default_expiration set. Token will be treated as if it never "
"expires.",
)
self.last_refreshed = request_time
class OAuthJWTAuthenticator(OAuthAuthenticator):
"""API Authenticator for OAuth 2.0 flows which utilize a JWT refresh token."""
@property
def private_key(self) -> str | None:
"""Return the private key to use in encryption.
Returns:
Private key from stream config.
"""
return self.config.get("private_key", None)
@property
def private_key_passphrase(self) -> str | None:
"""Return the private key passphrase to use in encryption.
Returns:
Passphrase for private key from stream config.
"""
return self.config.get("private_key_passphrase", None)
@property
def oauth_request_body(self) -> dict:
"""Return request body for OAuth request.
Returns:
Request body mapping for OAuth.
"""
request_time = utc_now()
return {
"iss": self.client_id,
"scope": self.oauth_scopes,
"aud": self.auth_endpoint,
"exp": math.floor((request_time + timedelta(hours=1)).timestamp()),
"iat": math.floor(request_time.timestamp()),
}
@property
def oauth_request_payload(self) -> dict:
"""Return request paytload for OAuth request.
Returns:
Payload object for OAuth.
Raises:
ValueError: If the private key is not set.
"""
if not self.private_key:
raise ValueError("Missing 'private_key' property for OAuth payload.")
private_key: bytes | Any = bytes(self.private_key, "UTF-8")
if self.private_key_passphrase:
passphrase = bytes(self.private_key_passphrase, "UTF-8")
private_key = serialization.load_pem_private_key(
private_key,
password=passphrase,
backend=default_backend(),
)
private_key_string: str | Any = private_key.decode("UTF-8")
return {
"grant_type": "urn:ietf:params:oauth:grant-type:jwt-bearer",
"assertion": jwt.encode(
self.oauth_request_body,
private_key_string,
"RS256",
),
}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/metrics.py | """Singer metrics logging."""
from __future__ import annotations
import abc
import enum
import json
import logging
import logging.config
import os
from dataclasses import dataclass, field
from pathlib import Path
from time import time
from typing import TYPE_CHECKING, Any, Generic, Mapping, TypeVar
import yaml
from singer_sdk.helpers._resources import Traversable, get_package_files
if TYPE_CHECKING:
from types import TracebackType
DEFAULT_LOG_INTERVAL = 60.0
METRICS_LOGGER_NAME = __name__
METRICS_LOG_LEVEL_SETTING = "metrics_log_level"
_TVal = TypeVar("_TVal")
class Status(str, enum.Enum):
"""Constants for commonly used status values."""
SUCCEEDED = "succeeded"
FAILED = "failed"
class Tag(str, enum.Enum):
"""Constants for commonly used tags."""
STREAM = "stream"
CONTEXT = "context"
ENDPOINT = "endpoint"
JOB_TYPE = "job_type"
HTTP_STATUS_CODE = "http_status_code"
STATUS = "status"
class Metric(str, enum.Enum):
"""Common metric types."""
RECORD_COUNT = "record_count"
BATCH_COUNT = "batch_count"
HTTP_REQUEST_DURATION = "http_request_duration"
HTTP_REQUEST_COUNT = "http_request_count"
JOB_DURATION = "job_duration"
SYNC_DURATION = "sync_duration"
@dataclass
class Point(Generic[_TVal]):
"""An individual metric measurement."""
metric_type: str
metric: Metric
value: _TVal
tags: dict[str, Any] = field(default_factory=dict)
def __str__(self) -> str:
"""Get string representation of this measurement.
Returns:
A string representation of this measurement.
"""
return self.to_json()
def to_json(self) -> str:
"""Convert this measure to a JSON object.
Returns:
A JSON object.
"""
return json.dumps(
{
"type": self.metric_type,
"metric": self.metric.value,
"value": self.value,
"tags": self.tags,
},
default=str,
)
def log(logger: logging.Logger, point: Point) -> None:
"""Log a measurement.
Args:
logger: An logger instance.
point: A measurement.
"""
logger.info("METRIC: %s", point)
class Meter(metaclass=abc.ABCMeta):
"""Base class for all meters."""
def __init__(self, metric: Metric, tags: dict | None = None) -> None:
"""Initialize a meter.
Args:
metric: The metric type.
tags: Tags to add to the measurement.
"""
self.metric = metric
self.tags = tags or {}
self.logger = get_metrics_logger()
@property
def context(self) -> dict | None:
"""Get the context for this meter.
Returns:
A context dictionary.
"""
return self.tags.get(Tag.CONTEXT)
@context.setter
def context(self, value: dict | None) -> None:
"""Set the context for this meter.
Args:
value: A context dictionary.
"""
if value is None:
self.tags.pop(Tag.CONTEXT, None)
else:
self.tags[Tag.CONTEXT] = value
@abc.abstractmethod
def __enter__(self) -> Meter:
"""Enter the meter context."""
...
@abc.abstractmethod
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
"""Exit the meter context.
Args:
exc_type: The exception type.
exc_val: The exception value.
exc_tb: The exception traceback.
"""
...
class Counter(Meter):
"""A meter for counting things."""
def __init__(
self,
metric: Metric,
tags: dict | None = None,
log_interval: float = DEFAULT_LOG_INTERVAL,
) -> None:
"""Initialize a counter.
Args:
metric: The metric type.
tags: Tags to add to the measurement.
log_interval: The interval at which to log the count.
"""
super().__init__(metric, tags)
self.value = 0
self.log_interval = log_interval
self.last_log_time = time()
def __enter__(self) -> Counter:
"""Enter the counter context.
Returns:
The counter instance.
"""
self.last_log_time = time()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
"""Exit the counter context.
Args:
exc_type: The exception type.
exc_val: The exception value.
exc_tb: The exception traceback.
"""
self._pop()
def _pop(self) -> None:
"""Log and reset the counter."""
log(self.logger, Point("counter", self.metric, self.value, self.tags))
self.value = 0
self.last_log_time = time()
def increment(self, value: int = 1) -> None:
"""Increment the counter.
Args:
value: The value to increment by.
"""
self.value += value
if self._ready_to_log():
self._pop()
def _ready_to_log(self) -> bool:
"""Check if the counter is ready to log.
Returns:
True if the counter is ready to log.
"""
return time() - self.last_log_time > self.log_interval
class Timer(Meter):
"""A meter for timing things."""
def __init__(self, metric: Metric, tags: dict | None = None) -> None:
"""Initialize a timer.
Args:
metric: The metric type.
tags: Tags to add to the measurement.
"""
super().__init__(metric, tags)
self.start_time = time()
def __enter__(self) -> Timer:
"""Enter the timer context.
Returns:
The timer instance.
"""
self.start_time = time()
return self
def __exit__(
self,
exc_type: type[BaseException] | None,
exc_val: BaseException | None,
exc_tb: TracebackType | None,
) -> None:
"""Exit the timer context.
Args:
exc_type: The exception type.
exc_val: The exception value.
exc_tb: The exception traceback.
"""
if Tag.STATUS not in self.tags:
if exc_type is None:
self.tags[Tag.STATUS] = Status.SUCCEEDED
else:
self.tags[Tag.STATUS] = Status.FAILED
log(self.logger, Point("timer", self.metric, self.elapsed(), self.tags))
def elapsed(self) -> float:
"""Get the elapsed time.
Returns:
The elapsed time.
"""
return time() - self.start_time
def get_metrics_logger() -> logging.Logger:
"""Get a logger for emitting metrics.
Returns:
A logger that can be used to emit metrics.
"""
return logging.getLogger(METRICS_LOGGER_NAME)
def record_counter(
stream: str,
endpoint: str | None = None,
log_interval: float = DEFAULT_LOG_INTERVAL,
**tags: Any,
) -> Counter:
"""Use for counting records retrieved from the source.
with record_counter("my_stream", endpoint="/users") as counter:
for record in my_records:
# Do something with the record
counter.increment()
Args:
stream: The stream name.
endpoint: The endpoint name.
log_interval: The interval at which to log the count.
tags: Tags to add to the measurement.
Returns:
A counter for counting records.
"""
tags[Tag.STREAM] = stream
if endpoint:
tags[Tag.ENDPOINT] = endpoint
return Counter(Metric.RECORD_COUNT, tags, log_interval=log_interval)
def batch_counter(stream: str, **tags: Any) -> Counter:
"""Use for counting batches sent to the target.
with batch_counter("my_stream") as counter:
for batch in my_batches:
# Do something with the batch
counter.increment()
Args:
stream: The stream name.
tags: Tags to add to the measurement.
Returns:
A counter for counting batches.
"""
tags[Tag.STREAM] = stream
return Counter(Metric.BATCH_COUNT, tags)
def http_request_counter(
stream: str,
endpoint: str,
log_interval: float = DEFAULT_LOG_INTERVAL,
**tags: Any,
) -> Counter:
"""Use for counting HTTP requests.
with http_request_counter() as counter:
for record in my_records:
# Do something with the record
counter.increment()
Args:
stream: The stream name.
endpoint: The endpoint name.
log_interval: The interval at which to log the count.
tags: Tags to add to the measurement.
Returns:
A counter for counting HTTP requests.
"""
tags.update({Tag.STREAM: stream, Tag.ENDPOINT: endpoint})
return Counter(Metric.HTTP_REQUEST_COUNT, tags, log_interval=log_interval)
def sync_timer(stream: str, **tags: Any) -> Timer:
"""Use for timing the sync of a stream.
with singer.metrics.sync_timer() as timer:
# Do something
print(f"Sync took {timer.elapsed()} seconds")
Args:
stream: The stream name.
tags: Tags to add to the measurement.
Returns:
A timer for timing the sync of a stream.
"""
tags[Tag.STREAM] = stream
return Timer(Metric.SYNC_DURATION, tags)
def _load_yaml_logging_config(path: Traversable | Path) -> Any: # noqa: ANN401
"""Load the logging config from the YAML file.
Args:
path: A path to the YAML file.
Returns:
The logging config.
"""
with path.open() as f:
return yaml.safe_load(f)
def _get_default_config() -> Any: # noqa: ANN401
"""Get a logging configuration.
Returns:
A logging configuration.
"""
log_config_path = get_package_files("singer_sdk").joinpath("default_logging.yml")
return _load_yaml_logging_config(log_config_path)
def _setup_logging(config: Mapping[str, Any]) -> None:
"""Setup logging.
Args:
config: A plugin configuration dictionary.
"""
logging.config.dictConfig(_get_default_config())
config = config or {}
metrics_log_level = config.get(METRICS_LOG_LEVEL_SETTING, "INFO").upper()
logging.getLogger(METRICS_LOGGER_NAME).setLevel(metrics_log_level)
if "SINGER_SDK_LOG_CONFIG" in os.environ:
log_config_path = Path(os.environ["SINGER_SDK_LOG_CONFIG"])
logging.config.dictConfig(_load_yaml_logging_config(log_config_path))
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/default_logging.yml | version: 1
disable_existing_loggers: false
formatters:
console:
format: "{asctime:23s} | {levelname:8s} | {name:20s} | {message}"
style: "{"
handlers:
default:
class: logging.StreamHandler
formatter: console
stream: ext://sys.stderr
root:
level: INFO
handlers: [default]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/plugin_base.py | """Shared parent class for Tap, Target (future), and Transform (future)."""
from __future__ import annotations
import abc
import logging
import os
from pathlib import Path, PurePath
from types import MappingProxyType
from typing import TYPE_CHECKING, Any, Callable, Mapping, cast
import click
from jsonschema import Draft7Validator
from singer_sdk import about, metrics
from singer_sdk.configuration._dict_config import parse_environment_config
from singer_sdk.exceptions import ConfigValidationError
from singer_sdk.helpers._classproperty import classproperty
from singer_sdk.helpers._compat import metadata
from singer_sdk.helpers._secrets import SecretString, is_common_secret_key
from singer_sdk.helpers._util import read_json_file
from singer_sdk.helpers.capabilities import (
FLATTENING_CONFIG,
STREAM_MAPS_CONFIG,
CapabilitiesEnum,
PluginCapabilities,
)
from singer_sdk.typing import extend_validator_with_defaults
if TYPE_CHECKING:
from singer_sdk.mapper import PluginMapper
SDK_PACKAGE_NAME = "singer_sdk"
JSONSchemaValidator = extend_validator_with_defaults(Draft7Validator)
class PluginBase(metaclass=abc.ABCMeta):
"""Abstract base class for taps."""
name: str # The executable name of the tap or target plugin.
config_jsonschema: dict = {}
# A JSON Schema object defining the config options that this tap will accept.
_config: dict
@classproperty
def logger(cls) -> logging.Logger: # noqa: N805
"""Get logger.
Returns:
Plugin logger.
"""
# Get the level from <PLUGIN_NAME>_LOGLEVEL or LOGLEVEL environment variables
plugin_env_prefix = f"{cls.name.upper().replace('-', '_')}_"
log_level = os.environ.get(f"{plugin_env_prefix}LOGLEVEL") or os.environ.get(
"LOGLEVEL",
)
logger = logging.getLogger(cls.name)
if log_level is not None and log_level.upper() in logging._levelToName.values():
logger.setLevel(log_level.upper())
return logger
# Constructor
def __init__(
self,
*,
config: dict | PurePath | str | list[PurePath | str] | None = None,
parse_env_config: bool = False,
validate_config: bool = True,
) -> None:
"""Create the tap or target.
Args:
config: May be one or more paths, either as str or PurePath objects, or
it can be a predetermined config dict.
parse_env_config: True to parse settings from env vars.
validate_config: True to require validation of config settings.
Raises:
ValueError: If config is not a dict or path string.
"""
if not config:
config_dict = {}
elif isinstance(config, (str, PurePath)):
config_dict = read_json_file(config)
elif isinstance(config, list):
config_dict = {}
for config_path in config:
# Read each config file sequentially. Settings from files later in the
# list will override those of earlier ones.
config_dict.update(read_json_file(config_path))
elif isinstance(config, dict):
config_dict = config
else:
raise ValueError(f"Error parsing config of type '{type(config).__name__}'.")
if parse_env_config:
self.logger.info("Parsing env var for settings config...")
config_dict.update(self._env_var_config)
else:
self.logger.info("Skipping parse of env var settings...")
for k, v in config_dict.items():
if self._is_secret_config(k):
config_dict[k] = SecretString(v)
self._config = config_dict
self._validate_config(raise_errors=validate_config)
self.mapper: PluginMapper
metrics._setup_logging(self.config)
self.metrics_logger = metrics.get_metrics_logger()
@classproperty
def capabilities(self) -> list[CapabilitiesEnum]:
"""Get capabilities.
Developers may override this property in oder to add or remove
advertised capabilities for this plugin.
Returns:
A list of plugin capabilities.
"""
return [
PluginCapabilities.STREAM_MAPS,
PluginCapabilities.FLATTENING,
]
@classproperty
def _env_var_config(cls) -> dict[str, Any]: # noqa: N805
"""Return any config specified in environment variables.
Variables must match the convention "<PLUGIN_NAME>_<SETTING_NAME>",
all uppercase with dashes converted to underscores.
Returns:
Dictionary of configuration parsed from the environment.
"""
plugin_env_prefix = f"{cls.name.upper().replace('-', '_')}_"
config_jsonschema = cls.config_jsonschema
cls.append_builtin_config(config_jsonschema)
return parse_environment_config(config_jsonschema, plugin_env_prefix)
# Core plugin metadata:
@staticmethod
def _get_package_version(package: str) -> str:
"""Return the package version number.
Args:
package: The package name.
Returns:
The package version number.
"""
try:
version = metadata.version(package)
except metadata.PackageNotFoundError:
version = "[could not be detected]"
return version
@classmethod
def get_plugin_version(cls) -> str:
"""Return the package version number.
Returns:
The package version number.
"""
return cls._get_package_version(cls.name)
@classmethod
def get_sdk_version(cls) -> str:
"""Return the package version number.
Returns:
The package version number.
"""
return cls._get_package_version(SDK_PACKAGE_NAME)
@classproperty
def plugin_version(cls) -> str: # noqa: N805
"""Get version.
Returns:
The package version number.
"""
return cls.get_plugin_version()
@classproperty
def sdk_version(cls) -> str: # noqa: N805
"""Return the package version number.
Returns:
Meltano Singer SDK version number.
"""
return cls.get_sdk_version()
# Abstract methods:
@property
def state(self) -> dict:
"""Get state.
Raises:
NotImplementedError: If the derived plugin doesn't override this method.
"""
raise NotImplementedError
# Core plugin config:
@property
def config(self) -> Mapping[str, Any]:
"""Get config.
Returns:
A frozen (read-only) config dictionary map.
"""
return cast(dict, MappingProxyType(self._config))
@staticmethod
def _is_secret_config(config_key: str) -> bool:
"""Check if config key is secret.
This prevents accidental printing to logs.
Args:
config_key: Configuration key name to match against common secret names.
Returns:
True if a config value should be treated as a secret.
"""
return is_common_secret_key(config_key)
def _validate_config(
self,
*,
raise_errors: bool = True,
warnings_as_errors: bool = False,
) -> tuple[list[str], list[str]]:
"""Validate configuration input against the plugin configuration JSON schema.
Args:
raise_errors: Flag to throw an exception if any validation errors are found.
warnings_as_errors: Flag to throw an exception if any warnings were emitted.
Returns:
A tuple of configuration validation warnings and errors.
Raises:
ConfigValidationError: If raise_errors is True and validation fails.
"""
warnings: list[str] = []
errors: list[str] = []
log_fn = self.logger.info
config_jsonschema = self.config_jsonschema
if config_jsonschema:
self.append_builtin_config(config_jsonschema)
self.logger.debug(
"Validating config using jsonschema: %s",
config_jsonschema,
)
validator = JSONSchemaValidator(config_jsonschema)
errors = [e.message for e in validator.iter_errors(self._config)]
if errors:
summary = (
f"Config validation failed: {'; '.join(errors)}\n"
f"JSONSchema was: {config_jsonschema}"
)
if raise_errors:
raise ConfigValidationError(summary)
log_fn = self.logger.warning
else:
summary = f"Config validation passed with {len(warnings)} warnings."
for warning in warnings:
summary += f"\n{warning}"
if warnings_as_errors and raise_errors and warnings:
raise ConfigValidationError(
f"One or more warnings ocurred during validation: {warnings}",
)
log_fn(summary)
return warnings, errors
@classmethod
def print_version(
cls: type[PluginBase],
print_fn: Callable[[Any], None] = print,
) -> None:
"""Print help text for the tap.
Args:
print_fn: A function to use to display the plugin version.
Defaults to `print`_.
.. _print: https://docs.python.org/3/library/functions.html#print
"""
print_fn(f"{cls.name} v{cls.plugin_version}, Meltano SDK v{cls.sdk_version}")
@classmethod
def _get_about_info(cls: type[PluginBase]) -> about.AboutInfo:
"""Returns capabilities and other tap metadata.
Returns:
A dictionary containing the relevant 'about' information.
"""
config_jsonschema = cls.config_jsonschema
cls.append_builtin_config(config_jsonschema)
return about.AboutInfo(
name=cls.name,
description=cls.__doc__,
version=cls.get_plugin_version(),
sdk_version=cls.get_sdk_version(),
capabilities=cls.capabilities,
settings=config_jsonschema,
)
@classmethod
def append_builtin_config(cls: type[PluginBase], config_jsonschema: dict) -> None:
"""Appends built-in config to `config_jsonschema` if not already set.
To customize or disable this behavior, developers may either override this class
method or override the `capabilities` property to disabled any unwanted
built-in capabilities.
For all except very advanced use cases, we recommend leaving these
implementations "as-is", since this provides the most choice to users and is
the most "future proof" in terms of taking advantage of built-in capabilities
which may be added in the future.
Args:
config_jsonschema: [description]
"""
def _merge_missing(source_jsonschema: dict, target_jsonschema: dict) -> None:
# Append any missing properties in the target with those from source.
for k, v in source_jsonschema["properties"].items():
if k not in target_jsonschema["properties"]:
target_jsonschema["properties"][k] = v
capabilities = cls.capabilities
if PluginCapabilities.STREAM_MAPS in capabilities:
_merge_missing(STREAM_MAPS_CONFIG, config_jsonschema)
if PluginCapabilities.FLATTENING in capabilities:
_merge_missing(FLATTENING_CONFIG, config_jsonschema)
@classmethod
def print_about(
cls: type[PluginBase],
output_format: str | None = None,
) -> None:
"""Print capabilities and other tap metadata.
Args:
output_format: Render option for the plugin information.
"""
info = cls._get_about_info()
formatter = about.AboutFormatter.get_formatter(output_format or "text")
print(formatter.format_about(info)) # noqa: T201
@staticmethod
def config_from_cli_args(*args: str) -> tuple[list[Path], bool]:
"""Parse CLI arguments into a config dictionary.
Args:
args: CLI arguments.
Raises:
FileNotFoundError: If the config file does not exist.
Returns:
A tuple containing the config dictionary and a boolean indicating whether
the config file was found.
"""
config_files = []
parse_env_config = False
for config_path in args:
if config_path == "ENV":
# Allow parse from env vars:
parse_env_config = True
continue
# Validate config file paths before adding to list
if not Path(config_path).is_file():
raise FileNotFoundError(
f"Could not locate config file at '{config_path}'."
"Please check that the file exists.",
)
config_files.append(Path(config_path))
return config_files, parse_env_config
@classproperty
def cli(cls) -> Callable: # noqa: N805
"""Handle command line execution.
Returns:
A callable CLI object.
"""
@click.command()
def cli() -> None:
pass
return cli
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/mapper.py | """Stream Mapper classes.
Mappers allow inline stream transformation, filtering, aliasing, and duplication.
"""
from __future__ import annotations
import abc
import copy
import datetime
import hashlib
import logging
from typing import TYPE_CHECKING, Any, Callable, Dict, Union
from singer_sdk.exceptions import MapExpressionError, StreamMapConfigError
from singer_sdk.helpers import _simpleeval as simpleeval
from singer_sdk.helpers._catalog import get_selected_schema
from singer_sdk.helpers._flattening import (
FlatteningOptions,
flatten_record,
flatten_schema,
get_flattening_options,
)
from singer_sdk.typing import (
CustomType,
IntegerType,
JSONTypeHelper,
NumberType,
PropertiesList,
Property,
StringType,
)
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
from singer_sdk._singerlib.catalog import Catalog
MAPPER_ELSE_OPTION = "__else__"
MAPPER_FILTER_OPTION = "__filter__"
MAPPER_SOURCE_OPTION = "__source__"
MAPPER_ALIAS_OPTION = "__alias__"
MAPPER_KEY_PROPERTIES_OPTION = "__key_properties__"
NULL_STRING = "__NULL__"
def md5(string: str) -> str:
"""Digest a string using MD5. This is a function for inline calculations.
Args:
string: String to digest.
Returns:
A string digested into MD5.
"""
return hashlib.md5(string.encode("utf-8")).hexdigest() # noqa: S324
StreamMapsDict: TypeAlias = Dict[str, Union[str, dict, None]]
class StreamMap(metaclass=abc.ABCMeta):
"""Abstract base class for all map classes."""
def __init__(
self,
stream_alias: str,
raw_schema: dict,
key_properties: list[str] | None,
flattening_options: FlatteningOptions | None,
) -> None:
"""Initialize mapper.
Args:
stream_alias: Stream name.
raw_schema: Original stream JSON schema.
key_properties: Primary key of the source stream.
flattening_options: Flattening options, or None to skip flattening.
"""
self.stream_alias = stream_alias
self.raw_schema = raw_schema
self.raw_key_properties = key_properties
self.transformed_schema = raw_schema
self.transformed_key_properties = key_properties
self.flattening_options = flattening_options
if self.flattening_enabled:
self.transformed_schema = self.flatten_schema(self.transformed_schema)
@property
def flattening_enabled(self) -> bool:
"""True if flattening is enabled for this stream map.
Returns:
True if flattening is enabled, otherwise False.
"""
return (
self.flattening_options is not None
and self.flattening_options.flattening_enabled
and self.flattening_options.max_level > 0
)
def flatten_record(self, record: dict) -> dict:
"""If flattening is enabled, flatten a record and return the result.
If flattening is disabled, the original record will be returned.
Args:
record: An individual record dictionary in a stream.
Returns:
A new dictionary representing the flattened record.
"""
if not self.flattening_options or not self.flattening_enabled:
return record
return flatten_record(
record,
flattened_schema=self.transformed_schema,
max_level=self.flattening_options.max_level,
separator=self.flattening_options.separator,
)
def flatten_schema(self, raw_schema: dict) -> dict:
"""Flatten the provided schema.
Args:
raw_schema: The raw schema to flatten.
Returns:
The flattened version of the schema.
"""
if not self.flattening_options or not self.flattening_enabled:
return raw_schema
return flatten_schema(
raw_schema,
separator=self.flattening_options.separator,
max_level=self.flattening_options.max_level,
)
@abc.abstractmethod
def transform(self, record: dict) -> dict | None:
"""Transform a record and return the result.
Record flattening will also be performed, if enabled.
Subclasses should call the super().transform(record) after any other custom
transforms are performed.
Args:
record: An individual record dictionary in a stream.
Returns:
A new dictionary representing a transformed record.
"""
return self.flatten_record(record)
@abc.abstractmethod
def get_filter_result(self, record: dict) -> bool:
"""Exclude records from a stream.
Args:
record: An individual record dictionary in a stream.
Return:
True to include the record or False to exclude.
Raises:
NotImplementedError: If the derived class doesn't override this method.
"""
raise NotImplementedError
class DefaultStreamMap(StreamMap):
"""Abstract base class for default maps which do not require custom config."""
class RemoveRecordTransform(DefaultStreamMap):
"""Default mapper which simply excludes any records."""
def transform(self, record: dict) -> None:
"""Return None (always exclude).
Args:
record: An individual record dictionary in a stream.
"""
_ = record # Drop the record
def get_filter_result(self, record: dict) -> bool: # noqa: ARG002
"""Exclude all records.
Args:
record: An individual record dictionary in a stream.
Returns:
Always `False`.
"""
return False
class SameRecordTransform(DefaultStreamMap):
"""Default mapper which simply returns the original records."""
def transform(self, record: dict) -> dict | None:
"""Return original record unchanged.
Args:
record: An individual record dictionary in a stream.
Returns:
The original record unchanged.
"""
return super().transform(record)
def get_filter_result(self, record: dict) -> bool: # noqa: ARG002
"""Return True (always include).
Args:
record: An individual record dictionary in a stream.
Returns:
Always `True`.
"""
return True
class CustomStreamMap(StreamMap):
"""Defines transformation logic for a singer stream map."""
def __init__(
self,
stream_alias: str,
map_config: dict,
raw_schema: dict,
key_properties: list[str] | None,
map_transform: dict,
flattening_options: FlatteningOptions | None,
) -> None:
"""Initialize mapper.
Args:
stream_alias: Stream name.
map_config: Stream map configuration.
raw_schema: Original stream's JSON schema.
key_properties: Primary key of the source stream.
map_transform: Dictionary of transformations to apply to the stream.
flattening_options: Flattening options, or None to skip flattening.
"""
super().__init__(
stream_alias=stream_alias,
raw_schema=raw_schema,
key_properties=key_properties,
flattening_options=flattening_options,
)
self.map_config = map_config
self._transform_fn: Callable[[dict], dict | None]
self._filter_fn: Callable[[dict], bool]
(
self._filter_fn,
self._transform_fn,
self.transformed_schema,
) = self._init_functions_and_schema(stream_map=map_transform)
def transform(self, record: dict) -> dict | None:
"""Return a transformed record.
Args:
record: An individual record dictionary in a stream.
Returns:
The transformed record.
"""
transformed_record = self._transform_fn(record)
if not transformed_record:
return None
return super().transform(transformed_record)
def get_filter_result(self, record: dict) -> bool:
"""Return True to include or False to exclude.
Args:
record: An individual record dictionary in a stream.
Returns:
Boolean flag for record selection.
"""
return self._filter_fn(record)
@property
def functions(self) -> dict[str, Callable]:
"""Get availabale transformation functions.
Returns:
Functions which should be available for expression evaluation.
"""
funcs: dict[str, Any] = simpleeval.DEFAULT_FUNCTIONS.copy()
funcs["md5"] = md5
funcs["datetime"] = datetime
return funcs
def _eval(
self,
expr: str,
record: dict,
property_name: str | None,
) -> str | int | float:
"""Solve an expression.
Args:
expr: String expression to evaluate.
record: Individual stream record.
property_name: Name of property to transform in the record.
Returns:
Evaluated expression.
Raises:
MapExpressionError: If the mapping expression failed to evaluate.
"""
names = record.copy() # Start with names from record properties
names["_"] = record # Add a shorthand alias in case of reserved words in names
names["record"] = record # ...and a longhand alias
names["config"] = self.map_config # Allow map config access within transform
if property_name and property_name in record:
# Allow access to original property value if applicable
names["self"] = record[property_name]
try:
result: str | int | float = simpleeval.simple_eval(
expr,
functions=self.functions,
names=names,
)
except (simpleeval.InvalidExpression, SyntaxError) as ex:
raise MapExpressionError(
f"Failed to evaluate simpleeval expressions {expr}.",
) from ex
logging.debug("Eval result: %s = %s", expr, result)
return result
def _eval_type(
self,
expr: str,
default: JSONTypeHelper | None = None,
) -> JSONTypeHelper:
"""Evaluate an expression's type.
Args:
expr: String expression to evaluate.
default: Default type.
Returns:
The evaluated expression's type.
Raises:
ValueError: If the expression is ``None``.
"""
if expr is None:
raise ValueError("Expression should be str, not None")
default = default or StringType()
if expr.startswith("float("):
return NumberType()
if expr.startswith("int("):
return IntegerType()
if expr.startswith("str("):
return StringType()
if expr[0] == "'" and expr[-1] == "'":
return StringType()
return default
def _init_functions_and_schema( # noqa: PLR0912, PLR0915
self,
stream_map: dict,
) -> tuple[Callable[[dict], bool], Callable[[dict], dict | None], dict]:
"""Return a tuple: filter_fn, transform_fn, transformed_schema.
Args:
stream_map: TODO
Returns:
TODO.
Raises:
NotImplementedError: TODO
StreamMapConfigError: TODO
"""
stream_map = copy.copy(stream_map)
filter_rule: str | None = None
include_by_default = True
if stream_map and MAPPER_FILTER_OPTION in stream_map:
filter_rule = stream_map.pop(MAPPER_FILTER_OPTION)
logging.info(
"Found '%s' filter rule: %s",
self.stream_alias,
filter_rule,
)
if stream_map and MAPPER_KEY_PROPERTIES_OPTION in stream_map:
self.transformed_key_properties: list[str] = stream_map.pop(
MAPPER_KEY_PROPERTIES_OPTION,
)
logging.info(
"Found stream map override for '%s' key properties: %s",
self.stream_alias,
self.transformed_key_properties,
)
if stream_map and MAPPER_ELSE_OPTION in stream_map:
if stream_map[MAPPER_ELSE_OPTION] in {None, NULL_STRING}:
logging.info(
"Detected `%s=None` rule. "
"Unmapped, non-key properties will be excluded from output.",
MAPPER_ELSE_OPTION,
)
include_by_default = False
else:
raise NotImplementedError(
f"Option '{MAPPER_ELSE_OPTION}={stream_map[MAPPER_ELSE_OPTION]}' "
"is not supported.",
)
stream_map.pop(MAPPER_ELSE_OPTION)
# Transform the schema as needed
transformed_schema = copy.copy(self.raw_schema)
if not include_by_default:
# Start with only the defined (or transformed) key properties
transformed_schema = PropertiesList().to_dict()
if "properties" not in transformed_schema:
transformed_schema["properties"] = {}
for prop_key, prop_def in list(stream_map.items()):
if prop_def in {None, NULL_STRING}:
if prop_key in (self.transformed_key_properties or []):
raise StreamMapConfigError(
f"Removing key property '{prop_key}' is not permitted in "
f"'{self.stream_alias}' stream map config. To remove a key "
"property, use the `__key_properties__` operator "
"to specify either a new list of key property names or `null` "
"to replicate with no key properties in the stream.",
)
transformed_schema["properties"].pop(prop_key, None)
elif isinstance(prop_def, str):
default_type: JSONTypeHelper = StringType() # Fallback to string
existing_schema: dict = (
# Use transformed schema if available
transformed_schema["properties"].get(prop_key, {})
# ...or original schema for passthrough
or self.raw_schema["properties"].get(prop_def, {})
)
if existing_schema:
# Set default type if property exists already in JSON Schema
default_type = CustomType(existing_schema)
transformed_schema["properties"].update(
Property(
prop_key,
self._eval_type(prop_def, default=default_type),
).to_dict(),
)
else:
raise StreamMapConfigError(
f"Unexpected type '{type(prop_def).__name__}' in stream map "
f"for '{self.stream_alias}:{prop_key}'.",
)
for key_property in self.transformed_key_properties or []:
if key_property not in transformed_schema["properties"]:
raise StreamMapConfigError(
f"Invalid key properties for '{self.stream_alias}': "
f"[{','.join(self.transformed_key_properties)}]. "
f"Property '{key_property}' was not detected in schema.",
)
if self.flattening_enabled:
transformed_schema = self.flatten_schema(transformed_schema)
# Declare function variables
def eval_filter(filter_rule: str) -> Callable[[dict], bool]:
def _inner(record: dict) -> bool:
filter_result = self._eval(
expr=filter_rule,
record=record,
property_name=None,
)
logging.debug(
"Filter result for '%s' in '{self.name}' stream: %s",
filter_rule,
filter_result,
)
if not filter_result:
logging.debug("Excluding record due to filter.")
return False
return True
return _inner
def always_true(record: dict) -> bool:
_ = record
return True
if isinstance(filter_rule, str):
filter_fn = eval_filter(filter_rule)
elif filter_rule is None:
filter_fn = always_true
else:
raise StreamMapConfigError(
f"Unexpected filter rule type '{type(filter_rule).__name__}' in "
f"expression {str(filter_rule)}. Expected 'str' or 'None'.",
)
def transform_fn(record: dict) -> dict | None:
nonlocal include_by_default, stream_map
if not self.get_filter_result(record):
return None
if include_by_default:
result = record.copy()
else:
# Start with only the defined (or transformed) key properties
result = {}
for key_property in self.transformed_key_properties or []:
if key_property in record:
result[key_property] = record[key_property]
for prop_key, prop_def in list(stream_map.items()):
if prop_def in {None, NULL_STRING}:
# Remove property from result
result.pop(prop_key, None)
continue
if isinstance(prop_def, str):
# Apply property transform
result[prop_key] = self._eval(
expr=prop_def,
record=record,
property_name=prop_key,
)
continue
raise StreamMapConfigError(
f"Unexpected mapping type '{type(prop_def).__name__}' in "
f"map expression '{prop_def}'. Expected 'str' or 'None'.",
)
return result
return filter_fn, transform_fn, transformed_schema
class PluginMapper:
"""Inline map tranformer."""
def __init__(
self,
plugin_config: dict[str, StreamMapsDict],
logger: logging.Logger,
) -> None:
"""Initialize mapper.
Args:
plugin_config: TODO
logger: TODO
Raises:
StreamMapConfigError: TODO
"""
self.stream_maps: dict[str, list[StreamMap]] = {}
self.map_config = plugin_config.get("stream_map_config", {})
self.flattening_options = get_flattening_options(plugin_config)
self.default_mapper_type: type[DefaultStreamMap] = SameRecordTransform
self.logger = logger
self.stream_maps_dict: StreamMapsDict = plugin_config.get("stream_maps", {})
if MAPPER_ELSE_OPTION in self.stream_maps_dict:
if self.stream_maps_dict[MAPPER_ELSE_OPTION] in {None, NULL_STRING}:
logging.info(
"Found '%s=None' default mapper. "
"Unmapped streams will be excluded from output.",
MAPPER_ELSE_OPTION,
)
self.default_mapper_type = RemoveRecordTransform
self.stream_maps_dict.pop(MAPPER_ELSE_OPTION)
else:
raise StreamMapConfigError(
f"Undefined transform for '{MAPPER_ELSE_OPTION}'' case: "
f"{self.stream_maps_dict[MAPPER_ELSE_OPTION]}",
)
else:
logging.debug(
"Operator '%s=None' was not found. "
"Unmapped streams will be included in output.",
MAPPER_ELSE_OPTION,
)
for stream_map_key, stream_def in self.stream_maps_dict.items():
if stream_map_key.startswith("__"):
raise StreamMapConfigError(
f"Option '{stream_map_key}:{stream_def}' is not expected.",
)
def register_raw_streams_from_catalog(self, catalog: Catalog) -> None:
"""Register all streams as described in the catalog dict.
Args:
catalog: TODO
"""
for catalog_entry in catalog.streams:
self.register_raw_stream_schema(
catalog_entry.stream or catalog_entry.tap_stream_id,
get_selected_schema(
catalog_entry.stream or catalog_entry.tap_stream_id,
catalog_entry.schema.to_dict(),
catalog_entry.metadata.resolve_selection(),
self.logger,
),
catalog_entry.key_properties,
)
def register_raw_stream_schema( # noqa: PLR0912
self,
stream_name: str,
schema: dict,
key_properties: list[str] | None,
) -> None:
"""Register a new stream as described by its name and schema.
If stream has already been registered and schema or key_properties has changed,
the older registration will be removed and replaced with new, updated mappings.
Args:
stream_name: The stream name.
schema: The schema definition for the stream.
key_properties: The key properties of the stream.
Raises:
StreamMapConfigError: If the configuration is invalid.
"""
if stream_name in self.stream_maps:
primary_mapper = self.stream_maps[stream_name][0]
if (
primary_mapper.raw_schema != schema
or primary_mapper.raw_key_properties != key_properties
):
# Unload/reset stream maps if schema or key properties have changed.
self.stream_maps.pop(stream_name)
if stream_name not in self.stream_maps:
# The 0th mapper should be the same-named treatment.
# Additional items may be added for aliasing or multi projections.
self.stream_maps[stream_name] = [
self.default_mapper_type(
stream_name,
schema,
key_properties,
flattening_options=self.flattening_options,
),
]
for stream_map_key, stream_map_val in self.stream_maps_dict.items():
stream_def = (
stream_map_val.copy()
if isinstance(stream_map_val, dict)
else stream_map_val
)
stream_alias: str = stream_map_key
source_stream: str = stream_map_key
if isinstance(stream_def, str) and stream_def != NULL_STRING:
if stream_name == stream_map_key:
# TODO: Add any expected cases for str expressions (currently none)
pass
raise StreamMapConfigError(
f"Option '{stream_map_key}:{stream_def}' is not expected.",
)
if stream_def is None or stream_def == NULL_STRING:
if stream_name != stream_map_key:
continue
self.stream_maps[stream_map_key][0] = RemoveRecordTransform(
stream_alias=stream_map_key,
raw_schema=schema,
key_properties=None,
flattening_options=self.flattening_options,
)
logging.info("Set null tansform as default for '%s'", stream_name)
continue
if not isinstance(stream_def, dict):
raise StreamMapConfigError(
"Unexpected stream definition type. Expected str, dict, or None. "
f"Got '{type(stream_def).__name__}'.",
)
if MAPPER_SOURCE_OPTION in stream_def:
source_stream = stream_def.pop(MAPPER_SOURCE_OPTION)
if source_stream != stream_name:
# Not a match
continue
if MAPPER_ALIAS_OPTION in stream_def:
stream_alias = stream_def.pop(MAPPER_ALIAS_OPTION)
mapper = CustomStreamMap(
stream_alias=stream_alias,
map_transform=stream_def,
map_config=self.map_config,
raw_schema=schema,
key_properties=key_properties,
flattening_options=self.flattening_options,
)
if source_stream == stream_map_key:
# Zero-th mapper should be the same-keyed mapper.
# Override the default mapper with this custom map.
self.stream_maps[stream_name][0] = mapper
else:
# Additional mappers for aliasing and multi-projection:
self.stream_maps[stream_name].append(mapper)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/io_base.py | """Abstract base classes for all Singer messages IO operations."""
from __future__ import annotations
import abc
import json
import logging
import sys
from collections import Counter, defaultdict
from typing import IO
from typing import Counter as CounterType
from singer_sdk._singerlib import SingerMessageType
from singer_sdk.helpers._compat import final
logger = logging.getLogger(__name__)
class SingerReader(metaclass=abc.ABCMeta):
"""Interface for all plugins reading Singer messages from stdin."""
@final
def listen(self, file_input: IO[str] | None = None) -> None:
"""Read from input until all messages are processed.
Args:
file_input: Readable stream of messages. Defaults to standard in.
This method is internal to the SDK and should not need to be overridden.
"""
if not file_input:
file_input = sys.stdin
self._process_lines(file_input)
self._process_endofpipe()
@staticmethod
def _assert_line_requires(line_dict: dict, requires: set[str]) -> None:
"""Check if dictionary .
Args:
line_dict: TODO
requires: TODO
Raises:
Exception: TODO
"""
if not requires.issubset(line_dict):
missing = requires - set(line_dict)
raise Exception(
f"Line is missing required {', '.join(missing)} key(s): {line_dict}",
)
def _process_lines(self, file_input: IO[str]) -> CounterType[str]:
"""Internal method to process jsonl lines from a Singer tap.
Args:
file_input: Readable stream of messages, each on a separate line.
Returns:
A counter object for the processed lines.
Raises:
json.decoder.JSONDecodeError: raised if any lines are not valid json
"""
stats: dict[str, int] = defaultdict(int)
for line in file_input:
try:
line_dict = json.loads(line)
except json.decoder.JSONDecodeError as exc:
logger.error("Unable to parse:\n%s", line, exc_info=exc)
raise
self._assert_line_requires(line_dict, requires={"type"})
record_type: SingerMessageType = line_dict["type"]
if record_type == SingerMessageType.SCHEMA:
self._process_schema_message(line_dict)
elif record_type == SingerMessageType.RECORD:
self._process_record_message(line_dict)
elif record_type == SingerMessageType.ACTIVATE_VERSION:
self._process_activate_version_message(line_dict)
elif record_type == SingerMessageType.STATE:
self._process_state_message(line_dict)
elif record_type == SingerMessageType.BATCH:
self._process_batch_message(line_dict)
else:
self._process_unknown_message(line_dict)
stats[record_type] += 1
return Counter(**stats)
@abc.abstractmethod
def _process_schema_message(self, message_dict: dict) -> None:
...
@abc.abstractmethod
def _process_record_message(self, message_dict: dict) -> None:
...
@abc.abstractmethod
def _process_state_message(self, message_dict: dict) -> None:
...
@abc.abstractmethod
def _process_activate_version_message(self, message_dict: dict) -> None:
...
@abc.abstractmethod
def _process_batch_message(self, message_dict: dict) -> None:
...
def _process_unknown_message(self, message_dict: dict) -> None:
"""Internal method to process unknown message types from a Singer tap.
Args:
message_dict: Dictionary representation of the Singer message.
Raises:
ValueError: raised if a message type is not recognized
"""
record_type = message_dict["type"]
raise ValueError(f"Unknown message type '{record_type}' in message.")
def _process_endofpipe(self) -> None:
logger.debug("End of pipe reached")
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py | """Tap abstract class."""
from __future__ import annotations
import abc
import contextlib
import json
from enum import Enum
from typing import TYPE_CHECKING, Any, Callable, Iterable, Sequence, cast
import click
from singer_sdk._singerlib import Catalog
from singer_sdk.cli import common_options
from singer_sdk.exceptions import AbortedSyncFailedException, AbortedSyncPausedException
from singer_sdk.helpers import _state
from singer_sdk.helpers._classproperty import classproperty
from singer_sdk.helpers._compat import final
from singer_sdk.helpers._state import write_stream_state
from singer_sdk.helpers._util import read_json_file
from singer_sdk.helpers.capabilities import (
CapabilitiesEnum,
PluginCapabilities,
TapCapabilities,
)
from singer_sdk.mapper import PluginMapper
from singer_sdk.plugin_base import PluginBase
if TYPE_CHECKING:
from pathlib import PurePath
from singer_sdk.streams import SQLStream, Stream
STREAM_MAPS_CONFIG = "stream_maps"
class CliTestOptionValue(Enum):
"""Values for CLI option --test."""
All = "all"
Schema = "schema"
Disabled = False
class Tap(PluginBase, metaclass=abc.ABCMeta):
"""Abstract base class for taps.
The Tap class governs configuration, validation, and stream discovery for tap
plugins.
"""
# Constructor
def __init__(
self,
*,
config: dict | PurePath | str | list[PurePath | str] | None = None,
catalog: PurePath | str | dict | Catalog | None = None,
state: PurePath | str | dict | None = None,
parse_env_config: bool = False,
validate_config: bool = True,
) -> None:
"""Initialize the tap.
Args:
config: Tap configuration. Can be a dictionary, a single path to a
configuration file, or a list of paths to multiple configuration
files.
catalog: Tap catalog. Can be a dictionary or a path to the catalog file.
state: Tap state. Can be dictionary or a path to the state file.
parse_env_config: Whether to look for configuration values in environment
variables.
validate_config: True to require validation of config settings.
"""
super().__init__(
config=config,
parse_env_config=parse_env_config,
validate_config=validate_config,
)
# Declare private members
self._streams: dict[str, Stream] | None = None
self._input_catalog: Catalog | None = None
self._state: dict[str, Stream] = {}
self._catalog: Catalog | None = None # Tap's working catalog
# Process input catalog
if isinstance(catalog, Catalog):
self._input_catalog = catalog
elif isinstance(catalog, dict):
self._input_catalog = Catalog.from_dict(catalog) # type: ignore[arg-type]
elif catalog is not None:
self._input_catalog = Catalog.from_dict(read_json_file(catalog))
# Initialize mapper
self.mapper: PluginMapper
self.mapper = PluginMapper(
plugin_config=dict(self.config),
logger=self.logger,
)
self.mapper.register_raw_streams_from_catalog(self.catalog)
# Process state
state_dict: dict = {}
if isinstance(state, dict):
state_dict = state
elif state:
state_dict = read_json_file(state)
self.load_state(state_dict)
# Class properties
@property
def streams(self) -> dict[str, Stream]:
"""Get streams discovered or catalogued for this tap.
Results will be cached after first execution.
Returns:
A mapping of names to streams, using discovery or a provided catalog.
"""
input_catalog = self.input_catalog
if self._streams is None:
self._streams = {}
for stream in self.load_streams():
if input_catalog is not None:
stream.apply_catalog(input_catalog)
self._streams[stream.name] = stream
return self._streams
@property
def state(self) -> dict:
"""Get tap state.
Returns:
The tap's state dictionary
Raises:
RuntimeError: If state has not been initialized.
"""
if self._state is None:
raise RuntimeError("Could not read from uninitialized state.")
return self._state
@property
def input_catalog(self) -> Catalog | None:
"""Get the catalog passed to the tap.
Returns:
Catalog dictionary input, or None if not provided.
"""
return self._input_catalog
@property
def catalog(self) -> Catalog:
"""Get the tap's working catalog.
Returns:
A Singer catalog object.
"""
if self._catalog is None:
self._catalog = self.input_catalog or self._singer_catalog
return self._catalog
@classproperty
def capabilities(self) -> list[CapabilitiesEnum]:
"""Get tap capabilities.
Returns:
A list of capabilities supported by this tap.
"""
return [
TapCapabilities.CATALOG,
TapCapabilities.STATE,
TapCapabilities.DISCOVER,
PluginCapabilities.ABOUT,
PluginCapabilities.STREAM_MAPS,
PluginCapabilities.FLATTENING,
]
# Connection and sync tests:
@final
def run_connection_test(self) -> bool:
"""Run connection test, aborting each stream after 1 record.
Returns:
True if the test succeeded.
"""
return self.run_sync_dry_run(
dry_run_record_limit=1,
streams=self.streams.values(),
)
@final
def run_sync_dry_run(
self,
dry_run_record_limit: int | None = 1,
streams: Iterable[Stream] | None = None,
) -> bool:
"""Run connection test.
Exceptions of type `MaxRecordsLimitException` and
`PartialSyncSuccessException` will be ignored.
Args:
dry_run_record_limit: The max number of records to sync per stream object.
streams: The streams to test. If omitted, all streams will be tested.
Returns:
True if the test succeeded.
"""
if streams is None:
streams = self.streams.values()
for stream in streams:
# Initialize streams' record limits before beginning the sync test.
stream.ABORT_AT_RECORD_COUNT = dry_run_record_limit
for stream in streams:
if stream.parent_stream_type:
self.logger.debug(
"Child stream '%s' should be called by "
"parent stream '%s'. "
"Skipping direct invocation.",
type(stream).__name__,
stream.parent_stream_type.__name__,
)
continue
with contextlib.suppress(
AbortedSyncFailedException,
AbortedSyncPausedException,
):
stream.sync()
return True
@final
def write_schemas(self) -> None:
"""Write a SCHEMA message for all known streams to STDOUT."""
for stream in self.streams.values():
stream._write_schema_message()
# Stream detection:
def run_discovery(self) -> str:
"""Write the catalog json to STDOUT and return as a string.
Returns:
The catalog as a string of JSON.
"""
catalog_text = self.catalog_json_text
print(catalog_text) # noqa: T201
return catalog_text
@property
def catalog_dict(self) -> dict:
"""Get catalog dictionary.
Returns:
The tap's catalog as a dict
"""
return cast(dict, self._singer_catalog.to_dict())
@property
def catalog_json_text(self) -> str:
"""Get catalog JSON.
Returns:
The tap's catalog as formatted JSON text.
"""
return json.dumps(self.catalog_dict, indent=2)
@property
def _singer_catalog(self) -> Catalog:
"""Return a Catalog object.
Returns:
:class:`singer_sdk._singerlib.Catalog`.
"""
return Catalog(
(stream.tap_stream_id, stream._singer_catalog_entry)
for stream in self.streams.values()
)
def discover_streams(self) -> Sequence[Stream]:
"""Initialize all available streams and return them as a list.
Return:
List of discovered Stream objects.
Raises:
NotImplementedError: If the tap implementation does not override this
method.
"""
raise NotImplementedError(
f"Tap '{self.name}' does not support discovery. "
"Please set the '--catalog' command line argument and try again.",
)
@final
def load_streams(self) -> list[Stream]:
"""Load streams from discovery and initialize DAG.
Return the output of `self.discover_streams()` to enumerate
discovered streams.
Returns:
A list of discovered streams, ordered by name.
"""
# Build the parent-child dependency DAG
# Index streams by type
streams_by_type: dict[type[Stream], list[Stream]] = {}
for stream in self.discover_streams():
stream_type = type(stream)
if stream_type not in streams_by_type:
streams_by_type[stream_type] = []
streams_by_type[stream_type].append(stream)
# Initialize child streams list for parents
for stream_type, streams in streams_by_type.items():
if stream_type.parent_stream_type:
parents = streams_by_type[stream_type.parent_stream_type]
for parent in parents:
for stream in streams:
parent.child_streams.append(stream)
self.logger.info(
"Added '%s' as child stream to '%s'",
stream.name,
parent.name,
)
streams = [stream for streams in streams_by_type.values() for stream in streams]
return sorted(
streams,
key=lambda x: x.name,
reverse=False,
)
# Bookmarks and state management
def load_state(self, state: dict[str, Any]) -> None:
"""Merge or initialize stream state with the provided state dictionary input.
Override this method to perform validation and backwards-compatibility patches
on self.state. If overriding, we recommend first running
`super().load_state(state)` to ensure compatibility with the SDK.
Args:
state: Initialize the tap's state with this value.
Raises:
ValueError: If the tap's own state is None, meaning it has not been
initialized.
"""
if self.state is None:
raise ValueError("Cannot write to uninitialized state dictionary.")
for stream_name, stream_state in state.get("bookmarks", {}).items():
for key, val in stream_state.items():
write_stream_state(
self.state,
stream_name,
key,
val,
)
# State handling
def _reset_state_progress_markers(self) -> None:
"""Clear prior jobs' progress markers at beginning of sync."""
for _, state in self.state.get("bookmarks", {}).items():
_state.reset_state_progress_markers(state)
for partition_state in state.get("partitions", []):
_state.reset_state_progress_markers(partition_state)
# Fix sync replication method incompatibilities
def _set_compatible_replication_methods(self) -> None:
stream: Stream
for stream in self.streams.values():
for descendent in stream.descendent_streams:
if descendent.selected and descendent.ignore_parent_replication_key:
self.logger.warning(
"Stream descendent '%s' is selected and "
"its parent '%s' does not use inclusive "
"replication keys. "
"Forcing full table replication for '%s'.",
descendent.name,
stream.name,
stream.name,
)
stream.replication_key = None
stream.forced_replication_method = "FULL_TABLE"
# Sync methods
@final
def sync_all(self) -> None:
"""Sync all streams."""
self._reset_state_progress_markers()
self._set_compatible_replication_methods()
stream: Stream
for stream in self.streams.values():
if not stream.selected and not stream.has_selected_descendents:
self.logger.info("Skipping deselected stream '%s'.", stream.name)
continue
if stream.parent_stream_type:
self.logger.debug(
"Child stream '%s' is expected to be called "
"by parent stream '%s'. "
"Skipping direct invocation.",
type(stream).__name__,
stream.parent_stream_type.__name__,
)
continue
stream.sync()
stream.finalize_state_progress_markers()
stream._write_state_message()
# this second loop is needed for all streams to print out their costs
# including child streams which are otherwise skipped in the loop above
for stream in self.streams.values():
stream.log_sync_costs()
# Command Line Execution
@classproperty
def cli(cls) -> Callable: # noqa: N805
"""Execute standard CLI handler for taps.
Returns:
A callable CLI object.
"""
@common_options.PLUGIN_VERSION
@common_options.PLUGIN_ABOUT
@common_options.PLUGIN_ABOUT_FORMAT
@common_options.PLUGIN_CONFIG
@click.option(
"--discover",
is_flag=True,
help="Run the tap in discovery mode.",
)
@click.option(
"--test",
is_flag=False,
flag_value=CliTestOptionValue.All.value,
default=CliTestOptionValue.Disabled,
help=(
"Use --test to sync a single record for each stream. "
"Use --test=schema to test schema output without syncing "
"records."
),
)
@click.option(
"--catalog",
help="Use a Singer catalog file with the tap.",
type=click.Path(),
)
@click.option(
"--state",
help="Use a bookmarks file for incremental replication.",
type=click.Path(),
)
@click.command(
help="Execute the Singer tap.",
context_settings={"help_option_names": ["--help"]},
)
def cli(
*,
version: bool = False,
about: bool = False,
discover: bool = False,
test: CliTestOptionValue = CliTestOptionValue.Disabled,
config: tuple[str, ...] = (),
state: str | None = None,
catalog: str | None = None,
about_format: str | None = None,
) -> None:
"""Handle command line execution.
Args:
version: Display the package version.
about: Display package metadata and settings.
discover: Run the tap in discovery mode.
test: Test connectivity by syncing a single record and exiting.
about_format: Specify output style for `--about`.
config: Configuration file location or 'ENV' to use environment
variables. Accepts multiple inputs as a tuple.
catalog: Use a Singer catalog file with the tap.",
state: Use a bookmarks file for incremental replication.
"""
if version:
cls.print_version()
return
if not about:
cls.print_version(print_fn=cls.logger.info)
else:
cls.print_about(output_format=about_format)
return
validate_config: bool = True
if discover:
# Don't abort on validation failures
validate_config = False
config_files, parse_env_config = cls.config_from_cli_args(*config)
tap = cls( # type: ignore[operator]
config=config_files or None,
state=state,
catalog=catalog,
parse_env_config=parse_env_config,
validate_config=validate_config,
)
if discover:
tap.run_discovery()
if test == CliTestOptionValue.All.value:
tap.run_connection_test()
elif test == CliTestOptionValue.All.value:
tap.run_connection_test()
elif test == CliTestOptionValue.Schema.value:
tap.write_schemas()
else:
tap.sync_all()
return cli
class SQLTap(Tap):
"""A specialized Tap for extracting from SQL streams."""
# Stream class used to initialize new SQL streams from their catalog declarations.
default_stream_class: type[SQLStream]
def __init__(
self,
*,
config: dict | PurePath | str | list[PurePath | str] | None = None,
catalog: PurePath | str | dict | None = None,
state: PurePath | str | dict | None = None,
parse_env_config: bool = False,
validate_config: bool = True,
) -> None:
"""Initialize the SQL tap.
The SQLTap initializer additionally creates a cache variable for _catalog_dict.
Args:
config: Tap configuration. Can be a dictionary, a single path to a
configuration file, or a list of paths to multiple configuration
files.
catalog: Tap catalog. Can be a dictionary or a path to the catalog file.
state: Tap state. Can be dictionary or a path to the state file.
parse_env_config: Whether to look for configuration values in environment
variables.
validate_config: True to require validation of config settings.
"""
self._catalog_dict: dict | None = None
super().__init__(
config=config,
catalog=catalog,
state=state,
parse_env_config=parse_env_config,
validate_config=validate_config,
)
@property
def catalog_dict(self) -> dict:
"""Get catalog dictionary.
Returns:
The tap's catalog as a dict
"""
if self._catalog_dict:
return self._catalog_dict
if self.input_catalog:
return self.input_catalog.to_dict()
connector = self.default_stream_class.connector_class(dict(self.config))
result: dict[str, list[dict]] = {"streams": []}
result["streams"].extend(connector.discover_catalog_entries())
self._catalog_dict = result
return self._catalog_dict
def discover_streams(self) -> list[Stream]:
"""Initialize all available streams and return them as a list.
Returns:
List of discovered Stream objects.
"""
result: list[Stream] = []
for catalog_entry in self.catalog_dict["streams"]:
result.append(self.default_stream_class(self, catalog_entry))
return result
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/__init__.py | """SDK for building Singer taps."""
from __future__ import annotations
from singer_sdk import streams
from singer_sdk.connectors import SQLConnector
from singer_sdk.mapper_base import InlineMapper
from singer_sdk.plugin_base import PluginBase
from singer_sdk.sinks import BatchSink, RecordSink, Sink, SQLSink
from singer_sdk.streams import GraphQLStream, RESTStream, SQLStream, Stream
from singer_sdk.tap_base import SQLTap, Tap
from singer_sdk.target_base import SQLTarget, Target
__all__ = [
"BatchSink",
"GraphQLStream",
"InlineMapper",
"PluginBase",
"RecordSink",
"RESTStream",
"Sink",
"SQLConnector",
"SQLSink",
"SQLStream",
"SQLTap",
"SQLTarget",
"Stream",
"streams",
"Tap",
"Target",
]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/about.py | """About information for a plugin."""
from __future__ import annotations
import abc
import dataclasses
import json
import typing as t
from collections import OrderedDict
from textwrap import dedent
if t.TYPE_CHECKING:
from singer_sdk.helpers.capabilities import CapabilitiesEnum
__all__ = [
"AboutInfo",
"AboutFormatter",
"JSONFormatter",
"MarkdownFormatter",
]
@dataclasses.dataclass
class AboutInfo:
"""About information for a plugin."""
name: str
description: str | None
version: str
sdk_version: str
capabilities: list[CapabilitiesEnum]
settings: dict
class AboutFormatter(abc.ABC):
"""Abstract base class for about formatters."""
formats: t.ClassVar[dict[str, type[AboutFormatter]]] = {}
format_name: str
def __init_subclass__(cls, format_name: str) -> None:
"""Initialize subclass.
Args:
format_name: Name of the format.
"""
cls.formats[format_name] = cls
super().__init_subclass__()
@classmethod
def get_formatter(cls, name: str) -> AboutFormatter:
"""Get a formatter by name.
Args:
name: Name of the formatter.
Returns:
A formatter.
"""
return cls.formats[name]()
@abc.abstractmethod
def format_about(self, about_info: AboutInfo) -> str:
"""Render about information.
Args:
about_info: About information.
"""
...
class TextFormatter(AboutFormatter, format_name="text"):
"""About formatter for text output."""
def format_about(self, about_info: AboutInfo) -> str:
"""Render about information.
Args:
about_info: About information.
Returns:
A formatted string.
"""
return dedent(
f"""\
Name: {about_info.name}
Description: {about_info.description}
Version: {about_info.version}
SDK Version: {about_info.sdk_version}
Capabilities: {about_info.capabilities}
Settings: {about_info.settings}""",
)
class JSONFormatter(AboutFormatter, format_name="json"):
"""About formatter for JSON output."""
def __init__(self) -> None:
"""Initialize a JSONAboutFormatter."""
self.indent = 2
self.default = str
def format_about(self, about_info: AboutInfo) -> str:
"""Render about information.
Args:
about_info: About information.
Returns:
A formatted string.
"""
data = OrderedDict(
[
("name", about_info.name),
("description", about_info.description),
("version", about_info.version),
("sdk_version", about_info.sdk_version),
("capabilities", [c.value for c in about_info.capabilities]),
("settings", about_info.settings),
],
)
return json.dumps(data, indent=self.indent, default=self.default)
class MarkdownFormatter(AboutFormatter, format_name="markdown"):
"""About formatter for Markdown output."""
def format_about(self, about_info: AboutInfo) -> str:
"""Render about information.
Args:
about_info: About information.
Returns:
A formatted string.
"""
max_setting_len = t.cast(
int,
max(len(k) for k in about_info.settings["properties"]),
)
# Set table base for markdown
table_base = (
f"| {'Setting':{max_setting_len}}| Required | Default | Description |\n"
f"|:{'-' * max_setting_len}|:--------:|:-------:|:------------|\n"
)
# Empty list for string parts
md_list = []
# Get required settings for table
required_settings = about_info.settings.get("required", [])
# Iterate over Dict to set md
md_list.append(
f"# `{about_info.name}`\n\n"
f"{about_info.description}\n\n"
f"Built with the [Meltano Singer SDK](https://sdk.meltano.com).\n\n",
)
# Process capabilities and settings
capabilities = "## Capabilities\n\n"
capabilities += "\n".join([f"* `{v}`" for v in about_info.capabilities])
capabilities += "\n\n"
md_list.append(capabilities)
setting = "## Settings\n\n"
for k, v in about_info.settings.get("properties", {}).items():
md_description = v.get("description", "").replace("\n", "<BR/>")
table_base += (
f"| {k}{' ' * (max_setting_len - len(k))}"
f"| {'True' if k in required_settings else 'False':8} | "
f"{v.get('default', 'None'):7} | "
f"{md_description:11} |\n"
)
setting += table_base
setting += (
"\n"
+ "\n".join(
[
"A full list of supported settings and capabilities "
f"is available by running: `{about_info.name} --about`",
],
)
+ "\n"
)
md_list.append(setting)
return "".join(md_list)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/mapper_base.py | """Abstract base class for stream mapper plugins."""
from __future__ import annotations
import abc
from typing import TYPE_CHECKING, Callable, Iterable
import click
import singer_sdk._singerlib as singer
from singer_sdk.cli import common_options
from singer_sdk.helpers._classproperty import classproperty
from singer_sdk.helpers.capabilities import CapabilitiesEnum, PluginCapabilities
from singer_sdk.io_base import SingerReader
from singer_sdk.plugin_base import PluginBase
if TYPE_CHECKING:
from io import FileIO
class InlineMapper(PluginBase, SingerReader, metaclass=abc.ABCMeta):
"""Abstract base class for inline mappers."""
@classproperty
def capabilities(self) -> list[CapabilitiesEnum]:
"""Get capabilities.
Returns:
A list of plugin capabilities.
"""
return [
PluginCapabilities.STREAM_MAPS,
]
@staticmethod
def _write_messages(messages: Iterable[singer.Message]) -> None:
for message in messages:
singer.write_message(message)
def _process_schema_message(self, message_dict: dict) -> None:
self._write_messages(self.map_schema_message(message_dict))
def _process_record_message(self, message_dict: dict) -> None:
self._write_messages(self.map_record_message(message_dict))
def _process_state_message(self, message_dict: dict) -> None:
self._write_messages(self.map_state_message(message_dict))
def _process_activate_version_message(self, message_dict: dict) -> None:
self._write_messages(self.map_activate_version_message(message_dict))
def _process_batch_message(self, message_dict: dict) -> None:
self._write_messages(self.map_batch_message(message_dict))
@abc.abstractmethod
def map_schema_message(self, message_dict: dict) -> Iterable[singer.Message]:
"""Map a schema message to zero or more new messages.
Args:
message_dict: A SCHEMA message JSON dictionary.
"""
...
@abc.abstractmethod
def map_record_message(self, message_dict: dict) -> Iterable[singer.Message]:
"""Map a record message to zero or more new messages.
Args:
message_dict: A RECORD message JSON dictionary.
"""
...
@abc.abstractmethod
def map_state_message(self, message_dict: dict) -> Iterable[singer.Message]:
"""Map a state message to zero or more new messages.
Args:
message_dict: A STATE message JSON dictionary.
"""
...
@abc.abstractmethod
def map_activate_version_message(
self,
message_dict: dict,
) -> Iterable[singer.Message]:
"""Map a version message to zero or more new messages.
Args:
message_dict: An ACTIVATE_VERSION message JSON dictionary.
"""
...
def map_batch_message(
self,
message_dict: dict,
) -> Iterable[singer.Message]:
"""Map a batch message to zero or more new messages.
Args:
message_dict: A BATCH message JSON dictionary.
Raises:
NotImplementedError: if not implemented by subclass.
"""
raise NotImplementedError("BATCH messages are not supported by mappers.")
@classproperty
def cli(cls) -> Callable: # noqa: N805
"""Execute standard CLI handler for inline mappers.
Returns:
A callable CLI object.
"""
@common_options.PLUGIN_VERSION
@common_options.PLUGIN_ABOUT
@common_options.PLUGIN_ABOUT_FORMAT
@common_options.PLUGIN_CONFIG
@common_options.PLUGIN_FILE_INPUT
@click.command(
help="Execute the Singer mapper.",
context_settings={"help_option_names": ["--help"]},
)
def cli(
*,
version: bool = False,
about: bool = False,
config: tuple[str, ...] = (),
about_format: str | None = None,
file_input: FileIO | None = None,
) -> None:
"""Handle command line execution.
Args:
version: Display the package version.
about: Display package metadata and settings.
about_format: Specify output style for `--about`.
config: Configuration file location or 'ENV' to use environment
variables. Accepts multiple inputs as a tuple.
file_input: Specify a path to an input file to read messages from.
Defaults to standard in if unspecified.
"""
if version:
cls.print_version()
return
if not about:
cls.print_version(print_fn=cls.logger.info)
validate_config: bool = True
if about:
validate_config = False
cls.print_version(print_fn=cls.logger.info)
config_files, parse_env_config = cls.config_from_cli_args(*config)
mapper = cls( # type: ignore[operator]
config=config_files or None,
validate_config=validate_config,
parse_env_config=parse_env_config,
)
if about:
mapper.print_about(about_format)
else:
mapper.listen(file_input)
return cli
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/exceptions.py | """Defines a common set of exceptions which developers can raise and/or catch."""
from __future__ import annotations
import abc
import typing as t
if t.TYPE_CHECKING:
import requests
class ConfigValidationError(Exception):
"""Raised when a user's config settings fail validation."""
class FatalAPIError(Exception):
"""Exception raised when a failed request should not be considered retriable."""
class InvalidStreamSortException(Exception):
"""Exception to raise if sorting errors are found while syncing the records."""
class MapExpressionError(Exception):
"""Failed map expression evaluation."""
class RequestedAbortException(Exception):
"""Base class for abort and interrupt requests.
Whenever this exception is raised, streams will attempt to shut down gracefully and
will emit a final resumable `STATE` message if it is possible to do so.
"""
class MaxRecordsLimitException(RequestedAbortException):
"""Exception indicating the sync aborted due to too many records."""
class AbortedSyncExceptionBase(Exception, metaclass=abc.ABCMeta):
"""Base exception to raise when a stream sync is aborted.
Developers should not raise this directly, and instead should use:
1. `FatalAbortedSyncException` - Indicates the stream aborted abnormally and was not
able to reach a stable and resumable state.
2. `PausedSyncException` - Indicates the stream aborted abnormally and successfully
reached a 'paused' and resumable state.
Notes:
- `FULL_TABLE` sync operations cannot be paused and will always trigger a fatal
exception if aborted.
- `INCREMENTAL` and `LOG_BASED` streams are able to be paused only if a number of
preconditions are met, specifically, `state_partitioning_keys` cannot be
overridden and the stream must be declared with `is_sorted=True`.
"""
class AbortedSyncFailedException(AbortedSyncExceptionBase):
"""Exception to raise when sync is aborted and unable to reach a stable state.
This signifies that `FULL_TABLE` streams (if applicable) were successfully
completed, and any bookmarks from `INCREMENTAL` and `LOG_BASED` streams were
advanced and finalized successfully.
"""
class AbortedSyncPausedException(AbortedSyncExceptionBase):
"""Exception to raise when an aborted sync operation is paused successfully.
This exception indicates the stream aborted abnormally and successfully
reached a 'paused' status, and emitted a resumable state artifact before exiting.
Streams synced with `FULL_TABLE` replication can never have partial success or
'paused' status.
If this exception is raised, this signifies that additional records were left
on the source system and the sync operation aborted before reaching the end of the
stream. This exception signifies that bookmarks from `INCREMENTAL`
and `LOG_BASED` streams were successfully emitted and are resumable.
"""
class RecordsWithoutSchemaException(Exception):
"""Raised if a target receives RECORD messages prior to a SCHEMA message."""
class RetriableAPIError(Exception):
"""Exception raised when a failed request can be safely retried."""
def __init__(self, message: str, response: requests.Response | None = None) -> None:
"""Extends the default with the failed response as an attribute.
Args:
message (str): The Error Message
response (requests.Response): The response object.
"""
super().__init__(message)
self.response = response
class StreamMapConfigError(Exception):
"""Raised when a stream map has an invalid configuration."""
class TapStreamConnectionFailure(Exception):
"""Exception to raise when stream connection fails or stream is disconnected."""
class TooManyRecordsException(Exception):
"""Exception to raise when query returns more records than max_records."""
class ConformedNameClashException(Exception):
"""Raised when name conforming produces clashes.
e.g. two columns conformed to the same name
"""
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/target_base.py | """Target abstract class."""
from __future__ import annotations
import abc
import copy
import json
import sys
import time
from typing import IO, TYPE_CHECKING, Callable, Counter
import click
from joblib import Parallel, delayed, parallel_backend
from singer_sdk.cli import common_options
from singer_sdk.exceptions import RecordsWithoutSchemaException
from singer_sdk.helpers._batch import BaseBatchFileEncoding
from singer_sdk.helpers._classproperty import classproperty
from singer_sdk.helpers._compat import final
from singer_sdk.helpers.capabilities import (
TARGET_SCHEMA_CONFIG,
CapabilitiesEnum,
PluginCapabilities,
TargetCapabilities,
)
from singer_sdk.io_base import SingerMessageType, SingerReader
from singer_sdk.mapper import PluginMapper
from singer_sdk.plugin_base import PluginBase
if TYPE_CHECKING:
from io import FileIO
from pathlib import PurePath
from singer_sdk.sinks import Sink
_MAX_PARALLELISM = 8
class Target(PluginBase, SingerReader, metaclass=abc.ABCMeta):
"""Abstract base class for targets.
The `Target` class manages config information and is responsible for processing the
incoming Singer data stream and orchestrating any needed target `Sink` objects. As
messages are received from the tap, the `Target` class will automatically create
any needed target `Sink` objects and send records along to the appropriate `Sink`
object for that record.
"""
_MAX_RECORD_AGE_IN_MINUTES: float = 5.0
# Default class to use for creating new sink objects.
# Required if `Target.get_sink_class()` is not defined.
default_sink_class: type[Sink] | None = None
def __init__(
self,
*,
config: dict | PurePath | str | list[PurePath | str] | None = None,
parse_env_config: bool = False,
validate_config: bool = True,
) -> None:
"""Initialize the target.
Args:
config: Target configuration. Can be a dictionary, a single path to a
configuration file, or a list of paths to multiple configuration
files.
parse_env_config: Whether to look for configuration values in environment
variables.
validate_config: True to require validation of config settings.
"""
super().__init__(
config=config,
parse_env_config=parse_env_config,
validate_config=validate_config,
)
self._latest_state: dict[str, dict] = {}
self._drained_state: dict[str, dict] = {}
self._sinks_active: dict[str, Sink] = {}
self._sinks_to_clear: list[Sink] = []
self._max_parallelism: int | None = _MAX_PARALLELISM
# Approximated for max record age enforcement
self._last_full_drain_at: float = time.time()
# Initialize mapper
self.mapper: PluginMapper
self.mapper = PluginMapper(
plugin_config=dict(self.config),
logger=self.logger,
)
@classproperty
def capabilities(self) -> list[CapabilitiesEnum]:
"""Get target capabilities.
Returns:
A list of capabilities supported by this target.
"""
return [
PluginCapabilities.ABOUT,
PluginCapabilities.STREAM_MAPS,
PluginCapabilities.FLATTENING,
]
@property
def max_parallelism(self) -> int:
"""Get max parallel sinks.
The default is 8 if not overridden.
Returns:
Max number of sinks that can be drained in parallel.
"""
if self._max_parallelism is not None:
return self._max_parallelism
return _MAX_PARALLELISM
@max_parallelism.setter
def max_parallelism(self, new_value: int) -> None:
"""Override the default (max) parallelism.
The default is 8 if not overridden.
Args:
new_value: The new max degree of parallelism for this target.
"""
self._max_parallelism = new_value
def get_sink(
self,
stream_name: str,
*,
record: dict | None = None,
schema: dict | None = None,
key_properties: list[str] | None = None,
) -> Sink:
"""Return a sink for the given stream name.
A new sink will be created if `schema` is provided and if either `schema` or
`key_properties` has changed. If so, the old sink becomes archived and held
until the next drain_all() operation.
Developers only need to override this method if they want to provide a different
sink depending on the values within the `record` object. Otherwise, please see
`default_sink_class` property and/or the `get_sink_class()` method.
Raises :class:`singer_sdk.exceptions.RecordsWithoutSchemaException` if sink does
not exist and schema is not sent.
Args:
stream_name: Name of the stream.
record: Record being processed.
schema: Stream schema.
key_properties: Primary key of the stream.
Returns:
The sink used for this target.
"""
_ = record # Custom implementations may use record in sink selection.
if schema is None:
self._assert_sink_exists(stream_name)
return self._sinks_active[stream_name]
existing_sink = self._sinks_active.get(stream_name, None)
if not existing_sink:
return self.add_sink(stream_name, schema, key_properties)
if (
existing_sink.schema != schema
or existing_sink.key_properties != key_properties
):
self.logger.info(
"Schema or key properties for '%s' stream have changed. "
"Initializing a new '%s' sink...",
stream_name,
stream_name,
)
self._sinks_to_clear.append(self._sinks_active.pop(stream_name))
return self.add_sink(stream_name, schema, key_properties)
return existing_sink
def get_sink_class(self, stream_name: str) -> type[Sink]:
"""Get sink for a stream.
Developers can override this method to return a custom Sink type depending
on the value of `stream_name`. Optional when `default_sink_class` is set.
Args:
stream_name: Name of the stream.
Raises:
ValueError: If no :class:`singer_sdk.sinks.Sink` class is defined.
Returns:
The sink class to be used with the stream.
"""
if self.default_sink_class:
return self.default_sink_class
raise ValueError(
f"No sink class defined for '{stream_name}' "
"and no default sink class available.",
)
def sink_exists(self, stream_name: str) -> bool:
"""Check sink for a stream.
This method is internal to the SDK and should not need to be overridden.
Args:
stream_name: Name of the stream
Returns:
True if a sink has been initialized.
"""
return stream_name in self._sinks_active
@final
def add_sink(
self,
stream_name: str,
schema: dict,
key_properties: list[str] | None = None,
) -> Sink:
"""Create a sink and register it.
This method is internal to the SDK and should not need to be overridden.
Args:
stream_name: Name of the stream.
schema: Schema of the stream.
key_properties: Primary key of the stream.
Returns:
A new sink for the stream.
"""
self.logger.info("Initializing '%s' target sink...", self.name)
sink_class = self.get_sink_class(stream_name=stream_name)
sink = sink_class(
target=self,
stream_name=stream_name,
schema=schema,
key_properties=key_properties,
)
sink.setup()
self._sinks_active[stream_name] = sink
return sink
def _assert_sink_exists(self, stream_name: str) -> None:
"""Raise a RecordsWithoutSchemaException exception if stream doesn't exist.
Args:
stream_name: TODO
Raises:
RecordsWithoutSchemaException: If sink does not exist and schema
is not sent.
"""
if not self.sink_exists(stream_name):
raise RecordsWithoutSchemaException(
f"A record for stream '{stream_name}' was encountered before a "
"corresponding schema.",
)
# Message handling
def _handle_max_record_age(self) -> None:
"""Check if _MAX_RECORD_AGE_IN_MINUTES reached, and if so trigger drain."""
if self._max_record_age_in_minutes > self._MAX_RECORD_AGE_IN_MINUTES:
self.logger.info(
"One or more records have exceeded the max age of %d minutes. "
"Draining all sinks.",
self._MAX_RECORD_AGE_IN_MINUTES,
)
self.drain_all()
def _process_lines(self, file_input: IO[str]) -> Counter[str]:
"""Internal method to process jsonl lines from a Singer tap.
Args:
file_input: Readable stream of messages, each on a separate line.
Returns:
A counter object for the processed lines.
"""
self.logger.info("Target '%s' is listening for input from tap.", self.name)
counter = super()._process_lines(file_input)
line_count = sum(counter.values())
self.logger.info(
"Target '%s' completed reading %d lines of input "
"(%d records, %d batch manifests, %d state messages).",
self.name,
line_count,
counter[SingerMessageType.RECORD],
counter[SingerMessageType.BATCH],
counter[SingerMessageType.STATE],
)
return counter
def _process_endofpipe(self) -> None:
"""Called after all input lines have been read."""
self.drain_all(is_endofpipe=True)
def _process_record_message(self, message_dict: dict) -> None:
"""Process a RECORD message.
Args:
message_dict: TODO
"""
self._assert_line_requires(message_dict, requires={"stream", "record"})
stream_name = message_dict["stream"]
for stream_map in self.mapper.stream_maps[stream_name]:
raw_record = copy.copy(message_dict["record"])
transformed_record = stream_map.transform(raw_record)
if transformed_record is None:
# Record was filtered out by the map transform
continue
sink = self.get_sink(stream_map.stream_alias, record=transformed_record)
context = sink._get_context(transformed_record)
if sink.include_sdc_metadata_properties:
sink._add_sdc_metadata_to_record(
transformed_record,
message_dict,
context,
)
else:
sink._remove_sdc_metadata_from_record(transformed_record)
sink._validate_and_parse(transformed_record)
sink.tally_record_read()
transformed_record = sink.preprocess_record(transformed_record, context)
sink.process_record(transformed_record, context)
sink._after_process_record(context)
if sink.is_full:
self.logger.info(
"Target sink for '%s' is full. Draining...",
sink.stream_name,
)
self.drain_one(sink)
self._handle_max_record_age()
def _process_schema_message(self, message_dict: dict) -> None:
"""Process a SCHEMA messages.
Args:
message_dict: The newly received schema message.
"""
self._assert_line_requires(message_dict, requires={"stream", "schema"})
self._assert_line_requires(message_dict["schema"], requires={"properties"})
stream_name = message_dict["stream"]
schema = message_dict["schema"]
key_properties = message_dict.get("key_properties", None)
do_registration = False
if stream_name not in self.mapper.stream_maps:
do_registration = True
elif self.mapper.stream_maps[stream_name][0].raw_schema != schema:
self.logger.info(
"Schema has changed for stream '%s'. "
"Mapping definitions will be reset.",
stream_name,
)
do_registration = True
elif (
self.mapper.stream_maps[stream_name][0].raw_key_properties != key_properties
):
self.logger.info(
"Key properties have changed for stream '%s'. "
"Mapping definitions will be reset.",
stream_name,
)
do_registration = True
if not do_registration:
self.logger.debug(
"No changes detected in SCHEMA message for stream '%s'. Ignoring.",
stream_name,
)
return
self.mapper.register_raw_stream_schema(
stream_name,
schema,
key_properties,
)
for stream_map in self.mapper.stream_maps[stream_name]:
_ = self.get_sink(
stream_map.stream_alias,
schema=stream_map.transformed_schema,
key_properties=stream_map.transformed_key_properties,
)
@property
def _max_record_age_in_minutes(self) -> float:
return (time.time() - self._last_full_drain_at) / 60
def _reset_max_record_age(self) -> None:
self._last_full_drain_at = time.time()
def _process_state_message(self, message_dict: dict) -> None:
"""Process a state message. drain sinks if needed.
If state is unchanged, no actions will be taken.
Args:
message_dict: TODO
"""
self._assert_line_requires(message_dict, requires={"value"})
state = message_dict["value"]
if self._latest_state == state:
return
self._latest_state = state
def _process_activate_version_message(self, message_dict: dict) -> None:
"""Handle the optional ACTIVATE_VERSION message extension.
Args:
message_dict: TODO
"""
stream_name = message_dict["stream"]
sink = self.get_sink(stream_name)
sink.activate_version(message_dict["version"])
def _process_batch_message(self, message_dict: dict) -> None:
"""Handle the optional BATCH message extension.
Args:
message_dict: TODO
"""
sink = self.get_sink(message_dict["stream"])
encoding = BaseBatchFileEncoding.from_dict(message_dict["encoding"])
sink.process_batch_files(
encoding,
message_dict["manifest"],
)
self._handle_max_record_age()
# Sink drain methods
@final
def drain_all(self, *, is_endofpipe: bool = False) -> None:
"""Drains all sinks, starting with those cleared due to changed schema.
This method is internal to the SDK and should not need to be overridden.
Args:
is_endofpipe: This is passed by the
:meth:`~singer_sdk.Sink._process_endofpipe()` which
is called after the target instance has finished
listening to the stdin
"""
state = copy.deepcopy(self._latest_state)
self._drain_all(self._sinks_to_clear, 1)
if is_endofpipe:
for sink in self._sinks_to_clear:
sink.clean_up()
self._sinks_to_clear = []
self._drain_all(list(self._sinks_active.values()), self.max_parallelism)
if is_endofpipe:
for sink in self._sinks_active.values():
sink.clean_up()
self._write_state_message(state)
self._reset_max_record_age()
@final
def drain_one(self, sink: Sink) -> None:
"""Drain a specific sink.
This method is internal to the SDK and should not need to be overridden.
Args:
sink: Sink to be drained.
"""
if sink.current_size == 0:
return
draining_status = sink.start_drain()
sink.process_batch(draining_status)
sink.mark_drained()
def _drain_all(self, sink_list: list[Sink], parallelism: int) -> None:
if parallelism == 1:
for sink in sink_list:
self.drain_one(sink)
return
def _drain_sink(sink: Sink) -> None:
self.drain_one(sink)
with parallel_backend("threading", n_jobs=parallelism):
Parallel()(delayed(_drain_sink)(sink=sink) for sink in sink_list)
def _write_state_message(self, state: dict) -> None:
"""Emit the stream's latest state.
Args:
state: TODO
"""
state_json = json.dumps(state)
self.logger.info("Emitting completed target state %s", state_json)
sys.stdout.write(f"{state_json}\n")
sys.stdout.flush()
# CLI handler
@classproperty
def cli(cls) -> Callable: # noqa: N805
"""Execute standard CLI handler for taps.
Returns:
A callable CLI object.
"""
@common_options.PLUGIN_VERSION
@common_options.PLUGIN_ABOUT
@common_options.PLUGIN_ABOUT_FORMAT
@common_options.PLUGIN_CONFIG
@common_options.PLUGIN_FILE_INPUT
@click.command(
help="Execute the Singer target.",
context_settings={"help_option_names": ["--help"]},
)
def cli(
*,
version: bool = False,
about: bool = False,
config: tuple[str, ...] = (),
about_format: str | None = None,
file_input: FileIO | None = None,
) -> None:
"""Handle command line execution.
Args:
version: Display the package version.
about: Display package metadata and settings.
about_format: Specify output style for `--about`.
config: Configuration file location or 'ENV' to use environment
variables. Accepts multiple inputs as a tuple.
file_input: Specify a path to an input file to read messages from.
Defaults to standard in if unspecified.
"""
if version:
cls.print_version()
return
if not about:
cls.print_version(print_fn=cls.logger.info)
else:
cls.print_about(output_format=about_format)
return
validate_config: bool = True
cls.print_version(print_fn=cls.logger.info)
config_files, parse_env_config = cls.config_from_cli_args(*config)
target = cls( # type: ignore[operator]
config=config_files or None,
parse_env_config=parse_env_config,
validate_config=validate_config,
)
target.listen(file_input)
return cli
class SQLTarget(Target):
"""Target implementation for SQL destinations."""
@classproperty
def capabilities(self) -> list[CapabilitiesEnum]:
"""Get target capabilities.
Returns:
A list of capabilities supported by this target.
"""
sql_target_capabilities: list[CapabilitiesEnum] = super().capabilities
sql_target_capabilities.extend([TargetCapabilities.TARGET_SCHEMA])
return sql_target_capabilities
@classmethod
def append_builtin_config(cls: type[SQLTarget], config_jsonschema: dict) -> None:
"""Appends built-in config to `config_jsonschema` if not already set.
To customize or disable this behavior, developers may either override this class
method or override the `capabilities` property to disabled any unwanted
built-in capabilities.
For all except very advanced use cases, we recommend leaving these
implementations "as-is", since this provides the most choice to users and is
the most "future proof" in terms of taking advantage of built-in capabilities
which may be added in the future.
Args:
config_jsonschema: [description]
"""
def _merge_missing(source_jsonschema: dict, target_jsonschema: dict) -> None:
# Append any missing properties in the target with those from source.
for k, v in source_jsonschema["properties"].items():
if k not in target_jsonschema["properties"]:
target_jsonschema["properties"][k] = v
capabilities = cls.capabilities
if TargetCapabilities.TARGET_SCHEMA in capabilities:
_merge_missing(TARGET_SCHEMA_CONFIG, config_jsonschema)
super().append_builtin_config(config_jsonschema)
pass
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/pagination.py | """Generic paginator classes."""
from __future__ import annotations
import sys
from abc import ABCMeta, abstractmethod
from typing import TYPE_CHECKING, Any, Generic, Iterable, Optional, TypeVar
from urllib.parse import ParseResult, urlparse
from singer_sdk.helpers.jsonpath import extract_jsonpath
if sys.version_info >= (3, 8):
from typing import Protocol
else:
from typing_extensions import Protocol
if TYPE_CHECKING:
from requests import Response
T = TypeVar("T")
TPageToken = TypeVar("TPageToken")
def first(iterable: Iterable[T]) -> T:
"""Return the first element of an iterable or raise an exception.
Args:
iterable: An iterable.
Returns:
The first element of the iterable.
>>> first('ABC')
'A'
"""
return next(iter(iterable))
class BaseAPIPaginator(Generic[TPageToken], metaclass=ABCMeta):
"""An API paginator object."""
def __init__(self, start_value: TPageToken) -> None:
"""Create a new paginator.
Args:
start_value: Initial value.
"""
self._value: TPageToken = start_value
self._page_count = 0
self._finished = False
self._last_seen_record: dict | None = None
@property
def current_value(self) -> TPageToken:
"""Get the current pagination value.
Returns:
Current page value.
"""
return self._value
@property
def finished(self) -> bool:
"""Get a flag that indicates if the last page of data has been reached.
Returns:
True if there are no more pages.
"""
return self._finished
@property
def count(self) -> int:
"""Count the number of pages traversed so far.
Returns:
Number of pages.
"""
return self._page_count
def __str__(self) -> str:
"""Stringify this object.
Returns:
String representation.
"""
return f"{self.__class__.__name__}<{self.current_value}>"
def __repr__(self) -> str:
"""Stringify this object.
Returns:
String representation.
"""
return str(self)
def advance(self, response: Response) -> None:
"""Get a new page value and advance the current one.
Args:
response: API response object.
Raises:
RuntimeError: If a loop in pagination is detected. That is, when two
consecutive pagination tokens are identical.
"""
self._page_count += 1
if not self.has_more(response):
self._finished = True
return
new_value = self.get_next(response)
if new_value and new_value == self._value:
raise RuntimeError(
f"Loop detected in pagination. "
f"Pagination token {new_value} is identical to prior token.",
)
# Stop if new value None, empty string, 0, etc.
if not new_value:
self._finished = True
else:
self._value = new_value
def has_more(self, response: Response) -> bool: # noqa: ARG002
"""Override this method to check if the endpoint has any pages left.
Args:
response: API response object.
Returns:
Boolean flag used to indicate if the endpoint has more pages.
"""
return True
@abstractmethod
def get_next(self, response: Response) -> TPageToken | None:
"""Get the next pagination token or index from the API response.
Args:
response: API response object.
Returns:
The next page token or index. Return `None` from this method to indicate
the end of pagination.
"""
...
class SinglePagePaginator(BaseAPIPaginator[None]):
"""A paginator that does works with single-page endpoints."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Create a new paginator.
Args:
args: Paginator positional arguments for base class.
kwargs: Paginator keyword arguments for base class.
"""
super().__init__(None, *args, **kwargs)
def get_next(self, response: Response) -> None: # noqa: ARG002
"""Get the next pagination token or index from the API response.
Args:
response: API response object.
Returns:
The next page token or index. Return `None` from this method to indicate
the end of pagination.
"""
return
class BaseHATEOASPaginator(BaseAPIPaginator[Optional[ParseResult]], metaclass=ABCMeta):
"""Paginator class for APIs supporting HATEOAS links in their response bodies.
HATEOAS stands for "Hypermedia as the Engine of Application State". See
https://en.wikipedia.org/wiki/HATEOAS.
This paginator expects responses to have a key "next" with a value
like "https://api.com/link/to/next-item".
The :attr:`~singer_sdk.pagination.BaseAPIPaginator.current_value` attribute of
this paginator is a `urllib.parse.ParseResult`_ object. This object
contains the following attributes:
- scheme
- netloc
- path
- params
- query
- fragment
That means you can access and parse the query params in your stream like this:
.. code-block:: python
class MyHATEOASPaginator(BaseHATEOASPaginator):
def get_next_url(self, response):
return response.json().get("next")
class MyStream(Stream):
def get_new_paginator(self):
return MyHATEOASPaginator()
def get_url_params(self, next_page_token) -> dict:
if next_page_token:
return dict(parse_qsl(next_page_token.query))
return {}
.. _`urllib.parse.ParseResult`:
https://docs.python.org/3/library/urllib.parse.html#urllib.parse.urlparse
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
"""Create a new paginator.
Args:
args: Paginator positional arguments for base class.
kwargs: Paginator keyword arguments for base class.
"""
super().__init__(None, *args, **kwargs)
@abstractmethod
def get_next_url(self, response: Response) -> str | None:
"""Override this method to extract a HATEOAS link from the response.
Args:
response: API response object.
"""
...
def get_next(self, response: Response) -> ParseResult | None:
"""Get the next pagination token or index from the API response.
Args:
response: API response object.
Returns:
A parsed HATEOAS link if the response has one, otherwise `None`.
"""
next_url = self.get_next_url(response)
return urlparse(next_url) if next_url else None
class HeaderLinkPaginator(BaseHATEOASPaginator):
"""Paginator class for APIs supporting HATEOAS links in their headers.
Links:
- https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Link
- https://datatracker.ietf.org/doc/html/rfc8288#section-3
"""
def get_next_url(self, response: Response) -> str | None:
"""Override this method to extract a HATEOAS link from the response.
Args:
response: API response object.
Returns:
A HATEOAS link parsed from the response headers.
"""
url: str | None = response.links.get("next", {}).get("url")
return url
class JSONPathPaginator(BaseAPIPaginator[Optional[str]]):
"""Paginator class for APIs returning a pagination token in the response body."""
def __init__(
self,
jsonpath: str,
*args: Any,
**kwargs: Any,
) -> None:
"""Create a new paginator.
Args:
jsonpath: A JSONPath expression.
args: Paginator positional arguments for base class.
kwargs: Paginator keyword arguments for base class.
"""
super().__init__(None, *args, **kwargs)
self._jsonpath = jsonpath
def get_next(self, response: Response) -> str | None:
"""Get the next page token.
Args:
response: API response object.
Returns:
The next page token.
"""
all_matches = extract_jsonpath(self._jsonpath, response.json())
return next(all_matches, None)
class SimpleHeaderPaginator(BaseAPIPaginator[Optional[str]]):
"""Paginator class for APIs returning a pagination token in the response headers."""
def __init__(
self,
key: str,
*args: Any,
**kwargs: Any,
) -> None:
"""Create a new paginator.
Args:
key: Header key that contains the next page token.
args: Paginator positional arguments for base class.
kwargs: Paginator keyword arguments for base class.
"""
super().__init__(None, *args, **kwargs)
self._key = key
def get_next(self, response: Response) -> str | None:
"""Get the next page token.
Args:
response: API response object.
Returns:
The next page token.
"""
return response.headers.get(self._key, None)
class BasePageNumberPaginator(BaseAPIPaginator[int], metaclass=ABCMeta):
"""Paginator class for APIs that use page number."""
@abstractmethod
def has_more(self, response: Response) -> bool:
"""Override this method to check if the endpoint has any pages left.
Args:
response: API response object.
Returns:
Boolean flag used to indicate if the endpoint has more pages.
"""
...
def get_next(self, response: Response) -> int | None: # noqa: ARG002
"""Get the next page number.
Args:
response: API response object.
Returns:
The next page number.
"""
return self._value + 1
class BaseOffsetPaginator(BaseAPIPaginator[int], metaclass=ABCMeta):
"""Paginator class for APIs that use page offset."""
def __init__(
self,
start_value: int,
page_size: int,
*args: Any,
**kwargs: Any,
) -> None:
"""Create a new paginator.
Args:
start_value: Initial value.
page_size: Constant page size.
args: Paginator positional arguments.
kwargs: Paginator keyword arguments.
"""
super().__init__(start_value, *args, **kwargs)
self._page_size = page_size
@abstractmethod
def has_more(self, response: Response) -> bool:
"""Override this method to check if the endpoint has any pages left.
Args:
response: API response object.
Returns:
Boolean flag used to indicate if the endpoint has more pages.
"""
...
def get_next(self, response: Response) -> int | None: # noqa: ARG002
"""Get the next page offset.
Args:
response: API response object.
Returns:
The next page offset.
"""
return self._value + self._page_size
class LegacyPaginatedStreamProtocol(Protocol[TPageToken]):
"""Protocol for legacy paginated streams classes."""
def get_next_page_token(
self,
response: Response,
previous_token: TPageToken | None,
) -> TPageToken | None:
"""Get the next page token.
Args:
response: API response object.
previous_token: Previous page token.
"""
... # pragma: no cover
class LegacyStreamPaginator(
BaseAPIPaginator[Optional[TPageToken]],
Generic[TPageToken],
):
"""Paginator that works with REST streams as they exist today."""
def __init__(
self,
stream: LegacyPaginatedStreamProtocol[TPageToken],
*args: Any,
**kwargs: Any,
) -> None:
"""Create a new paginator.
Args:
stream: A RESTStream instance.
args: Paginator positional arguments for base class.
kwargs: Paginator keyword arguments for base class.
"""
super().__init__(None, *args, **kwargs)
self.stream = stream
def get_next(self, response: Response) -> TPageToken | None:
"""Get next page value by calling the stream method.
Args:
response: API response object.
Returns:
The next page token or index. Return `None` from this method to indicate
the end of pagination.
"""
return self.stream.get_next_page_token(response, self.current_value)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/typing.py | """Classes and functions to streamline JSONSchema typing.
Usage example:
--------------
.. code-block:: python
jsonschema = PropertiesList(
Property("username", StringType, required=True),
Property("password", StringType, required=True, secret=True),
Property("id", IntegerType, required=True),
Property("foo_or_bar", StringType, allowed_values=["foo", "bar"]),
Property(
"permissions",
ArrayType(
StringType(
allowed_values=["create", "delete", "insert", "update"],
examples=["insert", "update"],
),
),
),
Property("ratio", NumberType, examples=[0.25, 0.75, 1.0]),
Property("days_active", IntegerType),
Property("updated_on", DateTimeType),
Property("is_deleted", BooleanType),
Property(
"author",
ObjectType(
Property("id", StringType),
Property("name", StringType),
)
),
Property("tags", ArrayType(StringType)),
Property(
"groups",
ArrayType(
ObjectType(
Property("id", StringType),
Property("name", StringType),
)
)
),
).to_dict()
Note:
-----
- These helpers are designed to output json in the traditional Singer dialect.
- Due to the expansive set of capabilities within the JSONSchema spec, there may be
other valid implementations which are not syntactically identical to those generated
here.
"""
from __future__ import annotations
import json
from typing import (
TYPE_CHECKING,
Any,
Callable,
Generator,
Generic,
ItemsView,
Mapping,
MutableMapping,
TypeVar,
Union,
cast,
)
import sqlalchemy
from jsonschema import ValidationError, Validator, validators
from singer_sdk.helpers._typing import (
JSONSCHEMA_ANNOTATION_SECRET,
JSONSCHEMA_ANNOTATION_WRITEONLY,
append_type,
get_datelike_property_type,
)
if TYPE_CHECKING:
import sys
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
__all__ = [
"extend_validator_with_defaults",
"to_jsonschema_type",
"to_sql_type",
"JSONTypeHelper",
"StringType",
"DateTimeType",
"TimeType",
"DateType",
"DurationType",
"EmailType",
"HostnameType",
"IPv4Type",
"IPv6Type",
"UUIDType",
"URIType",
"URIReferenceType",
"URITemplateType",
"JSONPointerType",
"RelativeJSONPointerType",
"RegexType",
"BooleanType",
"IntegerType",
"NumberType",
"ArrayType",
"Property",
"ObjectType",
"CustomType",
"PropertiesList",
]
_JsonValue: TypeAlias = Union[
str,
int,
float,
bool,
list,
dict,
None,
]
T = TypeVar("T", bound=_JsonValue)
P = TypeVar("P")
def extend_validator_with_defaults(validator_class): # noqa: ANN001, ANN201
"""Fill in defaults, before validating with the provided JSON Schema Validator.
See
https://python-jsonschema.readthedocs.io/en/latest/faq/#why-doesn-t-my-schema-s-default-property-set-the-default-on-my-instance
for details.
Args:
validator_class: The JSON Schema Validator class to extend.
Returns:
The extended JSON Schema Validator class.
"""
validate_properties = validator_class.VALIDATORS["properties"]
def set_defaults(
validator: Validator,
properties: Mapping[str, dict],
instance: MutableMapping[str, Any],
schema: dict,
) -> Generator[ValidationError, None, None]:
for prop, subschema in properties.items():
if "default" in subschema:
instance.setdefault(prop, subschema["default"])
yield from validate_properties(
validator,
properties,
instance,
schema,
)
return validators.extend(
validator_class,
{"properties": set_defaults},
)
class DefaultInstanceProperty:
"""Property of default instance.
Descriptor similar to ``property`` that decorates an instance method to retrieve
a property from the instance initialized with default parameters, if the called on
the class.
"""
def __init__(self, fget: Callable) -> None:
"""Initialize the decorator.
Args:
fget: The function to decorate.
"""
self.fget = fget
def __get__(self, instance: P, owner: type[P]) -> Any: # noqa: ANN401
"""Get the property value.
Args:
instance: The instance to get the property value from.
owner: The class to get the property value from.
Returns:
The property value.
"""
if instance is None:
instance = owner()
return self.fget(instance)
class JSONTypeHelper(Generic[T]):
"""Type helper base class for JSONSchema types."""
def __init__(
self,
*,
allowed_values: list[T] | None = None,
examples: list[T] | None = None,
) -> None:
"""Initialize the type helper.
Args:
allowed_values: A list of allowed values.
examples: A list of example values.
"""
self.allowed_values = allowed_values
self.examples = examples
@DefaultInstanceProperty
def type_dict(self) -> dict:
"""Return dict describing the type.
Raises:
NotImplementedError: If the derived class does not override this method.
"""
raise NotImplementedError
@property
def extras(self) -> dict:
"""Return dict describing the JSON Schema extras.
Returns:
A dictionary containing the JSON Schema extras.
"""
result = {}
if self.allowed_values:
result["enum"] = self.allowed_values
if self.examples:
result["examples"] = self.examples
return result
def to_dict(self) -> dict:
"""Convert to dictionary.
Returns:
A JSON Schema dictionary describing the object.
"""
return self.type_dict # type: ignore[no-any-return]
def to_json(self, **kwargs: Any) -> str:
"""Convert to JSON.
Args:
kwargs: Additional keyword arguments to pass to json.dumps().
Returns:
A JSON string describing the object.
"""
return json.dumps(self.to_dict(), **kwargs)
class StringType(JSONTypeHelper[str]):
"""String type.
Examples:
>>> StringType.type_dict
{'type': ['string']}
>>> StringType().type_dict
{'type': ['string']}
>>> StringType(allowed_values=["a", "b"]).type_dict
{'type': ['string'], 'enum': ['a', 'b']}
"""
string_format: str | None = None
"""String format.
See the `formats built into the JSON Schema specification`_.
Returns:
A string describing the format.
.. _`formats built into the JSON Schema specification`:
https://json-schema.org/understanding-json-schema/reference/string.html#built-in-formats
"""
@property
def _format(self) -> dict:
return {"format": self.string_format} if self.string_format else {}
@DefaultInstanceProperty
def type_dict(self) -> dict:
"""Get type dictionary.
Returns:
A dictionary describing the type.
"""
return {
"type": ["string"],
**self._format,
**self.extras,
}
class DateTimeType(StringType):
"""DateTime type.
Example: `2018-11-13T20:20:39+00:00`
"""
string_format = "date-time"
class TimeType(StringType):
"""Time type.
Example: `20:20:39+00:00`
"""
string_format = "time"
class DateType(StringType):
"""Date type.
Example: `2018-11-13`
"""
string_format = "date"
class DurationType(StringType):
"""Duration type.
Example: `P3D`
"""
string_format = "duration"
class EmailType(StringType):
"""Email type."""
string_format = "email"
class HostnameType(StringType):
"""Hostname type."""
string_format = "hostname"
class IPv4Type(StringType):
"""IPv4 address type."""
string_format = "ipv4"
class IPv6Type(StringType):
"""IPv6 type."""
string_format = "ipv6"
class UUIDType(StringType):
"""UUID type.
Example: `3e4666bf-d5e5-4aa7-b8ce-cefe41c7568a`
"""
string_format = "uuid"
class URIType(StringType):
"""URI type."""
string_format = "uri"
class URIReferenceType(StringType):
"""URIReference type."""
string_format = "uri-reference"
class URITemplateType(StringType):
"""URITemplate type."""
string_format = "uri-template"
class JSONPointerType(StringType):
"""JSONPointer type."""
string_format = "json-pointer"
class RelativeJSONPointerType(StringType):
"""RelativeJSONPointer type."""
string_format = "relative-json-pointer"
class RegexType(StringType):
"""Regex type."""
string_format = "regex"
class BooleanType(JSONTypeHelper[bool]):
"""Boolean type.
Examples:
>>> BooleanType.type_dict
{'type': ['boolean']}
>>> BooleanType().type_dict
{'type': ['boolean']}
"""
@DefaultInstanceProperty
def type_dict(self) -> dict:
"""Get type dictionary.
Returns:
A dictionary describing the type.
"""
return {"type": ["boolean"], **self.extras}
class IntegerType(JSONTypeHelper):
"""Integer type.
Examples:
>>> IntegerType.type_dict
{'type': ['integer']}
>>> IntegerType().type_dict
{'type': ['integer']}
>>> IntegerType(allowed_values=[1, 2]).type_dict
{'type': ['integer'], 'enum': [1, 2]}
"""
@DefaultInstanceProperty
def type_dict(self) -> dict:
"""Get type dictionary.
Returns:
A dictionary describing the type.
"""
return {"type": ["integer"], **self.extras}
class NumberType(JSONTypeHelper[float]):
"""Number type.
Examples:
>>> NumberType.type_dict
{'type': ['number']}
>>> NumberType().type_dict
{'type': ['number']}
>>> NumberType(allowed_values=[1.0, 2.0]).type_dict
{'type': ['number'], 'enum': [1.0, 2.0]}
"""
@DefaultInstanceProperty
def type_dict(self) -> dict:
"""Get type dictionary.
Returns:
A dictionary describing the type.
"""
return {"type": ["number"], **self.extras}
W = TypeVar("W", bound=JSONTypeHelper)
class ArrayType(JSONTypeHelper[list], Generic[W]):
"""Array type."""
def __init__(self, wrapped_type: W | type[W], **kwargs: Any) -> None:
"""Initialize Array type with wrapped inner type.
Args:
wrapped_type: JSON Schema item type inside the array.
**kwargs: Additional keyword arguments to pass to the parent class.
"""
self.wrapped_type = wrapped_type
super().__init__(**kwargs)
@property
def type_dict(self) -> dict: # type: ignore[override]
"""Get type dictionary.
Returns:
A dictionary describing the type.
"""
return {"type": "array", "items": self.wrapped_type.type_dict, **self.extras}
class Property(JSONTypeHelper[T], Generic[T]):
"""Generic Property. Should be nested within a `PropertiesList`."""
# TODO: Make some of these arguments keyword-only. This is a breaking change.
def __init__(
self,
name: str,
wrapped: JSONTypeHelper[T] | type[JSONTypeHelper[T]],
required: bool = False, # noqa: FBT001, FBT002
default: T | None = None,
description: str | None = None,
secret: bool | None = False, # noqa: FBT002
allowed_values: list[T] | None = None,
examples: list[T] | None = None,
) -> None:
"""Initialize Property object.
Note: Properties containing secrets should be specified with `secret=True`.
Doing so will add the annotation `writeOnly=True`, in accordance with JSON
Schema Draft 7 and later, and `secret=True` as an additional hint to readers.
More info: https://json-schema.org/draft-07/json-schema-release-notes.html
Args:
name: Property name.
wrapped: JSON Schema type of the property.
required: Whether this is a required property.
default: Default value in the JSON Schema.
description: Long-text property description.
secret: True if this is a credential or other secret.
allowed_values: A list of allowed value options, if only specific values
are permitted. This will define the type as an 'enum'.
examples: Optional. A list of one or more sample values. These may be
displayed to the user as hints of the expected format of inputs.
"""
self.name = name
self.wrapped = wrapped
self.optional = not required
self.default = default
self.description = description
self.secret = secret
self.allowed_values = allowed_values or None
self.examples = examples or None
@property
def type_dict(self) -> dict: # type: ignore[override]
"""Get type dictionary.
Returns:
A dictionary describing the type.
Raises:
ValueError: If the type dict is not valid.
"""
wrapped = self.wrapped
if isinstance(wrapped, type) and not isinstance(wrapped.type_dict, Mapping):
raise ValueError(
f"Type dict for {wrapped} is not defined. "
"Try instantiating it with a nested type such as "
f"{wrapped.__name__}(StringType).",
)
return cast(dict, wrapped.type_dict)
def to_dict(self) -> dict:
"""Return a dict mapping the property name to its definition.
Returns:
A JSON Schema dictionary describing the object.
"""
type_dict = self.type_dict
if self.optional:
type_dict = append_type(type_dict, "null")
if self.default is not None:
type_dict.update({"default": self.default})
if self.description:
type_dict.update({"description": self.description})
if self.secret:
type_dict.update(
{
JSONSCHEMA_ANNOTATION_SECRET: True,
JSONSCHEMA_ANNOTATION_WRITEONLY: True,
},
)
if self.allowed_values:
type_dict.update({"enum": self.allowed_values})
if self.examples:
type_dict.update({"examples": self.examples})
return {self.name: type_dict}
class ObjectType(JSONTypeHelper):
"""Object type, which wraps one or more named properties."""
def __init__(
self,
*properties: Property,
additional_properties: W | type[W] | bool | None = None,
pattern_properties: Mapping[str, W | type[W]] | None = None,
**kwargs: Any,
) -> None:
"""Initialize ObjectType from its list of properties.
Args:
properties: Zero or more attributes for this JSON object.
additional_properties: A schema to match against unnamed properties in
this object, or a boolean indicating if extra properties are allowed.
pattern_properties: A dictionary of regex patterns to match against
property names, and the schema to match against the values.
**kwargs: Additional keyword arguments to pass to the `JSONTypeHelper`.
Examples:
>>> t = ObjectType(
... Property("name", StringType, required=True),
... Property("age", IntegerType),
... Property("height", NumberType),
... additional_properties=False,
... )
>>> print(t.to_json(indent=2))
{
"type": "object",
"properties": {
"name": {
"type": [
"string"
]
},
"age": {
"type": [
"integer",
"null"
]
},
"height": {
"type": [
"number",
"null"
]
}
},
"required": [
"name"
],
"additionalProperties": false
}
>>> t = ObjectType(
... Property("name", StringType, required=True),
... Property("age", IntegerType),
... Property("height", NumberType),
... additional_properties=StringType,
... )
>>> print(t.to_json(indent=2))
{
"type": "object",
"properties": {
"name": {
"type": [
"string"
]
},
"age": {
"type": [
"integer",
"null"
]
},
"height": {
"type": [
"number",
"null"
]
}
},
"required": [
"name"
],
"additionalProperties": {
"type": [
"string"
]
}
}
"""
self.wrapped: dict[str, Property] = {prop.name: prop for prop in properties}
self.additional_properties = additional_properties
self.pattern_properties = pattern_properties
super().__init__(**kwargs)
@property
def type_dict(self) -> dict: # type: ignore[override]
"""Get type dictionary.
Returns:
A dictionary describing the type.
"""
merged_props = {}
required = []
for w in self.wrapped.values():
merged_props.update(w.to_dict())
if not w.optional:
required.append(w.name)
result: dict[str, Any] = {"type": "object", "properties": merged_props}
if required:
result["required"] = required
if self.additional_properties is not None:
if isinstance(self.additional_properties, bool):
result["additionalProperties"] = self.additional_properties
else:
result["additionalProperties"] = self.additional_properties.type_dict
if self.pattern_properties:
result["patternProperties"] = {
k: v.type_dict for k, v in self.pattern_properties.items()
}
return result
class CustomType(JSONTypeHelper):
"""Accepts an arbitrary JSON Schema dictionary."""
def __init__(self, jsonschema_type_dict: dict) -> None:
"""Initialize JSONTypeHelper by importing an existing JSON Schema type.
Args:
jsonschema_type_dict: TODO
"""
self._jsonschema_type_dict = jsonschema_type_dict
@property
def type_dict(self) -> dict: # type: ignore[override]
"""Get type dictionary.
Returns:
A dictionary describing the type.
"""
return self._jsonschema_type_dict
class PropertiesList(ObjectType):
"""Properties list. A convenience wrapper around the ObjectType class."""
def items(self) -> ItemsView[str, Property]:
"""Get wrapped properties.
Returns:
List of (name, property) tuples.
"""
return self.wrapped.items()
def append(self, property: Property) -> None: # noqa: A002
"""Append a property to the property list.
Args:
property: Property to add
"""
self.wrapped[property.name] = property
def to_jsonschema_type(
from_type: str | sqlalchemy.types.TypeEngine | type[sqlalchemy.types.TypeEngine],
) -> dict:
"""Return the JSON Schema dict that describes the sql type.
Args:
from_type: The SQL type as a string or as a TypeEngine. If a TypeEngine is
provided, it may be provided as a class or a specific object instance.
Raises:
ValueError: If the `from_type` value is not of type `str` or `TypeEngine`.
Returns:
A compatible JSON Schema type definition.
"""
sqltype_lookup: dict[str, dict] = {
# NOTE: This is an ordered mapping, with earlier mappings taking precedence.
# If the SQL-provided type contains the type name on the left, the mapping
# will return the respective singer type.
"timestamp": DateTimeType.type_dict,
"datetime": DateTimeType.type_dict,
"date": DateType.type_dict,
"int": IntegerType.type_dict,
"number": NumberType.type_dict,
"decimal": NumberType.type_dict,
"double": NumberType.type_dict,
"float": NumberType.type_dict,
"string": StringType.type_dict,
"text": StringType.type_dict,
"char": StringType.type_dict,
"bool": BooleanType.type_dict,
"variant": StringType.type_dict,
}
if isinstance(from_type, str):
type_name = from_type
elif isinstance(from_type, sqlalchemy.types.TypeEngine):
type_name = type(from_type).__name__
elif isinstance(from_type, type) and issubclass(
from_type,
sqlalchemy.types.TypeEngine,
):
type_name = from_type.__name__
else:
raise ValueError("Expected `str` or a SQLAlchemy `TypeEngine` object or type.")
# Look for the type name within the known SQL type names:
for sqltype, jsonschema_type in sqltype_lookup.items():
if sqltype.lower() in type_name.lower():
return jsonschema_type
return sqltype_lookup["string"] # safe failover to str
def _jsonschema_type_check(jsonschema_type: dict, type_check: tuple[str]) -> bool:
"""Return True if the jsonschema_type supports the provided type.
Args:
jsonschema_type: The type dict.
type_check: A tuple of type strings to look for.
Returns:
True if the schema suports the type.
"""
if "type" in jsonschema_type:
if isinstance(jsonschema_type["type"], (list, tuple)):
for t in jsonschema_type["type"]:
if t in type_check:
return True
else:
if jsonschema_type.get("type") in type_check: # noqa: PLR5501
return True
if any(t in type_check for t in jsonschema_type.get("anyOf", ())):
return True
return False
def to_sql_type(jsonschema_type: dict) -> sqlalchemy.types.TypeEngine: # noqa: PLR0911
"""Convert JSON Schema type to a SQL type.
Args:
jsonschema_type: The JSON Schema object.
Returns:
The SQL type.
"""
if _jsonschema_type_check(jsonschema_type, ("string",)):
datelike_type = get_datelike_property_type(jsonschema_type)
if datelike_type:
if datelike_type == "date-time":
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DATETIME())
if datelike_type in "time":
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.TIME())
if datelike_type == "date":
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DATE())
maxlength = jsonschema_type.get("maxLength")
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR(maxlength))
if _jsonschema_type_check(jsonschema_type, ("integer",)):
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.INTEGER())
if _jsonschema_type_check(jsonschema_type, ("number",)):
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.DECIMAL())
if _jsonschema_type_check(jsonschema_type, ("boolean",)):
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.BOOLEAN())
if _jsonschema_type_check(jsonschema_type, ("object",)):
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR())
if _jsonschema_type_check(jsonschema_type, ("array",)):
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR())
return cast(sqlalchemy.types.TypeEngine, sqlalchemy.types.VARCHAR())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/connectors/__init__.py | """Module for SQL-related operations."""
from __future__ import annotations
from .sql import SQLConnector
__all__ = ["SQLConnector"]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/connectors/sql.py | """Common SQL connectors for Streams and Sinks."""
from __future__ import annotations
import logging
import warnings
from contextlib import contextmanager
from datetime import datetime
from functools import lru_cache
from typing import TYPE_CHECKING, Any, Iterable, Iterator, cast
import sqlalchemy
from sqlalchemy.engine import Engine
from singer_sdk import typing as th
from singer_sdk._singerlib import CatalogEntry, MetadataMapping, Schema
from singer_sdk.exceptions import ConfigValidationError
if TYPE_CHECKING:
from sqlalchemy.engine.reflection import Inspector
class SQLConnector:
"""Base class for SQLAlchemy-based connectors.
The connector class serves as a wrapper around the SQL connection.
The functions of the connector are:
- connecting to the source
- generating SQLAlchemy connection and engine objects
- discovering schema catalog entries
- performing type conversions to/from JSONSchema types
- dialect-specific functions, such as escaping and fully qualified names
"""
allow_column_add: bool = True # Whether ADD COLUMN is supported.
allow_column_rename: bool = True # Whether RENAME COLUMN is supported.
allow_column_alter: bool = False # Whether altering column types is supported.
allow_merge_upsert: bool = False # Whether MERGE UPSERT is supported.
allow_temp_tables: bool = True # Whether temp tables are supported.
_cached_engine: Engine | None = None
def __init__(
self,
config: dict | None = None,
sqlalchemy_url: str | None = None,
) -> None:
"""Initialize the SQL connector.
Args:
config: The parent tap or target object's config.
sqlalchemy_url: Optional URL for the connection.
"""
self._config: dict[str, Any] = config or {}
self._sqlalchemy_url: str | None = sqlalchemy_url or None
@property
def config(self) -> dict:
"""If set, provides access to the tap or target config.
Returns:
The settings as a dict.
"""
return self._config
@property
def logger(self) -> logging.Logger:
"""Get logger.
Returns:
Plugin logger.
"""
return logging.getLogger("sqlconnector")
@contextmanager
def _connect(self) -> Iterator[sqlalchemy.engine.Connection]:
with self._engine.connect().execution_options(stream_results=True) as conn:
yield conn
def create_sqlalchemy_connection(self) -> sqlalchemy.engine.Connection:
"""(DEPRECATED) Return a new SQLAlchemy connection using the provided config.
Do not use the SQLConnector's connection directly. Instead, if you need
to execute something that isn't available on the connector currently,
make a child class and add a method on that connector.
By default this will create using the sqlalchemy `stream_results=True` option
described here:
https://docs.sqlalchemy.org/en/14/core/connections.html#using-server-side-cursors-a-k-a-stream-results
Developers may override this method if their provider does not support
server side cursors (`stream_results`) or in order to use different
configurations options when creating the connection object.
Returns:
A newly created SQLAlchemy engine object.
"""
warnings.warn(
"`SQLConnector.create_sqlalchemy_connection` is deprecated. "
"If you need to execute something that isn't available "
"on the connector currently, make a child class and "
"add your required method on that connector.",
DeprecationWarning,
stacklevel=2,
)
return self._engine.connect().execution_options(stream_results=True)
def create_sqlalchemy_engine(self) -> Engine:
"""(DEPRECATED) Return a new SQLAlchemy engine using the provided config.
Developers can generally override just one of the following:
`sqlalchemy_engine`, sqlalchemy_url`.
Returns:
A newly created SQLAlchemy engine object.
"""
warnings.warn(
"`SQLConnector.create_sqlalchemy_engine` is deprecated. Override"
"`_engine` or sqlalchemy_url` instead.",
DeprecationWarning,
stacklevel=2,
)
return self._engine
@property
def connection(self) -> sqlalchemy.engine.Connection:
"""(DEPRECATED) Return or set the SQLAlchemy connection object.
Do not use the SQLConnector's connection directly. Instead, if you need
to execute something that isn't available on the connector currently,
make a child class and add a method on that connector.
Returns:
The active SQLAlchemy connection object.
"""
warnings.warn(
"`SQLConnector.connection` is deprecated. If you need to execute something "
"that isn't available on the connector currently, make a child "
"class and add your required method on that connector.",
DeprecationWarning,
stacklevel=2,
)
return self.create_sqlalchemy_connection()
@property
def sqlalchemy_url(self) -> str:
"""Return the SQLAlchemy URL string.
Returns:
The URL as a string.
"""
if not self._sqlalchemy_url:
self._sqlalchemy_url = self.get_sqlalchemy_url(self.config)
return self._sqlalchemy_url
def get_sqlalchemy_url(self, config: dict[str, Any]) -> str:
"""Return the SQLAlchemy URL string.
Developers can generally override just one of the following:
`sqlalchemy_engine`, `get_sqlalchemy_url`.
Args:
config: A dictionary of settings from the tap or target config.
Returns:
The URL as a string.
Raises:
ConfigValidationError: If no valid sqlalchemy_url can be found.
"""
if "sqlalchemy_url" not in config:
raise ConfigValidationError(
"Could not find or create 'sqlalchemy_url' for connection.",
)
return cast(str, config["sqlalchemy_url"])
@staticmethod
def to_jsonschema_type(
sql_type: (
str | sqlalchemy.types.TypeEngine | type[sqlalchemy.types.TypeEngine] | Any
),
) -> dict:
"""Return a JSON Schema representation of the provided type.
By default will call `typing.to_jsonschema_type()` for strings and SQLAlchemy
types.
Developers may override this method to accept additional input argument types,
to support non-standard types, or to provide custom typing logic.
Args:
sql_type: The string representation of the SQL type, a SQLAlchemy
TypeEngine class or object, or a custom-specified object.
Raises:
ValueError: If the type received could not be translated to jsonschema.
Returns:
The JSON Schema representation of the provided type.
"""
if isinstance(sql_type, (str, sqlalchemy.types.TypeEngine)):
return th.to_jsonschema_type(sql_type)
if isinstance(sql_type, type):
if issubclass(sql_type, sqlalchemy.types.TypeEngine):
return th.to_jsonschema_type(sql_type)
raise ValueError(f"Unexpected type received: '{sql_type.__name__}'")
raise ValueError(f"Unexpected type received: '{type(sql_type).__name__}'")
@staticmethod
def to_sql_type(jsonschema_type: dict) -> sqlalchemy.types.TypeEngine:
"""Return a JSON Schema representation of the provided type.
By default will call `typing.to_sql_type()`.
Developers may override this method to accept additional input argument types,
to support non-standard types, or to provide custom typing logic.
If overriding this method, developers should call the default implementation
from the base class for all unhandled cases.
Args:
jsonschema_type: The JSON Schema representation of the source type.
Returns:
The SQLAlchemy type representation of the data type.
"""
return th.to_sql_type(jsonschema_type)
@staticmethod
def get_fully_qualified_name(
table_name: str | None = None,
schema_name: str | None = None,
db_name: str | None = None,
delimiter: str = ".",
) -> str:
"""Concatenates a fully qualified name from the parts.
Args:
table_name: The name of the table.
schema_name: The name of the schema. Defaults to None.
db_name: The name of the database. Defaults to None.
delimiter: Generally: '.' for SQL names and '-' for Singer names.
Raises:
ValueError: If all 3 name parts not supplied.
Returns:
The fully qualified name as a string.
"""
parts = []
if db_name:
parts.append(db_name)
if schema_name:
parts.append(schema_name)
if table_name:
parts.append(table_name)
if not parts:
raise ValueError(
"Could not generate fully qualified name: "
+ ":".join(
[
db_name or "(unknown-db)",
schema_name or "(unknown-schema)",
table_name or "(unknown-table-name)",
],
),
)
return delimiter.join(parts)
@property
def _dialect(self) -> sqlalchemy.engine.Dialect:
"""Return the dialect object.
Returns:
The dialect object.
"""
return cast(sqlalchemy.engine.Dialect, self._engine.dialect)
@property
def _engine(self) -> Engine:
"""Return the engine object.
This is the correct way to access the Connector's engine, if needed
(e.g. to inspect tables).
Returns:
The SQLAlchemy Engine that's attached to this SQLConnector instance.
"""
if not self._cached_engine:
self._cached_engine = self.create_engine()
return cast(Engine, self._cached_engine)
def create_engine(self) -> Engine:
"""Creates and returns a new engine. Do not call outside of _engine.
NOTE: Do not call this method. The only place that this method should
be called is inside the self._engine method. If you'd like to access
the engine on a connector, use self._engine.
This method exists solely so that tap/target developers can override it
on their subclass of SQLConnector to perform custom engine creation
logic.
Returns:
A new SQLAlchemy Engine.
"""
return sqlalchemy.create_engine(self.sqlalchemy_url, echo=False)
def quote(self, name: str) -> str:
"""Quote a name if it needs quoting, using '.' as a name-part delimiter.
Examples:
"my_table" => "`my_table`"
"my_schema.my_table" => "`my_schema`.`my_table`"
Args:
name: The unquoted name.
Returns:
str: The quoted name.
"""
return ".".join(
[
self._dialect.identifier_preparer.quote(name_part)
for name_part in name.split(".")
],
)
@lru_cache() # noqa: B019
def _warn_no_view_detection(self) -> None:
"""Print a warning, but only the first time."""
self.logger.warning(
"Provider does not support get_view_names(). "
"Streams list may be incomplete or `is_view` may be unpopulated.",
)
def get_schema_names(
self,
engine: Engine, # noqa: ARG002
inspected: Inspector,
) -> list[str]:
"""Return a list of schema names in DB.
Args:
engine: SQLAlchemy engine
inspected: SQLAlchemy inspector instance for engine
Returns:
List of schema names
"""
return inspected.get_schema_names()
def get_object_names(
self,
engine: Engine, # noqa: ARG002
inspected: Inspector,
schema_name: str,
) -> list[tuple[str, bool]]:
"""Return a list of syncable objects.
Args:
engine: SQLAlchemy engine
inspected: SQLAlchemy inspector instance for engine
schema_name: Schema name to inspect
Returns:
List of tuples (<table_or_view_name>, <is_view>)
"""
# Get list of tables and views
table_names = inspected.get_table_names(schema=schema_name)
try:
view_names = inspected.get_view_names(schema=schema_name)
except NotImplementedError:
# Some DB providers do not understand 'views'
self._warn_no_view_detection()
view_names = []
return [(t, False) for t in table_names] + [(v, True) for v in view_names]
# TODO maybe should be splitted into smaller parts?
def discover_catalog_entry(
self,
engine: Engine, # noqa: ARG002
inspected: Inspector,
schema_name: str,
table_name: str,
is_view: bool, # noqa: FBT001
) -> CatalogEntry:
"""Create `CatalogEntry` object for the given table or a view.
Args:
engine: SQLAlchemy engine
inspected: SQLAlchemy inspector instance for engine
schema_name: Schema name to inspect
table_name: Name of the table or a view
is_view: Flag whether this object is a view, returned by `get_object_names`
Returns:
`CatalogEntry` object for the given table or a view
"""
# Initialize unique stream name
unique_stream_id = self.get_fully_qualified_name(
db_name=None,
schema_name=schema_name,
table_name=table_name,
delimiter="-",
)
# Detect key properties
possible_primary_keys: list[list[str]] = []
pk_def = inspected.get_pk_constraint(table_name, schema=schema_name)
if pk_def and "constrained_columns" in pk_def:
possible_primary_keys.append(pk_def["constrained_columns"])
possible_primary_keys.extend(
index_def["column_names"]
for index_def in inspected.get_indexes(table_name, schema=schema_name)
if index_def.get("unique", False)
)
key_properties = next(iter(possible_primary_keys), None)
# Initialize columns list
table_schema = th.PropertiesList()
for column_def in inspected.get_columns(table_name, schema=schema_name):
column_name = column_def["name"]
is_nullable = column_def.get("nullable", False)
jsonschema_type: dict = self.to_jsonschema_type(
cast(sqlalchemy.types.TypeEngine, column_def["type"]),
)
table_schema.append(
th.Property(
name=column_name,
wrapped=th.CustomType(jsonschema_type),
required=not is_nullable,
),
)
schema = table_schema.to_dict()
# Initialize available replication methods
addl_replication_methods: list[str] = [""] # By default an empty list.
# Notes regarding replication methods:
# - 'INCREMENTAL' replication must be enabled by the user by specifying
# a replication_key value.
# - 'LOG_BASED' replication must be enabled by the developer, according
# to source-specific implementation capabilities.
replication_method = next(reversed(["FULL_TABLE", *addl_replication_methods]))
# Create the catalog entry object
return CatalogEntry(
tap_stream_id=unique_stream_id,
stream=unique_stream_id,
table=table_name,
key_properties=key_properties,
schema=Schema.from_dict(schema),
is_view=is_view,
replication_method=replication_method,
metadata=MetadataMapping.get_standard_metadata(
schema_name=schema_name,
schema=schema,
replication_method=replication_method,
key_properties=key_properties,
valid_replication_keys=None, # Must be defined by user
),
database=None, # Expects single-database context
row_count=None,
stream_alias=None,
replication_key=None, # Must be defined by user
)
def discover_catalog_entries(self) -> list[dict]:
"""Return a list of catalog entries from discovery.
Returns:
The discovered catalog entries as a list.
"""
result: list[dict] = []
engine = self._engine
inspected = sqlalchemy.inspect(engine)
for schema_name in self.get_schema_names(engine, inspected):
# Iterate through each table and view
for table_name, is_view in self.get_object_names(
engine,
inspected,
schema_name,
):
catalog_entry = self.discover_catalog_entry(
engine,
inspected,
schema_name,
table_name,
is_view,
)
result.append(catalog_entry.to_dict())
return result
def parse_full_table_name(
self,
full_table_name: str,
) -> tuple[str | None, str | None, str]:
"""Parse a fully qualified table name into its parts.
Developers may override this method if their platform does not support the
traditional 3-part convention: `db_name.schema_name.table_name`
Args:
full_table_name: A table name or a fully qualified table name. Depending on
SQL the platform, this could take the following forms:
- `<db>.<schema>.<table>` (three part names)
- `<db>.<table>` (platforms which do not use schema groupings)
- `<schema>.<name>` (if DB name is already in context)
- `<table>` (if DB name and schema name are already in context)
Returns:
A three part tuple (db_name, schema_name, table_name) with any unspecified
or unused parts returned as None.
"""
db_name: str | None = None
schema_name: str | None = None
parts = full_table_name.split(".")
if len(parts) == 1:
table_name = full_table_name
if len(parts) == 2: # noqa: PLR2004
schema_name, table_name = parts
if len(parts) == 3: # noqa: PLR2004
db_name, schema_name, table_name = parts
return db_name, schema_name, table_name
def table_exists(self, full_table_name: str) -> bool:
"""Determine if the target table already exists.
Args:
full_table_name: the target table name.
Returns:
True if table exists, False if not, None if unsure or undetectable.
"""
_, schema_name, table_name = self.parse_full_table_name(full_table_name)
return cast(
bool,
sqlalchemy.inspect(self._engine).has_table(table_name, schema_name),
)
def schema_exists(self, schema_name: str) -> bool:
"""Determine if the target database schema already exists.
Args:
schema_name: The target database schema name.
Returns:
True if the database schema exists, False if not.
"""
schema_names = sqlalchemy.inspect(self._engine).get_schema_names()
return schema_name in schema_names
def get_table_columns(
self,
full_table_name: str,
column_names: list[str] | None = None,
) -> dict[str, sqlalchemy.Column]:
"""Return a list of table columns.
Args:
full_table_name: Fully qualified table name.
column_names: A list of column names to filter to.
Returns:
An ordered list of column objects.
"""
_, schema_name, table_name = self.parse_full_table_name(full_table_name)
inspector = sqlalchemy.inspect(self._engine)
columns = inspector.get_columns(table_name, schema_name)
return {
col_meta["name"]: sqlalchemy.Column(
col_meta["name"],
col_meta["type"],
nullable=col_meta.get("nullable", False),
)
for col_meta in columns
if not column_names
or col_meta["name"].casefold() in {col.casefold() for col in column_names}
}
def get_table(
self,
full_table_name: str,
column_names: list[str] | None = None,
) -> sqlalchemy.Table:
"""Return a table object.
Args:
full_table_name: Fully qualified table name.
column_names: A list of column names to filter to.
Returns:
A table object with column list.
"""
columns = self.get_table_columns(
full_table_name=full_table_name,
column_names=column_names,
).values()
_, schema_name, table_name = self.parse_full_table_name(full_table_name)
meta = sqlalchemy.MetaData()
return sqlalchemy.schema.Table(
table_name,
meta,
*list(columns),
schema=schema_name,
)
def column_exists(self, full_table_name: str, column_name: str) -> bool:
"""Determine if the target table already exists.
Args:
full_table_name: the target table name.
column_name: the target column name.
Returns:
True if table exists, False if not.
"""
return column_name in self.get_table_columns(full_table_name)
def create_schema(self, schema_name: str) -> None:
"""Create target schema.
Args:
schema_name: The target schema to create.
"""
with self._connect() as conn:
conn.execute(sqlalchemy.schema.CreateSchema(schema_name))
def create_empty_table(
self,
full_table_name: str,
schema: dict,
primary_keys: list[str] | None = None,
partition_keys: list[str] | None = None,
as_temp_table: bool = False, # noqa: FBT001, FBT002
) -> None:
"""Create an empty target table.
Args:
full_table_name: the target table name.
schema: the JSON schema for the new table.
primary_keys: list of key properties.
partition_keys: list of partition keys.
as_temp_table: True to create a temp table.
Raises:
NotImplementedError: if temp tables are unsupported and as_temp_table=True.
RuntimeError: if a variant schema is passed with no properties defined.
"""
if as_temp_table:
raise NotImplementedError("Temporary tables are not supported.")
_ = partition_keys # Not supported in generic implementation.
_, schema_name, table_name = self.parse_full_table_name(full_table_name)
meta = sqlalchemy.MetaData(schema=schema_name)
columns: list[sqlalchemy.Column] = []
primary_keys = primary_keys or []
try:
properties: dict = schema["properties"]
except KeyError as e:
raise RuntimeError(
f"Schema for '{full_table_name}' does not define properties: {schema}",
) from e
for property_name, property_jsonschema in properties.items():
is_primary_key = property_name in primary_keys
columns.append(
sqlalchemy.Column(
property_name,
self.to_sql_type(property_jsonschema),
primary_key=is_primary_key,
),
)
_ = sqlalchemy.Table(table_name, meta, *columns)
meta.create_all(self._engine)
def _create_empty_column(
self,
full_table_name: str,
column_name: str,
sql_type: sqlalchemy.types.TypeEngine,
) -> None:
"""Create a new column.
Args:
full_table_name: The target table name.
column_name: The name of the new column.
sql_type: SQLAlchemy type engine to be used in creating the new column.
Raises:
NotImplementedError: if adding columns is not supported.
"""
if not self.allow_column_add:
raise NotImplementedError("Adding columns is not supported.")
column_add_ddl = self.get_column_add_ddl(
table_name=full_table_name,
column_name=column_name,
column_type=sql_type,
)
with self._connect() as conn, conn.begin():
conn.execute(column_add_ddl)
def prepare_schema(self, schema_name: str) -> None:
"""Create the target database schema.
Args:
schema_name: The target schema name.
"""
schema_exists = self.schema_exists(schema_name)
if not schema_exists:
self.create_schema(schema_name)
def prepare_table(
self,
full_table_name: str,
schema: dict,
primary_keys: list[str],
partition_keys: list[str] | None = None,
as_temp_table: bool = False, # noqa: FBT002, FBT001
) -> None:
"""Adapt target table to provided schema if possible.
Args:
full_table_name: the target table name.
schema: the JSON Schema for the table.
primary_keys: list of key properties.
partition_keys: list of partition keys.
as_temp_table: True to create a temp table.
"""
if not self.table_exists(full_table_name=full_table_name):
self.create_empty_table(
full_table_name=full_table_name,
schema=schema,
primary_keys=primary_keys,
partition_keys=partition_keys,
as_temp_table=as_temp_table,
)
return
for property_name, property_def in schema["properties"].items():
self.prepare_column(
full_table_name,
property_name,
self.to_sql_type(property_def),
)
def prepare_column(
self,
full_table_name: str,
column_name: str,
sql_type: sqlalchemy.types.TypeEngine,
) -> None:
"""Adapt target table to provided schema if possible.
Args:
full_table_name: the target table name.
column_name: the target column name.
sql_type: the SQLAlchemy type.
"""
if not self.column_exists(full_table_name, column_name):
self._create_empty_column(
full_table_name=full_table_name,
column_name=column_name,
sql_type=sql_type,
)
return
self._adapt_column_type(
full_table_name,
column_name=column_name,
sql_type=sql_type,
)
def rename_column(self, full_table_name: str, old_name: str, new_name: str) -> None:
"""Rename the provided columns.
Args:
full_table_name: The fully qualified table name.
old_name: The old column to be renamed.
new_name: The new name for the column.
Raises:
NotImplementedError: If `self.allow_column_rename` is false.
"""
if not self.allow_column_rename:
raise NotImplementedError("Renaming columns is not supported.")
column_rename_ddl = self.get_column_rename_ddl(
table_name=full_table_name,
column_name=old_name,
new_column_name=new_name,
)
with self._connect() as conn:
conn.execute(column_rename_ddl)
def merge_sql_types(
self,
sql_types: list[sqlalchemy.types.TypeEngine],
) -> sqlalchemy.types.TypeEngine:
"""Return a compatible SQL type for the selected type list.
Args:
sql_types: List of SQL types.
Returns:
A SQL type that is compatible with the input types.
Raises:
ValueError: If sql_types argument has zero members.
"""
if not sql_types:
raise ValueError("Expected at least one member in `sql_types` argument.")
if len(sql_types) == 1:
return sql_types[0]
# Gathering Type to match variables
# sent in _adapt_column_type
current_type = sql_types[0]
cur_len: int = getattr(current_type, "length", 0)
# Convert the two types given into a sorted list
# containing the best conversion classes
sql_types = self._sort_types(sql_types)
# If greater than two evaluate the first pair then on down the line
if len(sql_types) > 2: # noqa: PLR2004
return self.merge_sql_types(
[self.merge_sql_types([sql_types[0], sql_types[1]])] + sql_types[2:],
)
# Get the generic type class
for opt in sql_types:
# Get the length
opt_len: int = getattr(opt, "length", 0)
generic_type = type(opt.as_generic())
if isinstance(generic_type, type):
if issubclass(
generic_type,
(sqlalchemy.types.String, sqlalchemy.types.Unicode),
) or issubclass(
generic_type,
(sqlalchemy.types.String, sqlalchemy.types.Unicode),
):
# If length None or 0 then is varchar max ?
if (
(opt_len is None)
or (opt_len == 0)
or (cur_len and (opt_len >= cur_len))
):
return opt
# If best conversion class is equal to current type
# return the best conversion class
elif str(opt) == str(current_type):
return opt
raise ValueError(
f"Unable to merge sql types: {', '.join([str(t) for t in sql_types])}",
)
def _sort_types(
self,
sql_types: Iterable[sqlalchemy.types.TypeEngine],
) -> list[sqlalchemy.types.TypeEngine]:
"""Return the input types sorted from most to least compatible.
For example, [Smallint, Integer, Datetime, String, Double] would become
[Unicode, String, Double, Integer, Smallint, Datetime].
String types will be listed first, then decimal types, then integer types,
then bool types, and finally datetime and date. Higher precision, scale, and
length will be sorted earlier.
Args:
sql_types (List[sqlalchemy.types.TypeEngine]): [description]
Returns:
The sorted list.
"""
def _get_type_sort_key(
sql_type: sqlalchemy.types.TypeEngine,
) -> tuple[int, int]:
# return rank, with higher numbers ranking first
_len = int(getattr(sql_type, "length", 0) or 0)
_pytype = cast(type, sql_type.python_type)
if issubclass(_pytype, (str, bytes)):
return 900, _len
if issubclass(_pytype, datetime):
return 600, _len
if issubclass(_pytype, float):
return 400, _len
if issubclass(_pytype, int):
return 300, _len
return 0, _len
return sorted(sql_types, key=_get_type_sort_key, reverse=True)
def _get_column_type(
self,
full_table_name: str,
column_name: str,
) -> sqlalchemy.types.TypeEngine:
"""Get the SQL type of the declared column.
Args:
full_table_name: The name of the table.
column_name: The name of the column.
Returns:
The type of the column.
Raises:
KeyError: If the provided column name does not exist.
"""
try:
column = self.get_table_columns(full_table_name)[column_name]
except KeyError as ex:
raise KeyError(
f"Column `{column_name}` does not exist in table `{full_table_name}`.",
) from ex
return cast(sqlalchemy.types.TypeEngine, column.type)
@staticmethod
def get_column_add_ddl(
table_name: str,
column_name: str,
column_type: sqlalchemy.types.TypeEngine,
) -> sqlalchemy.DDL:
"""Get the create column DDL statement.
Override this if your database uses a different syntax for creating columns.
Args:
table_name: Fully qualified table name of column to alter.
column_name: Column name to create.
column_type: New column sqlalchemy type.
Returns:
A sqlalchemy DDL instance.
"""
create_column_clause = sqlalchemy.schema.CreateColumn(
sqlalchemy.Column(
column_name,
column_type,
),
)
return sqlalchemy.DDL(
"ALTER TABLE %(table_name)s ADD COLUMN %(create_column_clause)s",
{
"table_name": table_name,
"create_column_clause": create_column_clause,
},
)
@staticmethod
def get_column_rename_ddl(
table_name: str,
column_name: str,
new_column_name: str,
) -> sqlalchemy.DDL:
"""Get the create column DDL statement.
Override this if your database uses a different syntax for renaming columns.
Args:
table_name: Fully qualified table name of column to alter.
column_name: Existing column name.
new_column_name: New column name.
Returns:
A sqlalchemy DDL instance.
"""
return sqlalchemy.DDL(
"ALTER TABLE %(table_name)s "
"RENAME COLUMN %(column_name)s to %(new_column_name)s",
{
"table_name": table_name,
"column_name": column_name,
"new_column_name": new_column_name,
},
)
@staticmethod
def get_column_alter_ddl(
table_name: str,
column_name: str,
column_type: sqlalchemy.types.TypeEngine,
) -> sqlalchemy.DDL:
"""Get the alter column DDL statement.
Override this if your database uses a different syntax for altering columns.
Args:
table_name: Fully qualified table name of column to alter.
column_name: Column name to alter.
column_type: New column type string.
Returns:
A sqlalchemy DDL instance.
"""
return sqlalchemy.DDL(
"ALTER TABLE %(table_name)s ALTER COLUMN %(column_name)s (%(column_type)s)",
{
"table_name": table_name,
"column_name": column_name,
"column_type": column_type,
},
)
@staticmethod
def remove_collation(
column_type: sqlalchemy.types.TypeEngine,
) -> str | None:
"""Removes collation for the given column TypeEngine instance.
Args:
column_type: Column SQLAlchemy type.
Returns:
The removed collation as a string.
"""
if hasattr(column_type, "collation") and column_type.collation:
column_type_collation: str = column_type.collation
column_type.collation = None
return column_type_collation
return None
@staticmethod
def update_collation(
column_type: sqlalchemy.types.TypeEngine,
collation: str | None,
) -> None:
"""Sets column collation if column type has a collation attribute.
Args:
column_type: Column SQLAlchemy type.
collation: The colation
"""
if hasattr(column_type, "collation") and collation:
column_type.collation = collation
def _adapt_column_type(
self,
full_table_name: str,
column_name: str,
sql_type: sqlalchemy.types.TypeEngine,
) -> None:
"""Adapt table column type to support the new JSON schema type.
Args:
full_table_name: The target table name.
column_name: The target column name.
sql_type: The new SQLAlchemy type.
Raises:
NotImplementedError: if altering columns is not supported.
"""
current_type: sqlalchemy.types.TypeEngine = self._get_column_type(
full_table_name,
column_name,
)
# remove collation if present and save it
current_type_collation = self.remove_collation(current_type)
# Check if the existing column type and the sql type are the same
if str(sql_type) == str(current_type):
# The current column and sql type are the same
# Nothing to do
return
# Not the same type, generic type or compatible types
# calling merge_sql_types for assistnace
compatible_sql_type = self.merge_sql_types([current_type, sql_type])
if str(compatible_sql_type) == str(current_type):
# Nothing to do
return
# Put the collation level back before altering the column
if current_type_collation:
self.update_collation(compatible_sql_type, current_type_collation)
if not self.allow_column_alter:
raise NotImplementedError(
"Altering columns is not supported. "
f"Could not convert column '{full_table_name}.{column_name}' "
f"from '{current_type}' to '{compatible_sql_type}'.",
)
alter_column_ddl = self.get_column_alter_ddl(
table_name=full_table_name,
column_name=column_name,
column_type=compatible_sql_type,
)
with self._connect() as conn:
conn.execute(alter_column_ddl)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/configuration/__init__.py | """Configuration parsing and handling."""
from __future__ import annotations
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/configuration/_dict_config.py | """Helpers for parsing and wrangling configuration dictionaries."""
from __future__ import annotations
import logging
import os
from pathlib import Path
from typing import Any, Iterable
from dotenv import find_dotenv
from dotenv.main import DotEnv
from singer_sdk.helpers._typing import is_string_array_type
from singer_sdk.helpers._util import read_json_file
logger = logging.getLogger(__name__)
def parse_environment_config(
config_schema: dict[str, Any],
prefix: str,
dotenv_path: str | None = None,
) -> dict[str, Any]:
"""Parse configuration from environment variables.
Args:
config_schema: A JSON Schema dictionary for the configuration.
prefix: Prefix for environment variables.
dotenv_path: Path to a .env file. If None, will try to find one in increasingly
higher folders.
Raises:
ValueError: If an un-parsable setting is found.
Returns:
A configuration dictionary.
"""
result: dict[str, Any] = {}
if not dotenv_path:
dotenv_path = find_dotenv()
logger.debug("Loading configuration from %s", dotenv_path)
DotEnv(dotenv_path).set_as_environment_variables()
for config_key in config_schema["properties"]:
env_var_name = prefix + config_key.upper().replace("-", "_")
if env_var_name in os.environ:
env_var_value = os.environ[env_var_name]
logger.info(
"Parsing '%s' config from env variable '%s'.",
config_key,
env_var_name,
)
if is_string_array_type(config_schema["properties"][config_key]):
if env_var_value[0] == "[" and env_var_value[-1] == "]":
raise ValueError(
"A bracketed list was detected in the environment variable "
f"'{env_var_name}'. This syntax is no longer supported. "
"Please remove the brackets and try again.",
)
result[config_key] = env_var_value.split(",")
else:
result[config_key] = env_var_value
return result
def merge_config_sources(
inputs: Iterable[str],
config_schema: dict[str, Any],
env_prefix: str,
) -> dict[str, Any]:
"""Merge configuration from multiple sources into a single dictionary.
Args:
inputs: A sequence of configuration sources (file paths or ENV).
config_schema: A JSON Schema dictionary for the configuration.
env_prefix: Prefix for environment variables.
Raises:
FileNotFoundError: If any of config files does not exist.
Returns:
A single configuration dictionary.
"""
config: dict[str, Any] = {}
for config_path in inputs:
if config_path == "ENV":
env_config = parse_environment_config(config_schema, prefix=env_prefix)
config.update(env_config)
continue
if not Path(config_path).is_file():
raise FileNotFoundError(
f"Could not locate config file at '{config_path}'."
"Please check that the file exists.",
)
config.update(read_json_file(config_path))
return config
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/graphql.py | """Abstract base class for API-type streams."""
from __future__ import annotations
import abc
from typing import Any
from singer_sdk.helpers._classproperty import classproperty
from singer_sdk.streams.rest import RESTStream
class GraphQLStream(RESTStream, metaclass=abc.ABCMeta):
"""Abstract base class for API-type streams.
GraphQL streams inherit from the class `GraphQLStream`, which in turn inherits from
the `RESTStream` class. GraphQL streams are very similar to REST API-based streams,
but instead of specifying a `path` and `url_params`, developers override the
GraphQL query text.
"""
path = ""
rest_method = "POST"
@classproperty
def records_jsonpath(cls) -> str: # type: ignore[override] # noqa: N805
"""Get the JSONPath expression to extract records from an API response.
Returns:
JSONPath expression string
"""
return f"$.data.{cls.name}[*]"
@property
def query(self) -> str:
"""Set or return the GraphQL query string.
Raises:
NotImplementedError: If the derived class doesn't define this property.
"""
raise NotImplementedError("GraphQLStream `query` is not defined.")
def prepare_request_payload(
self,
context: dict | None,
next_page_token: Any | None,
) -> dict | None:
"""Prepare the data payload for the GraphQL API request.
Developers generally should generally not need to override this method.
Instead, developers set the payload by properly configuring the `query`
attribute.
Args:
context: Stream partition or context dictionary.
next_page_token: Token, page number or any request argument to request the
next page of data.
Returns:
Dictionary with the body to use for the request.
Raises:
ValueError: If the `query` property is not set in the request body.
"""
params = self.get_url_params(context, next_page_token)
query = self.query
if query is None:
raise ValueError("Graphql `query` property not set.")
if not query.lstrip().startswith("query"):
# Wrap text in "query { }" if not already wrapped
query = "query { " + query + " }"
query = query.lstrip()
request_data = {
"query": (" ".join([line.strip() for line in query.splitlines()])),
"variables": params,
}
self.logger.debug("Attempting query:\n%s", query)
return request_data
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py | """Abstract base class for API-type streams."""
from __future__ import annotations
import abc
import copy
import logging
from http import HTTPStatus
from typing import TYPE_CHECKING, Any, Callable, Generator, Generic, Iterable, TypeVar
from urllib.parse import urlparse
from warnings import warn
import backoff
import requests
from singer_sdk import metrics
from singer_sdk.authenticators import SimpleAuthenticator
from singer_sdk.exceptions import FatalAPIError, RetriableAPIError
from singer_sdk.helpers.jsonpath import extract_jsonpath
from singer_sdk.pagination import (
BaseAPIPaginator,
JSONPathPaginator,
LegacyStreamPaginator,
SimpleHeaderPaginator,
)
from singer_sdk.streams.core import Stream
if TYPE_CHECKING:
import sys
from datetime import datetime
from backoff.types import Details
from singer_sdk._singerlib import Schema
from singer_sdk.plugin_base import PluginBase as TapBaseClass
if sys.version_info >= (3, 10):
from typing import TypeAlias
else:
from typing_extensions import TypeAlias
DEFAULT_PAGE_SIZE = 1000
DEFAULT_REQUEST_TIMEOUT = 300 # 5 minutes
_TToken = TypeVar("_TToken")
_Auth: TypeAlias = Callable[[requests.PreparedRequest], requests.PreparedRequest]
class RESTStream(Stream, Generic[_TToken], metaclass=abc.ABCMeta):
"""Abstract base class for REST API streams."""
_page_size: int = DEFAULT_PAGE_SIZE
_requests_session: requests.Session | None
rest_method = "GET"
#: JSONPath expression to extract records from the API response.
records_jsonpath: str = "$[*]"
#: Response code reference for rate limit retries
extra_retry_statuses: list[int] = [HTTPStatus.TOO_MANY_REQUESTS]
#: Optional JSONPath expression to extract a pagination token from the API response.
#: Example: `"$.next_page"`
next_page_token_jsonpath: str | None = None
# Private constants. May not be supported in future releases:
_LOG_REQUEST_METRICS: bool = True
# Disabled by default for safety:
_LOG_REQUEST_METRIC_URLS: bool = False
@property
@abc.abstractmethod
def url_base(self) -> str:
"""Return the base url, e.g. ``https://api.mysite.com/v3/``."""
def __init__(
self,
tap: TapBaseClass,
name: str | None = None,
schema: dict[str, Any] | Schema | None = None,
path: str | None = None,
) -> None:
"""Initialize the REST stream.
Args:
tap: Singer Tap this stream belongs to.
schema: JSON schema for records in this stream.
name: Name of this stream.
path: URL path for this entity stream.
"""
super().__init__(name=name, schema=schema, tap=tap)
if path:
self.path = path
self._http_headers: dict = {}
self._requests_session = requests.Session()
self._compiled_jsonpath = None
self._next_page_token_compiled_jsonpath = None
@staticmethod
def _url_encode(val: str | datetime | bool | int | list[str]) -> str:
"""Encode the val argument as url-compatible string.
Args:
val: TODO
Returns:
TODO
"""
return val.replace("/", "%2F") if isinstance(val, str) else str(val)
def get_url(self, context: dict | None) -> str:
"""Get stream entity URL.
Developers override this method to perform dynamic URL generation.
Args:
context: Stream partition or context dictionary.
Returns:
A URL, optionally targeted to a specific partition or context.
"""
url = "".join([self.url_base, self.path or ""])
vals = copy.copy(dict(self.config))
vals.update(context or {})
for k, v in vals.items():
search_text = "".join(["{", k, "}"])
if search_text in url:
url = url.replace(search_text, self._url_encode(v))
return url
# HTTP Request functions
@property
def requests_session(self) -> requests.Session:
"""Get requests session.
Returns:
The `requests.Session`_ object for HTTP requests.
.. _requests.Session:
https://requests.readthedocs.io/en/latest/api/#request-sessions
"""
if not self._requests_session:
self._requests_session = requests.Session()
return self._requests_session
def validate_response(self, response: requests.Response) -> None:
"""Validate HTTP response.
Checks for error status codes and wether they are fatal or retriable.
In case an error is deemed transient and can be safely retried, then this
method should raise an :class:`singer_sdk.exceptions.RetriableAPIError`.
By default this applies to 5xx error codes, along with values set in:
:attr:`~singer_sdk.RESTStream.extra_retry_statuses`
In case an error is unrecoverable raises a
:class:`singer_sdk.exceptions.FatalAPIError`. By default, this applies to
4xx errors, excluding values found in:
:attr:`~singer_sdk.RESTStream.extra_retry_statuses`
Tap developers are encouraged to override this method if their APIs use HTTP
status codes in non-conventional ways, or if they communicate errors
differently (e.g. in the response body).
.. image:: ../images/200.png
Args:
response: A `requests.Response`_ object.
Raises:
FatalAPIError: If the request is not retriable.
RetriableAPIError: If the request is retriable.
.. _requests.Response:
https://requests.readthedocs.io/en/latest/api/#requests.Response
"""
if (
response.status_code in self.extra_retry_statuses
or HTTPStatus.INTERNAL_SERVER_ERROR
<= response.status_code
<= max(HTTPStatus)
):
msg = self.response_error_message(response)
raise RetriableAPIError(msg, response)
if (
HTTPStatus.BAD_REQUEST
<= response.status_code
< HTTPStatus.INTERNAL_SERVER_ERROR
):
msg = self.response_error_message(response)
raise FatalAPIError(msg)
def response_error_message(self, response: requests.Response) -> str:
"""Build error message for invalid http statuses.
WARNING - Override this method when the URL path may contain secrets or PII
Args:
response: A `requests.Response`_ object.
Returns:
str: The error message
"""
full_path = urlparse(response.url).path or self.path
error_type = (
"Client"
if HTTPStatus.BAD_REQUEST
<= response.status_code
< HTTPStatus.INTERNAL_SERVER_ERROR
else "Server"
)
return (
f"{response.status_code} {error_type} Error: "
f"{response.reason} for path: {full_path}"
)
def request_decorator(self, func: Callable) -> Callable:
"""Instantiate a decorator for handling request failures.
Uses a wait generator defined in `backoff_wait_generator` to
determine backoff behaviour. Try limit is defined in
`backoff_max_tries`, and will trigger the event defined in
`backoff_handler` before retrying. Developers may override one or
all of these methods to provide custom backoff or retry handling.
Args:
func: Function to decorate.
Returns:
A decorated method.
"""
decorator: Callable = backoff.on_exception(
self.backoff_wait_generator,
(
ConnectionResetError,
RetriableAPIError,
requests.exceptions.ReadTimeout,
requests.exceptions.ConnectionError,
requests.exceptions.ChunkedEncodingError,
requests.exceptions.ContentDecodingError,
),
max_tries=self.backoff_max_tries,
on_backoff=self.backoff_handler,
jitter=self.backoff_jitter,
)(func)
return decorator
def _request(
self,
prepared_request: requests.PreparedRequest,
context: dict | None,
) -> requests.Response:
"""TODO.
Args:
prepared_request: TODO
context: Stream partition or context dictionary.
Returns:
TODO
"""
response = self.requests_session.send(prepared_request, timeout=self.timeout)
self._write_request_duration_log(
endpoint=self.path,
response=response,
context=context,
extra_tags={"url": prepared_request.path_url}
if self._LOG_REQUEST_METRIC_URLS
else None,
)
self.validate_response(response)
logging.debug("Response received successfully.")
return response
def get_url_params(
self,
context: dict | None, # noqa: ARG002
next_page_token: _TToken | None, # noqa: ARG002
) -> dict[str, Any]:
"""Return a dictionary of values to be used in URL parameterization.
If paging is supported, developers may override with specific paging logic.
Args:
context: Stream partition or context dictionary.
next_page_token: Token, page number or any request argument to request the
next page of data.
Returns:
Dictionary of URL query parameters to use in the request.
"""
return {}
def build_prepared_request(
self,
*args: Any,
**kwargs: Any,
) -> requests.PreparedRequest:
"""Build a generic but authenticated request.
Uses the authenticator instance to mutate the request with authentication.
Args:
*args: Arguments to pass to `requests.Request`_.
**kwargs: Keyword arguments to pass to `requests.Request`_.
Returns:
A `requests.PreparedRequest`_ object.
.. _requests.PreparedRequest:
https://requests.readthedocs.io/en/latest/api/#requests.PreparedRequest
.. _requests.Request:
https://requests.readthedocs.io/en/latest/api/#requests.Request
"""
request = requests.Request(*args, **kwargs)
self.requests_session.auth = self.authenticator
return self.requests_session.prepare_request(request)
def prepare_request(
self,
context: dict | None,
next_page_token: _TToken | None,
) -> requests.PreparedRequest:
"""Prepare a request object for this stream.
If partitioning is supported, the `context` object will contain the partition
definitions. Pagination information can be parsed from `next_page_token` if
`next_page_token` is not None.
Args:
context: Stream partition or context dictionary.
next_page_token: Token, page number or any request argument to request the
next page of data.
Returns:
Build a request with the stream's URL, path, query parameters,
HTTP headers and authenticator.
"""
http_method = self.rest_method
url: str = self.get_url(context)
params: dict = self.get_url_params(context, next_page_token)
request_data = self.prepare_request_payload(context, next_page_token)
headers = self.http_headers
return self.build_prepared_request(
method=http_method,
url=url,
params=params,
headers=headers,
json=request_data,
)
def request_records(self, context: dict | None) -> Iterable[dict]:
"""Request records from REST endpoint(s), returning response records.
If pagination is detected, pages will be recursed automatically.
Args:
context: Stream partition or context dictionary.
Yields:
An item for every record in the response.
"""
paginator = self.get_new_paginator()
decorated_request = self.request_decorator(self._request)
with metrics.http_request_counter(self.name, self.path) as request_counter:
request_counter.context = context
while not paginator.finished:
prepared_request = self.prepare_request(
context,
next_page_token=paginator.current_value,
)
resp = decorated_request(prepared_request, context)
request_counter.increment()
self.update_sync_costs(prepared_request, resp, context)
yield from self.parse_response(resp)
paginator.advance(resp)
def _write_request_duration_log(
self,
endpoint: str,
response: requests.Response,
context: dict | None,
extra_tags: dict | None,
) -> None:
"""TODO.
Args:
endpoint: TODO
response: TODO
context: Stream partition or context dictionary.
extra_tags: TODO
"""
extra_tags = extra_tags or {}
if context:
extra_tags[metrics.Tag.CONTEXT] = context
point = metrics.Point(
"timer",
metric=metrics.Metric.HTTP_REQUEST_DURATION,
value=response.elapsed.total_seconds(),
tags={
metrics.Tag.STREAM: self.name,
metrics.Tag.ENDPOINT: endpoint,
metrics.Tag.HTTP_STATUS_CODE: response.status_code,
metrics.Tag.STATUS: (
metrics.Status.SUCCEEDED
if response.status_code < HTTPStatus.BAD_REQUEST
else metrics.Status.FAILED
),
**extra_tags,
},
)
self._log_metric(point)
def update_sync_costs(
self,
request: requests.PreparedRequest,
response: requests.Response,
context: dict | None,
) -> dict[str, int]:
"""Update internal calculation of Sync costs.
Args:
request: the Request object that was just called.
response: the `requests.Response` object
context: the context passed to the call
Returns:
A dict of costs (for the single request) whose keys are
the "cost domains". See `calculate_sync_cost` for details.
"""
call_costs = self.calculate_sync_cost(request, response, context)
self._sync_costs = {
k: self._sync_costs.get(k, 0) + call_costs.get(k, 0) for k in call_costs
}
return self._sync_costs
# Overridable:
def calculate_sync_cost(
self,
request: requests.PreparedRequest, # noqa: ARG002
response: requests.Response, # noqa: ARG002
context: dict | None, # noqa: ARG002
) -> dict[str, int]:
"""Calculate the cost of the last API call made.
This method can optionally be implemented in streams to calculate
the costs (in arbitrary units to be defined by the tap developer)
associated with a single API/network call. The request and response objects
are available in the callback, as well as the context.
The method returns a dict where the keys are arbitrary cost dimensions,
and the values the cost along each dimension for this one call. For
instance: { "rest": 0, "graphql": 42 } for a call to github's graphql API.
All keys should be present in the dict.
This method can be overridden by tap streams. By default it won't do
anything.
Args:
request: the API Request object that was just called.
response: the `requests.Response` object
context: the context passed to the call
Returns:
A dict of accumulated costs whose keys are the "cost domains".
"""
return {}
def prepare_request_payload(
self,
context: dict | None,
next_page_token: _TToken | None,
) -> dict | None:
"""Prepare the data payload for the REST API request.
By default, no payload will be sent (return None).
Developers may override this method if the API requires a custom payload along
with the request. (This is generally not required for APIs which use the
HTTP 'GET' method.)
Args:
context: Stream partition or context dictionary.
next_page_token: Token, page number or any request argument to request the
next page of data.
"""
def get_new_paginator(self) -> BaseAPIPaginator:
"""Get a fresh paginator for this API endpoint.
Returns:
A paginator instance.
"""
if hasattr(self, "get_next_page_token"):
warn(
"`RESTStream.get_next_page_token` is deprecated and will not be used "
"in a future version of the Meltano Singer SDK. "
"Override `RESTStream.get_new_paginator` instead.",
DeprecationWarning,
stacklevel=2,
)
return LegacyStreamPaginator(self)
if self.next_page_token_jsonpath:
return JSONPathPaginator(self.next_page_token_jsonpath)
return SimpleHeaderPaginator("X-Next-Page")
@property
def http_headers(self) -> dict:
"""Return headers dict to be used for HTTP requests.
If an authenticator is also specified, the authenticator's headers will be
combined with `http_headers` when making HTTP requests.
Returns:
Dictionary of HTTP headers to use as a base for every request.
"""
result = self._http_headers
if "user_agent" in self.config:
result["User-Agent"] = self.config.get("user_agent")
return result
@property
def timeout(self) -> int:
"""Return the request timeout limit in seconds.
The default timeout is 300 seconds, or as defined by DEFAULT_REQUEST_TIMEOUT.
Returns:
The request timeout limit as number of seconds.
"""
return DEFAULT_REQUEST_TIMEOUT
# Records iterator
def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
"""Return a generator of record-type dictionary objects.
Each record emitted should be a dictionary of property names to their values.
Args:
context: Stream partition or context dictionary.
Yields:
One item per (possibly processed) record in the API.
"""
for record in self.request_records(context):
transformed_record = self.post_process(record, context)
if transformed_record is None:
# Record filtered out during post_process()
continue
yield transformed_record
def parse_response(self, response: requests.Response) -> Iterable[dict]:
"""Parse the response and return an iterator of result records.
Args:
response: A raw `requests.Response`_ object.
Yields:
One item for every item found in the response.
.. _requests.Response:
https://requests.readthedocs.io/en/latest/api/#requests.Response
"""
yield from extract_jsonpath(self.records_jsonpath, input=response.json())
# Abstract methods:
@property
def authenticator(self) -> _Auth:
"""Return or set the authenticator for managing HTTP auth headers.
If an authenticator is not specified, REST-based taps will simply pass
`http_headers` as defined in the stream class.
Returns:
Authenticator instance that will be used to authenticate all outgoing
requests.
"""
return SimpleAuthenticator(stream=self)
def backoff_wait_generator(self) -> Generator[float, None, None]:
"""The wait generator used by the backoff decorator on request failure.
See for options:
https://github.com/litl/backoff/blob/master/backoff/_wait_gen.py
And see for examples: `Code Samples <../code_samples.html#custom-backoff>`_
Returns:
The wait generator
"""
return backoff.expo(factor=2)
def backoff_max_tries(self) -> int:
"""The number of attempts before giving up when retrying requests.
Returns:
Number of max retries.
"""
return 5
def backoff_jitter(self, value: float) -> float:
"""Amount of jitter to add.
For more information see
https://github.com/litl/backoff/blob/master/backoff/_jitter.py
We chose to default to ``random_jitter`` instead of ``full_jitter`` as we keep
some level of default jitter to be "nice" to downstream APIs but it's still
relatively close to the default value that's passed in to make tap developers'
life easier.
Args:
value: Base amount to wait in seconds
Returns:
Time in seconds to wait until the next request.
"""
return backoff.random_jitter(value)
def backoff_handler(self, details: Details) -> None:
"""Adds additional behaviour prior to retry.
By default will log out backoff details, developers can override
to extend or change this behaviour.
Args:
details: backoff invocation details
https://github.com/litl/backoff#event-handlers
"""
logging.error(
"Backing off %(wait)0.2f seconds after %(tries)d tries "
"calling function %(target)s with args %(args)s and kwargs "
"%(kwargs)s",
details.get("wait"),
details.get("tries"),
details.get("target"),
details.get("args"),
details.get("kwargs"),
)
def backoff_runtime(
self,
*,
value: Callable[[Any], int],
) -> Generator[int, None, None]:
"""Optional backoff wait generator that can replace the default `backoff.expo`.
It is based on parsing the thrown exception of the decorated method, making it
possible for response values to be in scope.
You may want to review :meth:`~singer_sdk.RESTStream.backoff_jitter` if you're
overriding this function.
Args:
value: a callable which takes as input the decorated
function's thrown exception and determines how
long to wait.
Yields:
The thrown exception
"""
exception = yield # type: ignore[misc]
while True:
exception = yield value(exception)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/__init__.py | """SDK for building Singer taps."""
from __future__ import annotations
from singer_sdk.streams.core import Stream
from singer_sdk.streams.graphql import GraphQLStream
from singer_sdk.streams.rest import RESTStream
from singer_sdk.streams.sql import SQLStream
__all__ = ["Stream", "GraphQLStream", "RESTStream", "SQLStream"]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py | """Stream abstract class."""
from __future__ import annotations
import abc
import copy
import datetime
import gzip
import itertools
import json
from os import PathLike
from pathlib import Path
from types import MappingProxyType
from typing import (
TYPE_CHECKING,
Any,
Generator,
Iterable,
Iterator,
Mapping,
TypeVar,
cast,
)
from uuid import uuid4
import pendulum
import singer_sdk._singerlib as singer
from singer_sdk import metrics
from singer_sdk.exceptions import (
AbortedSyncFailedException,
AbortedSyncPausedException,
InvalidStreamSortException,
MaxRecordsLimitException,
)
from singer_sdk.helpers._batch import (
BaseBatchFileEncoding,
BatchConfig,
SDKBatchMessage,
)
from singer_sdk.helpers._catalog import pop_deselected_record_properties
from singer_sdk.helpers._compat import final
from singer_sdk.helpers._flattening import get_flattening_options
from singer_sdk.helpers._state import (
finalize_state_progress_markers,
get_starting_replication_value,
get_state_partitions_list,
get_writeable_state_dict,
increment_state,
is_state_non_resumable,
log_sort_error,
reset_state_progress_markers,
write_replication_key_signpost,
write_starting_replication_value,
)
from singer_sdk.helpers._typing import (
TypeConformanceLevel,
conform_record_data_types,
is_datetime_type,
)
from singer_sdk.helpers._util import utc_now
from singer_sdk.mapper import RemoveRecordTransform, SameRecordTransform, StreamMap
if TYPE_CHECKING:
import logging
from singer_sdk.plugin_base import PluginBase as TapBaseClass
# Replication methods
REPLICATION_FULL_TABLE = "FULL_TABLE"
REPLICATION_INCREMENTAL = "INCREMENTAL"
REPLICATION_LOG_BASED = "LOG_BASED"
FactoryType = TypeVar("FactoryType", bound="Stream")
_T = TypeVar("_T")
def lazy_chunked_generator(
iterable: Iterable[_T],
chunk_size: int,
) -> Generator[Iterator[_T], None, None]:
"""Yield a generator for each chunk of the given iterable.
Args:
iterable: The iterable to chunk.
chunk_size: The size of each chunk.
Yields:
A generator for each chunk of the given iterable.
"""
iterator = iter(iterable)
while True:
chunk = list(itertools.islice(iterator, chunk_size))
if not chunk:
break
yield iter(chunk)
class Stream(metaclass=abc.ABCMeta):
"""Abstract base class for tap streams."""
STATE_MSG_FREQUENCY = 10000
"""Number of records between state messages."""
ABORT_AT_RECORD_COUNT: int | None = None
"""
If set, raise `MaxRecordsLimitException` if the limit is exceeded.
"""
TYPE_CONFORMANCE_LEVEL = TypeConformanceLevel.RECURSIVE
"""Type conformance level for this stream.
Field types in the schema are used to convert record field values to the correct
type.
Available options are:
- ``TypeConformanceLevel.NONE``: No conformance is performed.
- ``TypeConformanceLevel.RECURSIVE``: Conformance is performed recursively through
all nested levels in the record.
- ``TypeConformanceLevel.ROOT_ONLY``: Conformance is performed only on the
root level.
"""
# Used for nested stream relationships
parent_stream_type: type[Stream] | None = None
"""Parent stream type for this stream. If this stream is a child stream, this should
be set to the parent stream class.
"""
ignore_parent_replication_key: bool = False
# Internal API cost aggregator
_sync_costs: dict[str, int] = {}
# Batch attributes
batch_size: int = 1000
"""Max number of records to write to each batch file."""
def __init__(
self,
tap: TapBaseClass,
schema: str | PathLike | dict[str, Any] | singer.Schema | None = None,
name: str | None = None,
) -> None:
"""Init tap stream.
Args:
tap: Singer Tap this stream belongs to.
schema: JSON schema for records in this stream.
name: Name of this stream.
Raises:
ValueError: TODO
FileNotFoundError: TODO
"""
if name:
self.name: str = name
if not self.name:
raise ValueError("Missing argument or class variable 'name'.")
self.logger: logging.Logger = tap.logger
self.metrics_logger = tap.metrics_logger
self.tap_name: str = tap.name
self._config: dict = dict(tap.config)
self._tap = tap
self._tap_state = tap.state
self._tap_input_catalog: singer.Catalog | None = None
self._stream_maps: list[StreamMap] | None = None
self.forced_replication_method: str | None = None
self._replication_key: str | None = None
self._primary_keys: list[str] | None = None
self._state_partitioning_keys: list[str] | None = None
self._schema_filepath: Path | None = None
self._metadata: singer.MetadataMapping | None = None
self._mask: singer.SelectionMask | None = None
self._schema: dict
self._is_state_flushed: bool = True
self.child_streams: list[Stream] = []
if schema:
if isinstance(schema, (PathLike, str)):
if not Path(schema).is_file():
raise FileNotFoundError(
f"Could not find schema file '{self.schema_filepath}'.",
)
self._schema_filepath = Path(schema)
elif isinstance(schema, dict):
self._schema = schema
elif isinstance(schema, singer.Schema):
self._schema = schema.to_dict()
else:
raise ValueError(
f"Unexpected type {type(schema).__name__} for arg 'schema'.",
)
if self.schema_filepath:
self._schema = json.loads(Path(self.schema_filepath).read_text())
if not self.schema:
raise ValueError(
f"Could not initialize schema for stream '{self.name}'. "
"A valid schema object or filepath was not provided.",
)
@property
def stream_maps(self) -> list[StreamMap]:
"""Get stream transformation maps.
The 0th item is the primary stream map. List should not be empty.
Returns:
A list of one or more map transformations for this stream.
"""
if self._stream_maps:
return self._stream_maps
if self._tap.mapper:
self._stream_maps = self._tap.mapper.stream_maps[self.name]
self.logger.info(
"Tap has custom mapper. Using %d provided map(s).",
len(self.stream_maps),
)
else:
self.logger.info(
"No custom mapper provided for '%s'. Using SameRecordTransform.",
self.name,
)
self._stream_maps = [
SameRecordTransform(
stream_alias=self.name,
raw_schema=self.schema,
key_properties=self.primary_keys,
flattening_options=get_flattening_options(self.config),
),
]
return self._stream_maps
@property
def is_timestamp_replication_key(self) -> bool:
"""Check is replication key is a timestamp.
Developers can override to `True` in order to force this value, although this
should not be required in most use cases since the type can generally be
accurately detected from the JSON Schema.
Returns:
True if the stream uses a timestamp-based replication key.
"""
if not self.replication_key:
return False
type_dict = self.schema.get("properties", {}).get(self.replication_key)
return is_datetime_type(type_dict)
def get_starting_replication_key_value(self, context: dict | None) -> Any | None:
"""Get starting replication key.
Will return the value of the stream's replication key when `--state` is passed.
If no prior state exists, will return `None`.
Developers should use this method to seed incremental processing for
non-datetime replication keys. For datetime and date replication keys, use
:meth:`~singer_sdk.Stream.get_starting_timestamp()`
Args:
context: Stream partition or context dictionary.
Returns:
Starting replication value.
"""
state = self.get_context_state(context)
return get_starting_replication_value(state)
def get_starting_timestamp(self, context: dict | None) -> datetime.datetime | None:
"""Get starting replication timestamp.
Will return the value of the stream's replication key when `--state` is passed.
If no state exists, will return `start_date` if set, or `None` if neither
the stream state nor `start_date` is set.
Developers should use this method to seed incremental processing for date
and datetime replication keys. For non-datetime replication keys, use
:meth:`~singer_sdk.Stream.get_starting_replication_key_value()`
Args:
context: Stream partition or context dictionary.
Returns:
`start_date` from config, or state value if using timestamp replication.
Raises:
ValueError: If the replication value is not a valid timestamp.
"""
value = self.get_starting_replication_key_value(context)
if value is None:
return None
if not self.is_timestamp_replication_key:
raise ValueError(
f"The replication key {self.replication_key} is not of timestamp type",
)
return cast(datetime.datetime, pendulum.parse(value))
@final
@property
def selected(self) -> bool:
"""Check if stream is selected.
Returns:
True if the stream is selected.
"""
return self.mask.get((), True)
@final
@property
def has_selected_descendents(self) -> bool:
"""Check descendents.
Returns:
True if any child streams are selected, recursively.
"""
for child in self.child_streams or []:
if child.selected or child.has_selected_descendents:
return True
return False
@final
@property
def descendent_streams(self) -> list[Stream]:
"""Get child streams.
Returns:
A list of all children, recursively.
"""
result: list[Stream] = list(self.child_streams) or []
for child in self.child_streams:
result += child.descendent_streams or []
return result
def _write_replication_key_signpost(
self,
context: dict | None,
value: datetime.datetime | str | int | float,
) -> None:
"""Write the signpost value, if available.
Args:
context: Stream partition or context dictionary.
value: TODO
Returns:
TODO
"""
if not value:
return
state = self.get_context_state(context)
write_replication_key_signpost(state, value)
def compare_start_date(self, value: str, start_date_value: str) -> str:
"""Compare a bookmark value to a start date and return the most recent value.
If the replication key is a datetime-formatted string, this method will parse
the value and compare it to the start date. Otherwise, the bookmark value is
returned.
If the tap uses a non-datetime replication key (e.g. an UNIX timestamp), the
developer is encouraged to override this method to provide custom logic for
comparing the bookmark value to the start date.
Args:
value: The replication key value.
start_date_value: The start date value from the config.
Returns:
The most recent value between the bookmark and start date.
"""
if self.is_timestamp_replication_key:
return max(value, start_date_value, key=pendulum.parse)
return value
def _write_starting_replication_value(self, context: dict | None) -> None:
"""Write the starting replication value, if available.
Args:
context: Stream partition or context dictionary.
"""
value = None
state = self.get_context_state(context)
if self.replication_key:
replication_key_value = state.get("replication_key_value")
if replication_key_value and self.replication_key == state.get(
"replication_key",
):
value = replication_key_value
# Use start_date if it is more recent than the replication_key state
start_date_value: str | None = self.config.get("start_date")
if start_date_value:
if not value:
value = start_date_value
else:
value = self.compare_start_date(value, start_date_value)
write_starting_replication_value(state, value)
def get_replication_key_signpost(
self,
context: dict | None, # noqa: ARG002
) -> datetime.datetime | Any | None:
"""Get the replication signpost.
For timestamp-based replication keys, this defaults to `utc_now()`. For
non-timestamp replication keys, default to `None`. For consistency in subsequent
calls, the value will be frozen (cached) at its initially called state, per
partition argument if applicable.
Developers may optionally override this method in advanced use cases such
as unsorted incremental streams or complex hierarchical stream scenarios.
For more info: :doc:`/implementation/state`
Args:
context: Stream partition or context dictionary.
Returns:
Max allowable bookmark value for this stream's replication key.
"""
if self.is_timestamp_replication_key:
return utc_now()
return None
@property
def schema_filepath(self) -> Path | None:
"""Get path to schema file.
Returns:
Path to a schema file for the stream or `None` if n/a.
"""
return self._schema_filepath
@property
def schema(self) -> dict:
"""Get schema.
Returns:
JSON Schema dictionary for this stream.
"""
return self._schema
@property
def primary_keys(self) -> list[str] | None:
"""Get primary keys.
Returns:
A list of primary key(s) for the stream.
"""
if not self._primary_keys:
return []
return self._primary_keys
@primary_keys.setter
def primary_keys(self, new_value: list[str]) -> None:
"""Set primary key(s) for the stream.
Args:
new_value: TODO
"""
self._primary_keys = new_value
@property
def state_partitioning_keys(self) -> list[str] | None:
"""Get state partition keys.
If not set, a default partitioning will be inherited from the stream's context.
If an empty list is set (`[]`), state will be held in one bookmark per stream.
Returns:
Partition keys for the stream state bookmarks.
"""
return self._state_partitioning_keys
@state_partitioning_keys.setter
def state_partitioning_keys(self, new_value: list[str] | None) -> None:
"""Set partition keys for the stream state bookmarks.
If not set, a default partitioning will be inherited from the stream's context.
If an empty list is set (`[]`), state will be held in one bookmark per stream.
Args:
new_value: the new list of keys
"""
self._state_partitioning_keys = new_value
@property
def replication_key(self) -> str | None:
"""Get replication key.
Returns:
Replication key for the stream.
"""
if not self._replication_key:
return None
return self._replication_key
@replication_key.setter
def replication_key(self, new_value: str) -> None:
"""Set replication key for the stream.
Args:
new_value: TODO
"""
self._replication_key = new_value
@property
def is_sorted(self) -> bool:
"""Expect stream to be sorted.
When `True`, incremental streams will attempt to resume if unexpectedly
interrupted.
Returns:
`True` if stream is sorted. Defaults to `False`.
"""
return False
@property
def check_sorted(self) -> bool:
"""Check if stream is sorted.
This setting enables additional checks which may trigger
`InvalidStreamSortException` if records are found which are unsorted.
Returns:
`True` if sorting is checked. Defaults to `True`.
"""
return True
@property
def metadata(self) -> singer.MetadataMapping:
"""Get stream metadata.
Metadata attributes (`inclusion`, `selected`, etc.) are part of the Singer spec.
Metadata from an input catalog will override standard metadata.
Returns:
A mapping from property breadcrumbs to metadata objects.
"""
if self._metadata is not None:
return self._metadata
if self._tap_input_catalog:
catalog_entry = self._tap_input_catalog.get_stream(self.tap_stream_id)
if catalog_entry:
self._metadata = catalog_entry.metadata
return self._metadata
self._metadata = singer.MetadataMapping.get_standard_metadata(
schema=self.schema,
replication_method=self.forced_replication_method,
key_properties=self.primary_keys or [],
valid_replication_keys=(
[self.replication_key] if self.replication_key else None
),
schema_name=None,
)
# If there's no input catalog, select all streams
if self._tap_input_catalog is None:
self._metadata.root.selected = True
return self._metadata
@property
def _singer_catalog_entry(self) -> singer.CatalogEntry:
"""Return catalog entry as specified by the Singer catalog spec.
Returns:
TODO
"""
return singer.CatalogEntry(
tap_stream_id=self.tap_stream_id,
stream=self.name,
schema=singer.Schema.from_dict(self.schema),
metadata=self.metadata,
key_properties=self.primary_keys or [],
replication_key=self.replication_key,
replication_method=self.replication_method,
is_view=None,
database=None,
table=None,
row_count=None,
stream_alias=None,
)
@property
def _singer_catalog(self) -> singer.Catalog:
"""TODO.
Returns:
TODO
"""
return singer.Catalog([(self.tap_stream_id, self._singer_catalog_entry)])
@property
def config(self) -> Mapping[str, Any]:
"""Get stream configuration.
Returns:
A frozen (read-only) config dictionary map.
"""
return MappingProxyType(self._config)
@property
def tap_stream_id(self) -> str:
"""Return a unique stream ID.
Default implementations will return `self.name` but this behavior may be
overridden if required by the developer.
Returns:
Unique stream ID.
"""
return self.name
@property
def replication_method(self) -> str:
"""Get replication method.
Returns:
Replication method to be used for this stream.
"""
if self.forced_replication_method:
return str(self.forced_replication_method)
if self.replication_key:
return REPLICATION_INCREMENTAL
return REPLICATION_FULL_TABLE
# State properties:
@property
def tap_state(self) -> dict:
"""Return a writeable state dict for the entire tap.
Note: This dictionary is shared (and writable) across all streams.
This method is internal to the SDK and should not need to be overridden.
Developers may access this property but this is not recommended except in
advanced use cases. Instead, developers should access the latest stream
replication key values using :meth:`~singer_sdk.Stream.get_starting_timestamp()`
for timestamp keys, or
:meth:`~singer_sdk.Stream.get_starting_replication_key_value()` for
non-timestamp keys.
Returns:
A writeable state dict for the entire tap.
"""
return self._tap_state
def get_context_state(self, context: dict | None) -> dict:
"""Return a writable state dict for the given context.
Gives a partitioned context state if applicable; else returns stream state.
A blank state will be created in none exists.
This method is internal to the SDK and should not need to be overridden.
Developers may access this property but this is not recommended except in
advanced use cases. Instead, developers should access the latest stream
replication key values using
:meth:`~singer_sdk.Stream.get_starting_timestamp()` for timestamp keys, or
:meth:`~singer_sdk.Stream.get_starting_replication_key_value()` for
non-timestamp keys.
Partition level may be overridden by
:attr:`~singer_sdk.Stream.state_partitioning_keys` if set.
Args:
context: Stream partition or context dictionary.
Returns:
A partitioned context state if applicable; else returns stream state.
A blank state will be created in none exists.
"""
state_partition_context = self._get_state_partition_context(context)
if state_partition_context:
return get_writeable_state_dict(
self.tap_state,
self.name,
state_partition_context=state_partition_context,
)
return self.stream_state
@property
def stream_state(self) -> dict:
"""Get writable state.
This method is internal to the SDK and should not need to be overridden.
Developers may access this property but this is not recommended except in
advanced use cases. Instead, developers should access the latest stream
replication key values using :meth:`~singer_sdk.Stream.get_starting_timestamp()`
for timestamp keys, or
:meth:`~singer_sdk.Stream.get_starting_replication_key_value()` for
non-timestamp keys.
A blank state entry will be created if one doesn't already exist.
Returns:
A writable state dict for this stream.
"""
return get_writeable_state_dict(self.tap_state, self.name)
# Partitions
@property
def partitions(self) -> list[dict] | None:
"""Get stream partitions.
Developers may override this property to provide a default partitions list.
By default, this method returns a list of any partitions which are already
defined in state, otherwise None.
Returns:
A list of partition key dicts (if applicable), otherwise `None`.
"""
result: list[dict] = []
for partition_state in (
get_state_partitions_list(self.tap_state, self.name) or []
):
result.append(partition_state["context"])
return result or None
# Private bookmarking methods
def _increment_stream_state(
self,
latest_record: dict[str, Any],
*,
context: dict | None = None,
) -> None:
"""Update state of stream or partition with data from the provided record.
Raises `InvalidStreamSortException` is `self.is_sorted = True` and unsorted data
is detected.
Note: The default implementation does not advance any bookmarks unless
`self.replication_method == 'INCREMENTAL'.
Args:
latest_record: TODO
context: Stream partition or context dictionary.
Raises:
ValueError: TODO
"""
# This also creates a state entry if one does not yet exist:
state_dict = self.get_context_state(context)
# Advance state bookmark values if applicable
if latest_record and self.replication_method == REPLICATION_INCREMENTAL:
if not self.replication_key:
raise ValueError(
f"Could not detect replication key for '{self.name}' stream"
f"(replication method={self.replication_method})",
)
treat_as_sorted = self.is_sorted
if not treat_as_sorted and self.state_partitioning_keys is not None:
# Streams with custom state partitioning are not resumable.
treat_as_sorted = False
increment_state(
state_dict,
replication_key=self.replication_key,
latest_record=latest_record,
is_sorted=treat_as_sorted,
check_sorted=self.check_sorted,
)
# Private message authoring methods:
def _write_state_message(self) -> None:
"""Write out a STATE message with the latest state."""
if not self._is_state_flushed:
singer.write_message(singer.StateMessage(value=self.tap_state))
self._is_state_flushed = True
def _generate_schema_messages(self) -> Generator[singer.SchemaMessage, None, None]:
"""Generate schema messages from stream maps.
Yields:
Schema message objects.
"""
bookmark_keys = [self.replication_key] if self.replication_key else None
for stream_map in self.stream_maps:
if isinstance(stream_map, RemoveRecordTransform):
# Don't emit schema if the stream's records are all ignored.
continue
schema_message = singer.SchemaMessage(
stream_map.stream_alias,
stream_map.transformed_schema,
stream_map.transformed_key_properties,
bookmark_keys,
)
yield schema_message
def _write_schema_message(self) -> None:
"""Write out a SCHEMA message with the stream schema."""
for schema_message in self._generate_schema_messages():
singer.write_message(schema_message)
@property
def mask(self) -> singer.SelectionMask:
"""Get a boolean mask for stream and property selection.
Returns:
A mapping of breadcrumbs to boolean values, representing stream and field
selection.
"""
if self._mask is None:
self._mask = self.metadata.resolve_selection()
return self._mask
def _generate_record_messages(
self,
record: dict,
) -> Generator[singer.RecordMessage, None, None]:
"""Write out a RECORD message.
Args:
record: A single stream record.
Yields:
Record message objects.
"""
pop_deselected_record_properties(record, self.schema, self.mask, self.logger)
record = conform_record_data_types(
stream_name=self.name,
record=record,
schema=self.schema,
level=self.TYPE_CONFORMANCE_LEVEL,
logger=self.logger,
)
for stream_map in self.stream_maps:
mapped_record = stream_map.transform(record)
# Emit record if not filtered
if mapped_record is not None:
record_message = singer.RecordMessage(
stream=stream_map.stream_alias,
record=mapped_record,
version=None,
time_extracted=utc_now(),
)
yield record_message
def _write_record_message(self, record: dict) -> None:
"""Write out a RECORD message.
Args:
record: A single stream record.
"""
for record_message in self._generate_record_messages(record):
singer.write_message(record_message)
self._is_state_flushed = False
def _write_batch_message(
self,
encoding: BaseBatchFileEncoding,
manifest: list[str],
) -> None:
"""Write out a BATCH message.
Args:
encoding: The encoding to use for the batch.
manifest: A list of filenames for the batch.
"""
singer.write_message(
SDKBatchMessage(
stream=self.name,
encoding=encoding,
manifest=manifest,
),
)
self._is_state_flushed = False
def _log_metric(self, point: metrics.Point) -> None:
"""Log a single measurement.
Args:
point: A single measurement value.
"""
metrics.log(self.metrics_logger, point=point)
def log_sync_costs(self) -> None:
"""Log a summary of Sync costs.
The costs are calculated via `calculate_sync_cost`.
This method can be overridden to log results in a custom
format. It is only called once at the end of the life of
the stream.
"""
if len(self._sync_costs) > 0:
msg = f"Total Sync costs for stream {self.name}: {self._sync_costs}"
self.logger.info(msg)
def _check_max_record_limit(self, current_record_index: int) -> None:
"""Raise an exception if dry run record limit exceeded.
Raised if we find dry run record limit exceeded,
aka `current_record_index > self.ABORT_AT_RECORD_COUNT - 1`.
Args:
current_record_index: The zero-based index of the current record.
Raises:
AbortedSyncFailedException: Raised if sync could not reach a valid state.
AbortedSyncPausedException: Raised if sync was able to be transitioned into
a valid state without data loss or corruption.
"""
if (
self.ABORT_AT_RECORD_COUNT is not None
and current_record_index > self.ABORT_AT_RECORD_COUNT - 1
):
try:
self._abort_sync(
abort_reason=MaxRecordsLimitException(
"Stream prematurely aborted due to the stream's max dry run "
f"record limit ({self.ABORT_AT_RECORD_COUNT}) being reached.",
),
)
except (AbortedSyncFailedException, AbortedSyncPausedException) as ex:
raise ex
def _abort_sync(self, abort_reason: Exception) -> None:
"""Handle a sync operation being aborted.
This method will attempt to close out the sync operation as gracefully as
possible - for instance, if a max runtime or record count is reached. This can
also be called for `SIGTERM` and KeyboardInterrupt events.
If a state message is pending, we will attempt to write it to STDOUT before
shutting down.
If the stream can reach a valid resumable state, then we will raise
`AbortedSyncPausedException`. Otherwise, `AbortedSyncFailedException` will be
raised.
Args:
abort_reason: The exception that triggered the sync to be aborted.
Raises:
AbortedSyncFailedException: Raised if sync could not reach a valid state.
AbortedSyncPausedException: Raised if sync was able to be transitioned into
a valid state without data loss or corruption.
"""
self._write_state_message() # Write out state message if pending.
if self.replication_method == "FULL_TABLE":
raise AbortedSyncFailedException(
"Sync operation aborted for stream in 'FULL_TABLE' replication mode.",
) from abort_reason
if is_state_non_resumable(self.stream_state):
raise AbortedSyncFailedException(
"Sync operation aborted and state is not in a resumable state.",
) from abort_reason
# Else, the sync operation can be assumed to be in a valid resumable state.
raise AbortedSyncPausedException from abort_reason
# Handle interim stream state
def reset_state_progress_markers(self, state: dict | None = None) -> None:
"""Reset progress markers. If all=True, all state contexts will be set.
This method is internal to the SDK and should not need to be overridden.
Args:
state: State object to promote progress markers with.
"""
if state is None or state == {}:
context: dict | None
for context in self.partitions or [{}]:
state = self.get_context_state(context or None)
reset_state_progress_markers(state)
return
reset_state_progress_markers(state)
def finalize_state_progress_markers(self, state: dict | None = None) -> None:
"""Reset progress markers. If all=True, all state contexts will be finalized.
This method is internal to the SDK and should not need to be overridden.
If all=True and the stream has children, child streams will also be finalized.
Args:
state: State object to promote progress markers with.
"""
if state is None or state == {}:
for child_stream in self.child_streams or []:
child_stream.finalize_state_progress_markers()
context: dict | None
for context in self.partitions or [{}]:
state = self.get_context_state(context or None)
finalize_state_progress_markers(state)
return
finalize_state_progress_markers(state)
# Private sync methods:
def _process_record(
self,
record: dict,
child_context: dict | None = None,
partition_context: dict | None = None,
) -> None:
"""Process a record.
Args:
record: The record to process.
child_context: The child context.
partition_context: The partition context.
"""
partition_context = partition_context or {}
child_context = copy.copy(
self.get_child_context(record=record, context=child_context),
)
for key, val in partition_context.items():
# Add state context to records if not already present
if key not in record:
record[key] = val
# Sync children, except when primary mapper filters out the record
if self.stream_maps[0].get_filter_result(record):
self._sync_children(child_context)
def _sync_records(
self,
context: dict | None = None,
*,
write_messages: bool = True,
) -> Generator[dict, Any, Any]:
"""Sync records, emitting RECORD and STATE messages.
Args:
context: Stream partition or context dictionary.
write_messages: Whether to write Singer messages to stdout.
Raises:
InvalidStreamSortException: Raised if sorting errors are found while
syncing the records.
Yields:
Each record from the source.
"""
# Initialize metrics
record_counter = metrics.record_counter(self.name)
timer = metrics.sync_timer(self.name)
record_index = 0
context_element: dict | None
context_list: list[dict] | None
context_list = [context] if context is not None else self.partitions
selected = self.selected
with record_counter, timer:
for context_element in context_list or [{}]:
record_counter.context = context_element
timer.context = context_element
partition_record_index = 0
current_context = context_element or None
state = self.get_context_state(current_context)
state_partition_context = self._get_state_partition_context(
current_context,
)
self._write_starting_replication_value(current_context)
child_context: dict | None = (
None if current_context is None else copy.copy(current_context)
)
for record_result in self.get_records(current_context):
self._check_max_record_limit(current_record_index=record_index)
if isinstance(record_result, tuple):
# Tuple items should be the record and the child context
record, child_context = record_result
else:
record = record_result
try:
self._process_record(
record,
child_context=child_context,
partition_context=state_partition_context,
)
except InvalidStreamSortException as ex:
log_sort_error(
log_fn=self.logger.error,
ex=ex,
record_count=record_index + 1,
partition_record_count=partition_record_index + 1,
current_context=current_context,
state_partition_context=state_partition_context,
stream_name=self.name,
)
raise ex
if selected:
if write_messages:
self._write_record_message(record)
self._increment_stream_state(record, context=current_context)
if (
record_index + 1
) % self.STATE_MSG_FREQUENCY == 0 and write_messages:
self._write_state_message()
record_counter.increment()
yield record
record_index += 1
partition_record_index += 1
if current_context == state_partition_context:
# Finalize per-partition state only if 1:1 with context
finalize_state_progress_markers(state)
if not context:
# Finalize total stream only if we have the full full context.
# Otherwise will be finalized by tap at end of sync.
finalize_state_progress_markers(self.stream_state)
if write_messages:
# Reset interim bookmarks before emitting final STATE message:
self._write_state_message()
def _sync_batches(
self,
batch_config: BatchConfig,
context: dict | None = None,
) -> None:
"""Sync batches, emitting BATCH messages.
Args:
batch_config: The batch configuration.
context: Stream partition or context dictionary.
"""
with metrics.batch_counter(self.name, context=context) as counter:
for encoding, manifest in self.get_batches(batch_config, context):
counter.increment()
self._write_batch_message(encoding=encoding, manifest=manifest)
self._write_state_message()
# Public methods ("final", not recommended to be overridden)
@final
def sync(self, context: dict | None = None) -> None:
"""Sync this stream.
This method is internal to the SDK and should not need to be overridden.
Args:
context: Stream partition or context dictionary.
"""
msg = f"Beginning {self.replication_method.lower()} sync of '{self.name}'"
if context:
msg += f" with context: {context}"
self.logger.info("%s...", msg)
# Use a replication signpost, if available
signpost = self.get_replication_key_signpost(context)
if signpost:
self._write_replication_key_signpost(context, signpost)
# Send a SCHEMA message to the downstream target:
if self.selected:
self._write_schema_message()
batch_config = self.get_batch_config(self.config)
if batch_config:
self._sync_batches(batch_config, context=context)
else:
# Sync the records themselves:
for _ in self._sync_records(context=context):
pass
def _sync_children(self, child_context: dict | None) -> None:
if child_context is None:
self.logger.warning(
"Context for child streams of '%s' is null, "
"skipping sync of any child streams",
self.name,
)
return
for child_stream in self.child_streams:
if child_stream.selected or child_stream.has_selected_descendents:
child_stream.sync(context=child_context)
# Overridable Methods
def apply_catalog(self, catalog: singer.Catalog) -> None:
"""Apply a catalog dict, updating any settings overridden within the catalog.
Developers may override this method in order to introduce advanced catalog
parsing, or to explicitly fail on advanced catalog customizations which
are not supported by the tap.
Args:
catalog: Catalog object passed to the tap. Defines schema, primary and
replication keys, as well as selection metadata.
"""
self._tap_input_catalog = catalog
catalog_entry = catalog.get_stream(self.name)
if catalog_entry:
self.primary_keys = catalog_entry.key_properties
self.replication_key = catalog_entry.replication_key
if catalog_entry.replication_method:
self.forced_replication_method = catalog_entry.replication_method
def _get_state_partition_context(self, context: dict | None) -> dict | None:
"""Override state handling if Stream.state_partitioning_keys is specified.
Args:
context: Stream partition or context dictionary.
Returns:
TODO
"""
if context is None:
return None
if self.state_partitioning_keys is None:
return context
return {k: v for k, v in context.items() if k in self.state_partitioning_keys}
def get_child_context(self, record: dict, context: dict | None) -> dict | None:
"""Return a child context object from the record and optional provided context.
By default, will return context if provided and otherwise the record dict.
Developers may override this behavior to send specific information to child
streams for context.
Return ``None`` if no child streams should be synced, for example if the
parent record was deleted and the child records can no longer be synced.
Args:
record: Individual record in the stream.
context: Stream partition or context dictionary.
Returns:
A dictionary with context values for a child stream, or None if no child
streams should be synced.
Raises:
NotImplementedError: If the stream has children but this method is not
overriden.
"""
if context is None:
for child_stream in self.child_streams:
if child_stream.state_partitioning_keys is None:
parent_type = type(self).__name__
child_type = type(child_stream).__name__
raise NotImplementedError(
"No child context behavior was defined between parent stream "
f"'{self.name}' and child stream '{child_stream.name}'."
"The parent stream must define "
f"`{parent_type}.get_child_context()` and/or the child stream "
f"must define `{child_type}.state_partitioning_keys`.",
)
return context or record
# Abstract Methods
@abc.abstractmethod
def get_records(
self,
context: dict | None,
) -> Iterable[dict | tuple[dict, dict | None]]:
"""Abstract record generator function. Must be overridden by the child class.
Each record emitted should be a dictionary of property names to their values.
Returns either a record dict or a tuple: (record_dict, child_context)
A method which should retrieve data from the source and return records
incrementally using the python `yield` operator.
Only custom stream types need to define this method. REST and GraphQL streams
should instead use the class-specific methods for REST or GraphQL, respectively.
This method takes an optional `context` argument, which can be safely ignored
unless the stream is a child stream or requires partitioning.
More info: :doc:`/partitioning`.
Parent streams can optionally return a tuple, in which
case the second item in the tuple being a `child_context` dictionary for the
stream's `context`.
If the child context object in the tuple is ``None``, the child streams will
be skipped. This is useful for cases where the parent record was deleted and
the child records can no longer be synced.
More info: :doc:`/parent_streams`
Args:
context: Stream partition or context dictionary.
"""
def get_batch_config(self, config: Mapping) -> BatchConfig | None:
"""Return the batch config for this stream.
Args:
config: Tap configuration dictionary.
Returns:
Batch config for this stream.
"""
raw = config.get("batch_config")
return BatchConfig.from_dict(raw) if raw else None
def get_batches(
self,
batch_config: BatchConfig,
context: dict | None = None,
) -> Iterable[tuple[BaseBatchFileEncoding, list[str]]]:
"""Batch generator function.
Developers are encouraged to override this method to customize batching
behavior for databases, bulk APIs, etc.
Args:
batch_config: Batch config for this stream.
context: Stream partition or context dictionary.
Yields:
A tuple of (encoding, manifest) for each batch.
"""
sync_id = f"{self.tap_name}--{self.name}-{uuid4()}"
prefix = batch_config.storage.prefix or ""
for i, chunk in enumerate(
lazy_chunked_generator(
self._sync_records(context, write_messages=False),
self.batch_size,
),
start=1,
):
filename = f"{prefix}{sync_id}-{i}.json.gz"
with batch_config.storage.fs() as fs:
# TODO: Determine compression from config.
with fs.open(filename, "wb") as f, gzip.GzipFile(
fileobj=f,
mode="wb",
) as gz:
gz.writelines(
(json.dumps(record) + "\n").encode() for record in chunk
)
file_url = fs.geturl(filename)
yield batch_config.encoding, [file_url]
def post_process(
self,
row: dict,
context: dict | None = None, # noqa: ARG002
) -> dict | None:
"""As needed, append or transform raw data to match expected structure.
Optional. This method gives developers an opportunity to "clean up" the results
prior to returning records to the downstream tap - for instance: cleaning,
renaming, or appending properties to the raw record result returned from the
API.
Developers may also return `None` from this method to filter out
invalid or not-applicable records from the stream.
Args:
row: Individual record in the stream.
context: Stream partition or context dictionary.
Returns:
The resulting record dict, or `None` if the record should be excluded.
"""
return row
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/sql.py | """Base class for SQL-type streams."""
from __future__ import annotations
import abc
from typing import TYPE_CHECKING, Any, Iterable, cast
import sqlalchemy
import singer_sdk.helpers._catalog as catalog
from singer_sdk._singerlib import CatalogEntry, MetadataMapping
from singer_sdk.connectors import SQLConnector
from singer_sdk.streams.core import Stream
if TYPE_CHECKING:
from singer_sdk.plugin_base import PluginBase as TapBaseClass
class SQLStream(Stream, metaclass=abc.ABCMeta):
"""Base class for SQLAlchemy-based streams."""
connector_class = SQLConnector
def __init__(
self,
tap: TapBaseClass,
catalog_entry: dict,
connector: SQLConnector | None = None,
) -> None:
"""Initialize the database stream.
If `connector` is omitted, a new connector will be created.
Args:
tap: The parent tap object.
catalog_entry: Catalog entry dict.
connector: Optional connector to reuse.
"""
self._connector: SQLConnector
self._connector = connector or self.connector_class(dict(tap.config))
self.catalog_entry = catalog_entry
super().__init__(
tap=tap,
schema=self.schema,
name=self.tap_stream_id,
)
@property
def _singer_catalog_entry(self) -> CatalogEntry:
"""Return catalog entry as specified by the Singer catalog spec.
Returns:
A CatalogEntry object.
"""
return cast(CatalogEntry, CatalogEntry.from_dict(self.catalog_entry))
@property
def connector(self) -> SQLConnector:
"""Return a connector object.
Returns:
The connector object.
"""
return self._connector
@property
def metadata(self) -> MetadataMapping:
"""Return the Singer metadata.
Metadata from an input catalog will override standard metadata.
Returns:
Metadata object as specified in the Singer spec.
"""
return self._singer_catalog_entry.metadata
@property
def schema(self) -> dict:
"""Return metadata object (dict) as specified in the Singer spec.
Metadata from an input catalog will override standard metadata.
Returns:
The schema object.
"""
return cast(dict, self._singer_catalog_entry.schema.to_dict())
@property
def tap_stream_id(self) -> str:
"""Return the unique ID used by the tap to identify this stream.
Generally, this is the same value as in `Stream.name`.
In rare cases, such as for database types with multi-part names,
this may be slightly different from `Stream.name`.
Returns:
The unique tap stream ID as a string.
"""
return self._singer_catalog_entry.tap_stream_id
@property
def primary_keys(self) -> list[str] | None:
"""Get primary keys from the catalog entry definition.
Returns:
A list of primary key(s) for the stream.
"""
return self._singer_catalog_entry.metadata.root.table_key_properties or []
@primary_keys.setter
def primary_keys(self, new_value: list[str]) -> None:
"""Set or reset the primary key(s) in the stream's catalog entry.
Args:
new_value: a list of one or more column names
"""
self._singer_catalog_entry.metadata.root.table_key_properties = new_value
@property
def fully_qualified_name(self) -> str:
"""Generate the fully qualified version of the table name.
Raises:
ValueError: If table_name is not able to be detected.
Returns:
The fully qualified name.
"""
catalog_entry = self._singer_catalog_entry
if not catalog_entry.table:
raise ValueError(
f"Missing table name in catalog entry: {catalog_entry.to_dict()}",
)
return self.connector.get_fully_qualified_name(
table_name=catalog_entry.table,
schema_name=catalog_entry.metadata.root.schema_name,
db_name=catalog_entry.database,
)
def get_selected_schema(self) -> dict:
"""Return a copy of the Stream JSON schema, dropping any fields not selected.
Returns:
A dictionary containing a copy of the Stream JSON schema, filtered
to any selection criteria.
"""
return catalog.get_selected_schema(
stream_name=self.name,
schema=self.schema,
mask=self.mask,
logger=self.logger,
)
# Get records from stream
def get_records(self, context: dict | None) -> Iterable[dict[str, Any]]:
"""Return a generator of record-type dictionary objects.
If the stream has a replication_key value defined, records will be sorted by the
incremental key. If the stream also has an available starting bookmark, the
records will be filtered for values greater than or equal to the bookmark value.
Args:
context: If partition context is provided, will read specifically from this
data slice.
Yields:
One dict per record.
Raises:
NotImplementedError: If partition is passed in context and the stream does
not support partitioning.
"""
if context:
raise NotImplementedError(
f"Stream '{self.name}' does not support partitioning.",
)
selected_column_names = self.get_selected_schema()["properties"].keys()
table = self.connector.get_table(
full_table_name=self.fully_qualified_name,
column_names=selected_column_names,
)
query = table.select()
if self.replication_key:
replication_key_col = table.columns[self.replication_key]
query = query.order_by(replication_key_col)
start_val = self.get_starting_replication_key_value(context)
if start_val:
query = query.where(
sqlalchemy.text(":replication_key >= :start_val").bindparams(
replication_key=replication_key_col,
start_val=start_val,
),
)
if self.ABORT_AT_RECORD_COUNT is not None:
# Limit record count to one greater than the abort threshold. This ensures
# `MaxRecordsLimitException` exception is properly raised by caller
# `Stream._sync_records()` if more records are available than can be
# processed.
query = query.limit(self.ABORT_AT_RECORD_COUNT + 1)
with self.connector._connect() as conn:
for record in conn.execute(query):
yield dict(record._mapping)
__all__ = ["SQLStream", "SQLConnector"]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/sinks/batch.py | """Sink classes load data to a target."""
from __future__ import annotations
import abc
import datetime
import uuid
from singer_sdk.sinks.core import Sink
class BatchSink(Sink):
"""Base class for batched record writers."""
def _get_context(self, record: dict) -> dict: # noqa: ARG002
"""Return a batch context. If no batch is active, return a new batch context.
The SDK-generated context will contain `batch_id` (GUID string) and
`batch_start_time` (datetime).
NOTE: Future versions of the SDK may expand the available context attributes.
Args:
record: Individual record in the stream.
Returns:
TODO
"""
if self._pending_batch is None:
new_context = {
"batch_id": str(uuid.uuid4()),
"batch_start_time": datetime.datetime.now(tz=datetime.timezone.utc),
}
self.start_batch(new_context)
self._pending_batch = new_context
return self._pending_batch
def start_batch(self, context: dict) -> None:
"""Start a new batch with the given context.
The SDK-generated context will contain `batch_id` (GUID string) and
`batch_start_time` (datetime).
Developers may optionally override this method to add custom markers to the
`context` dict and/or to initialize batch resources - such as initializing a
local temp file to hold batch records before uploading.
Args:
context: Stream partition or context dictionary.
"""
def process_record(self, record: dict, context: dict) -> None:
"""Load the latest record from the stream.
Developers may either load to the `context` dict for staging (the
default behavior for Batch types), or permanently write out to the target.
If this method is not overridden, the default implementation will create a
`context["records"]` list and append all records for processing during
:meth:`~singer_sdk.BatchSink.process_batch()`.
If duplicates are merged, these can be tracked via
:meth:`~singer_sdk.Sink.tally_duplicate_merged()`.
Args:
record: Individual record in the stream.
context: Stream partition or context dictionary.
"""
if "records" not in context:
context["records"] = []
context["records"].append(record)
@abc.abstractmethod
def process_batch(self, context: dict) -> None:
"""Process a batch with the given batch context.
This method must be overridden.
If :meth:`~singer_sdk.BatchSink.process_record()` is not overridden,
the `context["records"]` list will contain all records from the given batch
context.
If duplicates are merged, these can be tracked via
:meth:`~singer_sdk.Sink.tally_duplicate_merged()`.
Args:
context: Stream partition or context dictionary.
"""
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/sinks/__init__.py | """Sink classes for targets."""
from __future__ import annotations
from singer_sdk.sinks.batch import BatchSink
from singer_sdk.sinks.core import Sink
from singer_sdk.sinks.record import RecordSink
from singer_sdk.sinks.sql import SQLSink
__all__ = ["BatchSink", "RecordSink", "Sink", "SQLSink"]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/sinks/core.py | """Sink classes load data to a target."""
from __future__ import annotations
import abc
import datetime
import json
import time
from gzip import GzipFile
from gzip import open as gzip_open
from types import MappingProxyType
from typing import IO, TYPE_CHECKING, Any, Mapping, Sequence
from dateutil import parser
from jsonschema import Draft7Validator, FormatChecker
from singer_sdk.helpers._batch import (
BaseBatchFileEncoding,
BatchConfig,
BatchFileFormat,
StorageTarget,
)
from singer_sdk.helpers._compat import final
from singer_sdk.helpers._typing import (
DatetimeErrorTreatmentEnum,
get_datelike_property_type,
handle_invalid_timestamp_in_record,
)
if TYPE_CHECKING:
from logging import Logger
from singer_sdk.plugin_base import PluginBase
JSONSchemaValidator = Draft7Validator
class Sink(metaclass=abc.ABCMeta):
"""Abstract base class for target sinks."""
# max timestamp/datetime supported, used to reset invalid dates
logger: Logger
MAX_SIZE_DEFAULT = 10000
def __init__(
self,
target: PluginBase,
stream_name: str,
schema: dict,
key_properties: list[str] | None,
) -> None:
"""Initialize target sink.
Args:
target: Target instance.
stream_name: Name of the stream to sink.
schema: Schema of the stream to sink.
key_properties: Primary key of the stream to sink.
"""
self.logger = target.logger
self._config = dict(target.config)
self._pending_batch: dict | None = None
self.stream_name = stream_name
self.logger.info(
"Initializing target sink for stream '%s'...",
stream_name,
)
self.schema = schema
if self.include_sdc_metadata_properties:
self._add_sdc_metadata_to_schema()
else:
self._remove_sdc_metadata_from_schema()
self.records_to_drain: list[dict] | Any = []
self._context_draining: dict | None = None
self.latest_state: dict | None = None
self._draining_state: dict | None = None
self.drained_state: dict | None = None
self._key_properties = key_properties or []
# Tally counters
self._total_records_written: int = 0
self._total_dupe_records_merged: int = 0
self._total_records_read: int = 0
self._batch_records_read: int = 0
self._batch_dupe_records_merged: int = 0
self._validator = Draft7Validator(schema, format_checker=FormatChecker())
def _get_context(self, record: dict) -> dict: # noqa: ARG002
"""Return an empty dictionary by default.
NOTE: Future versions of the SDK may expand the available context attributes.
Args:
record: Individual record in the stream.
Returns:
TODO
"""
return {}
# Size properties
@property
def max_size(self) -> int:
"""Get max batch size.
Returns:
Max number of records to batch before `is_full=True`
"""
return self.MAX_SIZE_DEFAULT
@property
def current_size(self) -> int:
"""Get current batch size.
Returns:
The number of records to drain.
"""
return self._batch_records_read
@property
def is_full(self) -> bool:
"""Check against size limit.
Returns:
True if the sink needs to be drained.
"""
return self.current_size >= self.max_size
# Tally methods
@final
def tally_record_read(self, count: int = 1) -> None:
"""Increment the records read tally.
This method is called automatically by the SDK when records are read.
Args:
count: Number to increase record count by.
"""
self._total_records_read += count
self._batch_records_read += count
@final
def tally_record_written(self, count: int = 1) -> None:
"""Increment the records written tally.
This method is called automatically by the SDK after
:meth:`~singer_sdk.Sink.process_record()`
or :meth:`~singer_sdk.Sink.process_batch()`.
Args:
count: Number to increase record count by.
"""
self._total_records_written += count
@final
def tally_duplicate_merged(self, count: int = 1) -> None:
"""Increment the records merged tally.
This method should be called directly by the Target implementation.
Args:
count: Number to increase record count by.
"""
self._total_dupe_records_merged += count
self._batch_dupe_records_merged += count
# Properties
@property
def config(self) -> Mapping[str, Any]:
"""Get plugin configuration.
Returns:
A frozen (read-only) config dictionary map.
"""
return MappingProxyType(self._config)
@property
def batch_config(self) -> BatchConfig | None:
"""Get batch configuration.
Returns:
A frozen (read-only) config dictionary map.
"""
raw = self.config.get("batch_config")
return BatchConfig.from_dict(raw) if raw else None
@property
def include_sdc_metadata_properties(self) -> bool:
"""Check if metadata columns should be added.
Returns:
True if metadata columns should be added.
"""
return self.config.get("add_record_metadata", False)
@property
def datetime_error_treatment(self) -> DatetimeErrorTreatmentEnum:
"""Return a treatment to use for datetime parse errors: ERROR. MAX, or NULL.
Returns:
TODO
"""
return DatetimeErrorTreatmentEnum.ERROR
@property
def key_properties(self) -> list[str]:
"""Return key properties.
Returns:
A list of stream key properties.
"""
return self._key_properties
# Record processing
def _add_sdc_metadata_to_record(
self,
record: dict,
message: dict,
context: dict,
) -> None:
"""Populate metadata _sdc columns from incoming record message.
Record metadata specs documented at:
https://sdk.meltano.com/en/latest/implementation/record_metadata.md
Args:
record: Individual record in the stream.
message: TODO
context: Stream partition or context dictionary.
"""
record["_sdc_extracted_at"] = message.get("time_extracted")
record["_sdc_received_at"] = datetime.datetime.now(
tz=datetime.timezone.utc,
).isoformat()
record["_sdc_batched_at"] = (
context.get("batch_start_time", None)
or datetime.datetime.now(tz=datetime.timezone.utc)
).isoformat()
record["_sdc_deleted_at"] = record.get("_sdc_deleted_at")
record["_sdc_sequence"] = int(round(time.time() * 1000))
record["_sdc_table_version"] = message.get("version")
def _add_sdc_metadata_to_schema(self) -> None:
"""Add _sdc metadata columns.
Record metadata specs documented at:
https://sdk.meltano.com/en/latest/implementation/record_metadata.md
"""
properties_dict = self.schema["properties"]
for col in {
"_sdc_extracted_at",
"_sdc_received_at",
"_sdc_batched_at",
"_sdc_deleted_at",
}:
properties_dict[col] = {
"type": ["null", "string"],
"format": "date-time",
}
for col in {"_sdc_sequence", "_sdc_table_version"}:
properties_dict[col] = {"type": ["null", "integer"]}
def _remove_sdc_metadata_from_schema(self) -> None:
"""Remove _sdc metadata columns.
Record metadata specs documented at:
https://sdk.meltano.com/en/latest/implementation/record_metadata.md
"""
properties_dict = self.schema["properties"]
for col in {
"_sdc_extracted_at",
"_sdc_received_at",
"_sdc_batched_at",
"_sdc_deleted_at",
"_sdc_sequence",
"_sdc_table_version",
}:
properties_dict.pop(col, None)
def _remove_sdc_metadata_from_record(self, record: dict) -> None:
"""Remove metadata _sdc columns from incoming record message.
Record metadata specs documented at:
https://sdk.meltano.com/en/latest/implementation/record_metadata.md
Args:
record: Individual record in the stream.
"""
record.pop("_sdc_extracted_at", None)
record.pop("_sdc_received_at", None)
record.pop("_sdc_batched_at", None)
record.pop("_sdc_deleted_at", None)
record.pop("_sdc_sequence", None)
record.pop("_sdc_table_version", None)
# Record validation
def _validate_and_parse(self, record: dict) -> dict:
"""Validate or repair the record, parsing to python-native types as needed.
Args:
record: Individual record in the stream.
Returns:
TODO
"""
self._validator.validate(record)
self._parse_timestamps_in_record(
record=record,
schema=self.schema,
treatment=self.datetime_error_treatment,
)
return record
def _parse_timestamps_in_record(
self,
record: dict,
schema: dict,
treatment: DatetimeErrorTreatmentEnum,
) -> None:
"""Parse strings to datetime.datetime values, repairing or erroring on failure.
Attempts to parse every field that is of type date/datetime/time. If its value
is out of range, repair logic will be driven by the `treatment` input arg:
MAX, NULL, or ERROR.
Args:
record: Individual record in the stream.
schema: TODO
treatment: TODO
"""
for key in record:
datelike_type = get_datelike_property_type(schema["properties"][key])
if datelike_type:
date_val = record[key]
try:
if record[key] is not None:
date_val = parser.parse(date_val)
except parser.ParserError as ex:
date_val = handle_invalid_timestamp_in_record(
record,
[key],
date_val,
datelike_type,
ex,
treatment,
self.logger,
)
record[key] = date_val
def _after_process_record(self, context: dict) -> None:
"""Perform post-processing and record keeping. Internal hook.
Args:
context: Stream partition or context dictionary.
"""
self.logger.debug("Processed record: %s", context)
# SDK developer overrides:
def preprocess_record(self, record: dict, context: dict) -> dict: # noqa: ARG002
"""Process incoming record and return a modified result.
Args:
record: Individual record in the stream.
context: Stream partition or context dictionary.
Returns:
A new, processed record.
"""
return record
@abc.abstractmethod
def process_record(self, record: dict, context: dict) -> None:
"""Load the latest record from the stream.
Implementations may either load to the `context` dict for staging (the
default behavior for Batch types), or permanently write out to the target.
Anything appended to :attr:`singer_sdk.Sink.records_to_drain` will be
automatically passed to
:meth:`~singer_sdk.Sink.process_batch()` to be permanently written during the
process_batch operation.
If duplicates are merged, these can be tracked via
:meth:`~singer_sdk.Sink.tally_duplicate_merged()`.
Args:
record: Individual record in the stream.
context: Stream partition or context dictionary.
"""
def start_drain(self) -> dict:
"""Set and return `self._context_draining`.
Returns:
TODO
"""
self._context_draining = self._pending_batch or {}
self._pending_batch = None
return self._context_draining
@abc.abstractmethod
def process_batch(self, context: dict) -> None:
"""Process all records per the batch's `context` dictionary.
If duplicates are merged, these can optionally be tracked via
`tally_duplicate_merged()`.
Args:
context: Stream partition or context dictionary.
Raises:
NotImplementedError: If derived class does not override this method.
"""
raise NotImplementedError("No handling exists for process_batch().")
def mark_drained(self) -> None:
"""Reset `records_to_drain` and any other tracking."""
self.drained_state = self._draining_state
self._draining_state = None
self._context_draining = None
if self._batch_records_read:
self.tally_record_written(
self._batch_records_read - self._batch_dupe_records_merged,
)
self._batch_records_read = 0
def activate_version(self, new_version: int) -> None:
"""Bump the active version of the target table.
This method should be overridden by developers if a custom implementation is
expected.
Args:
new_version: The version number to activate.
"""
_ = new_version
self.logger.warning(
"ACTIVATE_VERSION message received but not implemented by this target. "
"Ignoring.",
)
def setup(self) -> None:
"""Perform any setup actions at the beginning of a Stream.
Setup is executed once per Sink instance, after instantiation. If a Schema
change is detected, a new Sink is instantiated and this method is called again.
"""
self.logger.info("Setting up %s", self.stream_name)
def clean_up(self) -> None:
"""Perform any clean up actions required at end of a stream.
Implementations should ensure that clean up does not affect resources
that may be in use from other instances of the same sink. Stream name alone
should not be relied on, it's recommended to use a uuid as well.
"""
self.logger.info("Cleaning up %s", self.stream_name)
def process_batch_files(
self,
encoding: BaseBatchFileEncoding,
files: Sequence[str],
) -> None:
"""Process a batch file with the given batch context.
Args:
encoding: The batch file encoding.
files: The batch files to process.
Raises:
NotImplementedError: If the batch file encoding is not supported.
"""
file: GzipFile | IO
storage: StorageTarget | None = None
for path in files:
head, tail = StorageTarget.split_url(path)
if self.batch_config:
storage = self.batch_config.storage
else:
storage = StorageTarget.from_url(head)
if encoding.format == BatchFileFormat.JSONL:
with storage.fs(create=False) as batch_fs, batch_fs.open(
tail,
mode="rb",
) as file:
open_file = (
gzip_open(file) if encoding.compression == "gzip" else file
)
context = {
"records": [
json.loads(line)
for line in open_file # type: ignore[attr-defined]
],
}
self.process_batch(context)
else:
raise NotImplementedError(
f"Unsupported batch encoding format: {encoding.format}",
)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/sinks/record.py | """Sink classes load data to a target."""
from __future__ import annotations
import abc
from singer_sdk.helpers._compat import final
from singer_sdk.sinks.core import Sink
class RecordSink(Sink):
"""Base class for singleton record writers."""
current_size = 0 # Records are always written directly
def _after_process_record(self, context: dict) -> None: # noqa: ARG002
"""Perform post-processing and record keeping. Internal hook.
The RecordSink class uses this method to tally each record written.
Args:
context: Stream partition or context dictionary.
"""
self.tally_record_written()
@final
def process_batch(self, context: dict) -> None:
"""Do nothing and return immediately.
The RecordSink class does not support batching.
This method may not be overridden.
Args:
context: Stream partition or context dictionary.
"""
@final
def start_batch(self, context: dict) -> None:
"""Do nothing and return immediately.
The RecordSink class does not support batching.
This method may not be overridden.
Args:
context: Stream partition or context dictionary.
"""
@abc.abstractmethod
def process_record(self, record: dict, context: dict) -> None:
"""Load the latest record from the stream.
This method must be overridden.
Implementations should permanently serialize each record to the target
prior to returning.
If duplicates are merged/skipped instead of being loaded, merges can be
tracked via :meth:`~singer_sdk.Sink.tally_duplicate_merged()`.
Args:
record: Individual record in the stream.
context: Stream partition or context dictionary.
"""
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/sinks/sql.py | """Sink classes load data to SQL targets."""
from __future__ import annotations
import re
from collections import defaultdict
from copy import copy
from textwrap import dedent
from typing import TYPE_CHECKING, Any, Iterable
import sqlalchemy
from pendulum import now
from sqlalchemy.sql.expression import bindparam
from singer_sdk.connectors import SQLConnector
from singer_sdk.exceptions import ConformedNameClashException
from singer_sdk.helpers._conformers import replace_leading_digit
from singer_sdk.sinks.batch import BatchSink
if TYPE_CHECKING:
from sqlalchemy.sql import Executable
from singer_sdk.plugin_base import PluginBase
class SQLSink(BatchSink):
"""SQL-type sink type."""
connector_class: type[SQLConnector]
soft_delete_column_name = "_sdc_deleted_at"
version_column_name = "_sdc_table_version"
def __init__(
self,
target: PluginBase,
stream_name: str,
schema: dict,
key_properties: list[str] | None,
connector: SQLConnector | None = None,
) -> None:
"""Initialize SQL Sink.
Args:
target: The target object.
stream_name: The source tap's stream name.
schema: The JSON Schema definition.
key_properties: The primary key columns.
connector: Optional connector to reuse.
"""
self._connector: SQLConnector
self._connector = connector or self.connector_class(dict(target.config))
super().__init__(target, stream_name, schema, key_properties)
@property
def connector(self) -> SQLConnector:
"""The connector object.
Returns:
The connector object.
"""
return self._connector
@property
def connection(self) -> sqlalchemy.engine.Connection:
"""Get or set the SQLAlchemy connection for this sink.
Returns:
A connection object.
"""
return self.connector.connection
@property
def table_name(self) -> str:
"""Return the table name, with no schema or database part.
Returns:
The target table name.
"""
parts = self.stream_name.split("-")
table = self.stream_name if len(parts) == 1 else parts[-1]
return self.conform_name(table, "table")
@property
def schema_name(self) -> str | None:
"""Return the schema name or `None` if using names with no schema part.
Returns:
The target schema name.
"""
# Look for a default_target_scheme in the configuraion fle
default_target_schema: str = self.config.get("default_target_schema", None)
parts = self.stream_name.split("-")
# 1) When default_target_scheme is in the configuration use it
# 2) if the streams are in <schema>-<table> format use the
# stream <schema>
# 3) Return None if you don't find anything
if default_target_schema:
return default_target_schema
if len(parts) in {2, 3}:
# Stream name is a two-part or three-part identifier.
# Use the second-to-last part as the schema name.
return self.conform_name(parts[-2], "schema")
# Schema name not detected.
return None
@property
def database_name(self) -> str | None:
"""Return the DB name or `None` if using names with no database part."""
# Assumes single-DB target context.
@property
def full_table_name(self) -> str:
"""Return the fully qualified table name.
Returns:
The fully qualified table name.
"""
return self.connector.get_fully_qualified_name(
table_name=self.table_name,
schema_name=self.schema_name,
db_name=self.database_name,
)
@property
def full_schema_name(self) -> str:
"""Return the fully qualified schema name.
Returns:
The fully qualified schema name.
"""
return self.connector.get_fully_qualified_name(
schema_name=self.schema_name,
db_name=self.database_name,
)
def conform_name(
self,
name: str,
object_type: str | None = None, # noqa: ARG002
) -> str:
"""Conform a stream property name to one suitable for the target system.
Transforms names to snake case by default, applicable to most common DBMSs'.
Developers may override this method to apply custom transformations
to database/schema/table/column names.
Args:
name: Property name.
object_type: One of ``database``, ``schema``, ``table`` or ``column``.
Returns:
The name transformed to snake case.
"""
# strip non-alphanumeric characters
name = re.sub(r"[^a-zA-Z0-9_\-\.\s]", "", name)
# strip leading/trailing whitespace,
# transform to lowercase and replace - . and spaces to _
name = (
name.lower()
.lstrip()
.rstrip()
.replace(".", "_")
.replace("-", "_")
.replace(" ", "_")
)
# replace leading digit
return replace_leading_digit(name)
@staticmethod
def _check_conformed_names_not_duplicated(
conformed_property_names: dict[str, str],
) -> None:
"""Check if conformed names produce duplicate keys.
Args:
conformed_property_names: A name:conformed_name dict map.
Raises:
ConformedNameClashException: if duplicates found.
"""
# group: {'_a': ['1_a'], 'abc': ['aBc', 'abC']} # noqa: ERA001
grouped = defaultdict(list)
for k, v in conformed_property_names.items():
grouped[v].append(k)
# filter
duplicates = list(filter(lambda p: len(p[1]) > 1, grouped.items()))
if duplicates:
raise ConformedNameClashException(
"Duplicate stream properties produced when "
f"conforming property names: {duplicates}",
)
def conform_schema(self, schema: dict) -> dict:
"""Return schema dictionary with property names conformed.
Args:
schema: JSON schema dictionary.
Returns:
A schema dictionary with the property names conformed.
"""
conformed_schema = copy(schema)
conformed_property_names = {
key: self.conform_name(key) for key in conformed_schema["properties"]
}
self._check_conformed_names_not_duplicated(conformed_property_names)
conformed_schema["properties"] = {
conformed_property_names[key]: value
for key, value in conformed_schema["properties"].items()
}
return conformed_schema
def conform_record(self, record: dict) -> dict:
"""Return record dictionary with property names conformed.
Args:
record: Dictionary representing a single record.
Returns:
New record dictionary with conformed column names.
"""
conformed_property_names = {key: self.conform_name(key) for key in record}
self._check_conformed_names_not_duplicated(conformed_property_names)
return {conformed_property_names[key]: value for key, value in record.items()}
def setup(self) -> None:
"""Set up Sink.
This method is called on Sink creation, and creates the required Schema and
Table entities in the target database.
"""
if self.schema_name:
self.connector.prepare_schema(self.schema_name)
self.connector.prepare_table(
full_table_name=self.full_table_name,
schema=self.conform_schema(self.schema),
primary_keys=self.key_properties,
as_temp_table=False,
)
@property
def key_properties(self) -> list[str]:
"""Return key properties, conformed to target system naming requirements.
Returns:
A list of key properties, conformed with `self.conform_name()`
"""
return [self.conform_name(key, "column") for key in super().key_properties]
def process_batch(self, context: dict) -> None:
"""Process a batch with the given batch context.
Writes a batch to the SQL target. Developers may override this method
in order to provide a more efficient upload/upsert process.
Args:
context: Stream partition or context dictionary.
"""
# If duplicates are merged, these can be tracked via
# :meth:`~singer_sdk.Sink.tally_duplicate_merged()`.
self.bulk_insert_records(
full_table_name=self.full_table_name,
schema=self.schema,
records=context["records"],
)
def generate_insert_statement(
self,
full_table_name: str,
schema: dict,
) -> str | Executable:
"""Generate an insert statement for the given records.
Args:
full_table_name: the target table name.
schema: the JSON schema for the new table.
Returns:
An insert statement.
"""
property_names = list(self.conform_schema(schema)["properties"].keys())
statement = dedent(
f"""\
INSERT INTO {full_table_name}
({", ".join(property_names)})
VALUES ({", ".join([f":{name}" for name in property_names])})
""", # noqa: S608
)
return statement.rstrip()
def bulk_insert_records(
self,
full_table_name: str,
schema: dict,
records: Iterable[dict[str, Any]],
) -> int | None:
"""Bulk insert records to an existing destination table.
The default implementation uses a generic SQLAlchemy bulk insert operation.
This method may optionally be overridden by developers in order to provide
faster, native bulk uploads.
Args:
full_table_name: the target table name.
schema: the JSON schema for the new table, to be used when inferring column
names.
records: the input records.
Returns:
True if table exists, False if not, None if unsure or undetectable.
"""
insert_sql = self.generate_insert_statement(
full_table_name,
schema,
)
if isinstance(insert_sql, str):
insert_sql = sqlalchemy.text(insert_sql)
conformed_records = (
[self.conform_record(record) for record in records]
if isinstance(records, list)
else (self.conform_record(record) for record in records)
)
self.logger.info("Inserting with SQL: %s", insert_sql)
with self.connector._connect() as conn, conn.begin():
conn.execute(insert_sql, conformed_records)
return len(conformed_records) if isinstance(conformed_records, list) else None
def merge_upsert_from_table(
self,
target_table_name: str,
from_table_name: str,
join_keys: list[str],
) -> int | None:
"""Merge upsert data from one table to another.
Args:
target_table_name: The destination table name.
from_table_name: The source table name.
join_keys: The merge upsert keys, or `None` to append.
Return:
The number of records copied, if detectable, or `None` if the API does not
report number of records affected/inserted.
Raises:
NotImplementedError: if the merge upsert capability does not exist or is
undefined.
"""
raise NotImplementedError
def activate_version(self, new_version: int) -> None:
"""Bump the active version of the target table.
Args:
new_version: The version number to activate.
"""
# There's nothing to do if the table doesn't exist yet
# (which it won't the first time the stream is processed)
if not self.connector.table_exists(self.full_table_name):
return
deleted_at = now()
if not self.connector.column_exists(
full_table_name=self.full_table_name,
column_name=self.version_column_name,
):
self.connector.prepare_column(
self.full_table_name,
self.version_column_name,
sql_type=sqlalchemy.types.Integer(),
)
if self.config.get("hard_delete", True):
with self.connector._connect() as conn, conn.begin():
conn.execute(
sqlalchemy.text(
f"DELETE FROM {self.full_table_name} " # noqa: S608
f"WHERE {self.version_column_name} <= {new_version}",
),
)
return
if not self.connector.column_exists(
full_table_name=self.full_table_name,
column_name=self.soft_delete_column_name,
):
self.connector.prepare_column(
self.full_table_name,
self.soft_delete_column_name,
sql_type=sqlalchemy.types.DateTime(),
)
query = sqlalchemy.text(
f"UPDATE {self.full_table_name}\n" # noqa: S608
f"SET {self.soft_delete_column_name} = :deletedate \n"
f"WHERE {self.version_column_name} < :version \n"
f" AND {self.soft_delete_column_name} IS NULL\n",
)
query = query.bindparams(
bindparam("deletedate", value=deleted_at, type_=sqlalchemy.types.DateTime),
bindparam("version", value=new_version, type_=sqlalchemy.types.Integer),
)
with self.connector._connect() as conn, conn.begin():
conn.execute(query)
__all__ = ["SQLSink", "SQLConnector"]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/internal/__init__.py | """Internal utilities for the Singer SDK."""
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/cli/common_options.py | """Common CLI options for plugins."""
from __future__ import annotations
import click
PLUGIN_VERSION = click.option(
"--version",
is_flag=True,
help="Display the package version.",
)
PLUGIN_ABOUT = click.option(
"--about",
is_flag=True,
help="Display package metadata and settings.",
)
PLUGIN_ABOUT_FORMAT = click.option(
"--format",
"about_format",
help="Specify output style for --about",
type=click.Choice(["json", "markdown"], case_sensitive=False),
default=None,
)
PLUGIN_CONFIG = click.option(
"--config",
multiple=True,
help="Configuration file location or 'ENV' to use environment variables.",
type=click.STRING,
default=(),
)
PLUGIN_FILE_INPUT = click.option(
"--input",
"file_input",
help="A path to read messages from instead of from standard in.",
type=click.File("r"),
)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/cli/__init__.py | """Helpers for the tap, target and mapper CLIs."""
from __future__ import annotations
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_tests.py | """Standard Target tests."""
from __future__ import annotations
from .templates import TargetFileTestTemplate, TargetTestTemplate
class TargetArrayData(TargetFileTestTemplate):
"""Test Target handles array data."""
name = "array_data"
class TargetCamelcaseComplexSchema(TargetFileTestTemplate):
"""Test Target handles CaMeLcAsE record key and attributes, nested."""
name = "camelcase_complex_schema"
class TargetCamelcaseTest(TargetFileTestTemplate):
"""Test Target handles CaMeLcAsE record key and attributes."""
name = "camelcase"
class TargetCliPrintsTest(TargetTestTemplate):
"""Test Target correctly prints version and about information."""
name = "cli_prints"
def test(self) -> None:
"""Run test."""
self.target.print_version()
self.target.print_about()
self.target.print_about(output_format="json")
class TargetDuplicateRecords(TargetFileTestTemplate):
"""Test Target handles duplicate records."""
name = "duplicate_records"
class TargetEncodedStringData(TargetFileTestTemplate):
"""Test Target handles encoded string data."""
name = "encoded_string_data"
class TargetInvalidSchemaTest(TargetFileTestTemplate):
"""Test Target handles an invalid schema message."""
name = "invalid_schema"
class TargetMultipleStateMessages(TargetFileTestTemplate):
"""Test Target correctly relays multiple received State messages (checkpoints)."""
name = "multiple_state_messages"
def test(self) -> None:
"""Run test."""
self.runner.sync_all()
state_messages = self.runner.state_messages
assert state_messages == [
'{"test_multiple_state_messages_a": 1, "test_multiple_state_messages_b": 0}', # noqa: E501
'{"test_multiple_state_messages_a": 3, "test_multiple_state_messages_b": 2}', # noqa: E501
'{"test_multiple_state_messages_a": 5, "test_multiple_state_messages_b": 6}', # noqa: E501
]
class TargetNoPrimaryKeys(TargetFileTestTemplate):
"""Test Target handles records without primary keys."""
name = "no_primary_keys"
class TargetOptionalAttributes(TargetFileTestTemplate):
"""Test Target handles optional record attributes."""
name = "optional_attributes"
class TargetRecordBeforeSchemaTest(TargetFileTestTemplate):
"""Test Target handles records arriving before schema."""
name = "record_before_schema"
class TargetRecordMissingKeyProperty(TargetFileTestTemplate):
"""Test Target handles record missing key property."""
name = "record_missing_key_property"
class TargetRecordMissingRequiredProperty(TargetFileTestTemplate):
"""Test Target handles record missing required property."""
name = "record_missing_required_property"
class TargetSchemaNoProperties(TargetFileTestTemplate):
"""Test Target handles schema with no properties."""
name = "schema_no_properties"
class TargetSchemaUpdates(TargetFileTestTemplate):
"""Test Target handles schema updates."""
name = "schema_updates"
class TargetSpecialCharsInAttributes(TargetFileTestTemplate):
"""Test Target handles special chars in attributes."""
name = "special_chars_in_attributes"
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/config.py | """Test config classes."""
from __future__ import annotations
from dataclasses import dataclass, field
@dataclass
class SuiteConfig:
"""Test Suite Config, passed to each test.
Args:
max_records_limit: Max records to fetch during tap testing.
ignore_no_records: Ignore stream test failures if stream returns no records,
for all streams.
ignore_no_records_for_streams: Ignore stream test failures if stream returns
no records, for named streams.
"""
max_records_limit: int | None = None
ignore_no_records: bool = False
ignore_no_records_for_streams: list[str] = field(default_factory=list)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/legacy.py | """Pre-built test functions which can be applied to multiple taps."""
from __future__ import annotations
import io
from contextlib import redirect_stderr, redirect_stdout
from typing import TYPE_CHECKING, Callable, cast
import singer_sdk._singerlib as singer
if TYPE_CHECKING:
from singer_sdk.mapper_base import InlineMapper
from singer_sdk.tap_base import Tap
from singer_sdk.target_base import Target
def get_standard_tap_tests(
tap_class: type[Tap],
config: dict | None = None,
) -> list[Callable]:
"""Return callable pytest which executes simple discovery and connection tests.
Args:
tap_class: TODO
config: TODO
Returns:
TODO
"""
def _test_cli_prints() -> None:
# Initialize with basic config
tap1: Tap = tap_class(config=config, parse_env_config=True)
# Test CLI prints
tap1.print_version()
tap1.print_about()
tap1.print_about(output_format="json")
def _test_discovery() -> None:
catalog1 = _get_tap_catalog(tap_class, config or {})
# Reset and re-initialize with an input catalog
tap2: Tap = tap_class(config=config, parse_env_config=True, catalog=catalog1)
assert tap2
def _test_stream_connections() -> None:
# Initialize with basic config
tap1: Tap = tap_class(config=config, parse_env_config=True)
tap1.run_connection_test()
def _test_pkeys_in_schema() -> None:
"""Verify that primary keys are actually in the stream's schema."""
tap = tap_class(config=config, parse_env_config=True)
for name, stream in tap.streams.items():
pkeys = stream.primary_keys or []
schema_props = set(stream.schema["properties"].keys())
for pkey in pkeys:
error_message = (
f"Coding error in stream '{name}': "
f"primary_key '{pkey}' is missing in schema"
)
assert pkey in schema_props, error_message
def _test_state_partitioning_keys_in_schema() -> None:
"""Verify that state partitioning keys are actually in the stream's schema."""
tap = tap_class(config=config, parse_env_config=True)
for name, stream in tap.streams.items():
sp_keys = stream.state_partitioning_keys or []
schema_props = set(stream.schema["properties"].keys())
for sp_key in sp_keys:
assert sp_key in schema_props, (
f"Coding error in stream '{name}': state_partitioning_key "
f"'{sp_key}' is missing in schema"
)
def _test_replication_keys_in_schema() -> None:
"""Verify that the replication key is actually in the stream's schema."""
tap = tap_class(config=config, parse_env_config=True)
for name, stream in tap.streams.items():
rep_key = stream.replication_key
if rep_key is None:
continue
schema_props = set(stream.schema["properties"].keys())
assert rep_key in schema_props, (
f"Coding error in stream '{name}': replication_key "
f"'{rep_key}' is missing in schema"
)
return [
_test_cli_prints,
_test_discovery,
_test_stream_connections,
_test_pkeys_in_schema,
_test_state_partitioning_keys_in_schema,
_test_replication_keys_in_schema,
]
def get_standard_target_tests(
target_class: type[Target], # noqa: ARG001
config: dict | None = None, # noqa: ARG001
) -> list[Callable]:
"""Return callable pytest which executes simple discovery and connection tests.
Args:
target_class: The target class to test.
config: A config dictionary for the tests.
Returns:
A list of callable tests.
"""
return []
def tap_sync_test(tap: Tap) -> tuple[io.StringIO, io.StringIO]:
"""Invokes a Tap object and return STDOUT and STDERR results in StringIO buffers.
Args:
tap: Any Tap object.
Returns:
A 2-item tuple with StringIO buffers from the Tap's output: (stdout, stderr)
"""
stdout_buf = io.StringIO()
stderr_buf = io.StringIO()
with redirect_stdout(stdout_buf), redirect_stderr(stderr_buf):
tap.sync_all()
stdout_buf.seek(0)
stderr_buf.seek(0)
return stdout_buf, stderr_buf
def _get_tap_catalog(
tap_class: type[Tap],
config: dict,
*,
select_all: bool = False,
) -> dict:
"""Return a catalog dict by running discovery.
Args:
tap_class: the tap class to create.
config: the config dict.
select_all: True to automatically select all streams in the catalog.
Returns:
Catalog dict created by discovery.
"""
# Initialize with basic config
tap: Tap = tap_class(config=config, parse_env_config=True)
# Test discovery
tap.run_discovery()
catalog_dict = tap.catalog_dict
if select_all:
return _select_all(catalog_dict)
return catalog_dict
def _select_all(catalog_dict: dict) -> dict:
"""Return the catalog dict with all streams selected.
Args:
catalog_dict (dict): [description]
Returns:
dict: [description]
"""
catalog = singer.Catalog.from_dict(catalog_dict)
for catalog_entry in catalog.streams:
catalog_entry.metadata.root.selected = True
return cast(dict, catalog.to_dict())
def target_sync_test(
target: Target,
input: io.StringIO | None, # noqa: A002
*,
finalize: bool = True,
) -> tuple[io.StringIO, io.StringIO]:
"""Invoke the target with the provided input.
Args:
target: Any Target object.
input: The input to process as if from STDIN.
finalize: True to process as the end of stream as a completion signal; False to
keep the sink operation open for further records.
Returns:
A 2-item tuple with StringIO buffers from the Target's output: (stdout, stderr)
"""
stdout_buf = io.StringIO()
stderr_buf = io.StringIO()
with redirect_stdout(stdout_buf), redirect_stderr(stderr_buf):
if input is not None:
target._process_lines(input)
if finalize:
target._process_endofpipe()
stdout_buf.seek(0)
stderr_buf.seek(0)
return stdout_buf, stderr_buf
def tap_to_target_sync_test(
tap: Tap,
target: Target,
) -> tuple[io.StringIO, io.StringIO, io.StringIO, io.StringIO]:
"""Test and end-to-end sink from the tap to the target.
Note: This method buffers all output from the tap in memory and should not be
used with larger datasets. Also note that the target will physically write out the
data. Cleanup afterwards should be handled by the caller, if necessary.
Args:
tap: Any Tap object.
target: Any Target object.
Returns:
A 4-item tuple with the StringIO buffers:
(tap_stdout, tap_stderr, target_stdout, target_stderr)
"""
tap_stdout, tap_stderr = tap_sync_test(tap)
target_stdout, target_stderr = target_sync_test(target, tap_stdout, finalize=True)
# Reset the tap's stdout buffer before returning
tap_stdout.seek(0)
return tap_stdout, tap_stderr, target_stdout, target_stderr
def sync_end_to_end(tap: Tap, target: Target, *mappers: InlineMapper) -> None:
"""Test and end-to-end sink from the tap to the target.
Args:
tap: Singer tap.
target: Singer target.
mappers: Zero or more inline mapper to apply in between the tap and target, in
order.
"""
buf = io.StringIO()
with redirect_stdout(buf):
tap.sync_all()
buf.seek(0)
mapper_output = buf
for mapper in mappers:
buf = io.StringIO()
with redirect_stdout(buf):
mapper.listen(mapper_output)
buf.seek(0)
mapper_output = buf
target.listen(mapper_output)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/__init__.py | """Tools and standard tests for Tap/Target implementations."""
from __future__ import annotations
from .config import SuiteConfig
from .factory import get_tap_test_class, get_target_test_class, get_test_class
from .legacy import (
_get_tap_catalog,
_select_all,
get_standard_tap_tests,
get_standard_target_tests,
sync_end_to_end,
tap_sync_test,
tap_to_target_sync_test,
target_sync_test,
)
from .runners import SingerTestRunner, TapTestRunner, TargetTestRunner
__all__ = [
"get_tap_test_class",
"get_target_test_class",
"get_test_class",
"_get_tap_catalog",
"_select_all",
"get_standard_tap_tests",
"get_standard_target_tests",
"sync_end_to_end",
"tap_sync_test",
"tap_to_target_sync_test",
"target_sync_test",
"SingerTestRunner",
"TapTestRunner",
"TargetTestRunner",
"SuiteConfig",
]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/factory.py | """Test Class Factory."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, cast
import pytest
from .config import SuiteConfig
from .runners import TapTestRunner, TargetTestRunner
from .suites import (
tap_stream_attribute_tests,
tap_stream_tests,
tap_tests,
target_tests,
)
if TYPE_CHECKING:
from singer_sdk import Tap, Target
def get_test_class(
test_runner: TapTestRunner | TargetTestRunner,
test_suites: list,
suite_config: SuiteConfig | None,
) -> object:
"""Construct a valid pytest test class from given suites.
Args:
test_runner: A Tap or Target test runner instance.
test_suites: A list of Test Suits to apply.
suite_config: SuiteConfig instance to pass to tests.
Returns:
A test class usable by pytest.
"""
class BaseTestClass:
"""Base test class."""
params: dict = {}
param_ids: dict = {}
@pytest.fixture
def config(self) -> SuiteConfig:
return suite_config or SuiteConfig()
@pytest.fixture
def resource(self) -> Any: # noqa: ANN401, PT004
yield # noqa: PT022
@pytest.fixture(scope="class")
def runner(self) -> TapTestRunner | TargetTestRunner:
# Populate runner class with cached records for use in tests
test_runner.sync_all()
return test_runner
for suite in test_suites:
# make sure given runner is of type TapTestRunner
expected_runner_class = (
TapTestRunner
if suite.kind in {"tap", "tap_stream", "tap_stream_attribute"}
else TargetTestRunner
)
assert isinstance(test_runner, expected_runner_class), (
f"Test suite of kind {suite.kind} passed, "
f"but test runner if of type {type(test_runner)}."
)
test_runner = cast(
expected_runner_class, # type: ignore[valid-type]
test_runner,
)
if suite.kind in {"tap", "target"}:
for test_class in suite.tests:
test = test_class()
test_name = f"test_{suite.kind}_{test.name}"
setattr(BaseTestClass, f"test_{suite.kind}_{test.name}", test.run)
if suite.kind in {"tap_stream", "tap_stream_attribute"}:
streams = list(test_runner.new_tap().streams.values())
if suite.kind == "tap_stream":
params = [
{
"stream": stream,
}
for stream in streams
]
param_ids = [stream.name for stream in streams]
for test_class in suite.tests:
test = test_class()
test_name = f"test_{suite.kind}_{test.name}"
setattr(
BaseTestClass,
test_name,
test.run,
)
BaseTestClass.params[test_name] = params
BaseTestClass.param_ids[test_name] = param_ids
if suite.kind == "tap_stream_attribute":
for test_class in suite.tests:
test = test_class()
test_name = f"test_{suite.kind}_{test.name}"
test_params = []
test_ids = []
for stream in streams:
test_params.extend(
[
{
"stream": stream,
"attribute_name": property_name,
}
for property_name, property_schema in stream.schema[
"properties"
].items()
if test_class.evaluate(
stream=stream,
property_name=property_name,
property_schema=property_schema,
)
],
)
test_ids.extend(
[
f"{stream.name}.{property_name}"
for property_name, property_schema in stream.schema[
"properties"
].items()
if test_class.evaluate(
stream=stream,
property_name=property_name,
property_schema=property_schema,
)
],
)
if test_params:
setattr(
BaseTestClass,
test_name,
test.run,
)
BaseTestClass.params[test_name] = test_params
BaseTestClass.param_ids[test_name] = test_ids
return BaseTestClass
def get_tap_test_class(
tap_class: type[Tap],
*,
config: dict | None = None,
include_tap_tests: bool = True,
include_stream_tests: bool = True,
include_stream_attribute_tests: bool = True,
custom_suites: list | None = None,
suite_config: SuiteConfig | None = None,
**kwargs: Any,
) -> object:
"""Get Tap Test Class.
Args:
tap_class: Meltano Singer SDK Tap class to test.
config: Config dict to use for testing.
include_tap_tests: Include tap tests.
include_stream_tests: Include Tap stream tests.
include_stream_attribute_tests: Include Tap stream attribute tests.
custom_suites: Custom test suites to add to standard tests.
suite_config: SuiteConfig instance to pass to tests.
kwargs: Keyword arguments to pass to the TapRunner.
Returns:
A test class usable by pytest.
"""
suites = custom_suites or []
if include_tap_tests:
suites.append(tap_tests)
if include_stream_tests:
suites.append(tap_stream_tests)
if include_stream_attribute_tests:
suites.append(tap_stream_attribute_tests)
# set default values
if "parse_env_config" not in kwargs:
kwargs["parse_env_config"] = True
return get_test_class(
test_runner=TapTestRunner(
tap_class=tap_class,
config=config,
suite_config=suite_config,
**kwargs,
),
test_suites=suites,
suite_config=suite_config,
)
def get_target_test_class(
target_class: type[Target],
*,
config: dict | None = None,
custom_suites: list | None = None,
suite_config: SuiteConfig | None = None,
**kwargs: Any,
) -> object:
"""Get Target Test Class.
Args:
target_class: Meltano Singer SDK Target class to test.
config: Config dict to use for testing.
custom_suites: Custom test suites to add to standard tests.
suite_config: SuiteConfig instance to pass to tests.
kwargs: Keyword arguments to pass to the TapRunner.
Returns:
A test class usable by pytest.
"""
suites = custom_suites or []
suites.append(target_tests)
# set default values
if "parse_env_config" not in kwargs:
kwargs["parse_env_config"] = True
return get_test_class(
test_runner=TargetTestRunner(
target_class=target_class,
config=config,
**kwargs,
),
test_suites=suites,
suite_config=suite_config,
)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/suites.py | """Standard Tap and Target test suites."""
from __future__ import annotations
import typing as t
from dataclasses import dataclass
from .tap_tests import (
AttributeIsBooleanTest,
AttributeIsDateTimeTest,
AttributeIsIntegerTest,
AttributeIsNumberTest,
AttributeIsObjectTest,
AttributeNotNullTest,
StreamCatalogSchemaMatchesRecordTest,
StreamPrimaryKeysTest,
StreamRecordSchemaMatchesCatalogTest,
StreamReturnsRecordTest,
TapCLIPrintsTest,
TapDiscoveryTest,
TapStreamConnectionTest,
)
# TODO: add TargetMultipleStateMessages
# TODO: fix behavior in SDK to make this pass
from .target_tests import (
TargetArrayData,
TargetCamelcaseComplexSchema,
TargetCamelcaseTest,
TargetCliPrintsTest,
TargetDuplicateRecords,
TargetEncodedStringData,
TargetInvalidSchemaTest,
TargetNoPrimaryKeys,
TargetOptionalAttributes,
TargetRecordBeforeSchemaTest,
TargetRecordMissingKeyProperty,
TargetSchemaNoProperties,
TargetSchemaUpdates,
TargetSpecialCharsInAttributes,
)
if t.TYPE_CHECKING:
from .templates import TapTestTemplate, TargetTestTemplate, TestTemplate
@dataclass
class TestSuite:
"""Test Suite container class."""
kind: str
tests: list[type[TestTemplate] | type[TapTestTemplate] | type[TargetTestTemplate]]
# Tap Test Suites
tap_tests = TestSuite(
kind="tap",
tests=[TapCLIPrintsTest, TapDiscoveryTest, TapStreamConnectionTest],
)
tap_stream_tests = TestSuite(
kind="tap_stream",
tests=[
StreamCatalogSchemaMatchesRecordTest,
StreamRecordSchemaMatchesCatalogTest,
StreamReturnsRecordTest,
StreamPrimaryKeysTest,
],
)
tap_stream_attribute_tests = TestSuite(
kind="tap_stream_attribute",
tests=[
AttributeIsBooleanTest,
AttributeIsDateTimeTest,
AttributeIsIntegerTest,
AttributeIsNumberTest,
AttributeIsObjectTest,
AttributeNotNullTest,
],
)
# Target Test Suites
target_tests = TestSuite(
kind="target",
tests=[
TargetArrayData,
TargetCamelcaseComplexSchema,
TargetCamelcaseTest,
TargetCliPrintsTest,
TargetDuplicateRecords,
TargetEncodedStringData,
TargetInvalidSchemaTest,
# TargetMultipleStateMessages,
TargetNoPrimaryKeys,
TargetOptionalAttributes,
TargetRecordBeforeSchemaTest,
TargetRecordMissingKeyProperty,
TargetSchemaNoProperties,
TargetSchemaUpdates,
TargetSpecialCharsInAttributes,
],
)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/templates.py | """Tap and Target Test Templates."""
from __future__ import annotations
import contextlib
import warnings
from pathlib import Path
from typing import TYPE_CHECKING, Any
if TYPE_CHECKING:
from singer_sdk.streams import Stream
from .config import SuiteConfig
from .runners import TapTestRunner, TargetTestRunner
class TestTemplate:
"""Each Test class requires one or more of the following arguments.
Args:
runner (SingerTestRunner): The singer runner for this test.
Possible Args:
stream (obj, optional): Initialized stream object to be tested.
stream_name (str, optional): Name of the stream to be tested.
attribute_name (str, optional): Name of the attribute to be tested.
Raises:
ValueError: [description]
NotImplementedError: [description]
NotImplementedError: [description]
"""
name: str | None = None
plugin_type: str | None = None
@property
def id(self) -> str: # noqa: A003
"""Test ID.
Raises:
NotImplementedError: if not implemented.
"""
raise NotImplementedError("ID not implemented.")
def setup(self) -> None:
"""Test setup, called before `.test()`.
This method is useful for preparing external resources (databases, folders etc.)
before test execution.
Raises:
NotImplementedError: if not implemented.
"""
raise NotImplementedError("Setup method not implemented.")
def test(self) -> None:
"""Main Test body, called after `.setup()` and before `.validate()`."""
self.runner.sync_all()
def validate(self) -> None:
"""Test validation, called after `.test()`.
This method is particularly useful in Target tests, to validate that records
were correctly written to external systems.
Raises:
NotImplementedError: if not implemented.
"""
raise NotImplementedError("Method not implemented.")
def teardown(self) -> None:
"""Test Teardown.
This method is useful for cleaning up external resources
(databases, folders etc.) after test completion.
Raises:
NotImplementedError: if not implemented.
"""
raise NotImplementedError("Method not implemented.")
def run(
self,
config: SuiteConfig,
resource: Any,
runner: TapTestRunner | TargetTestRunner,
) -> None:
"""Test main run method.
Args:
config: SuiteConfig instance, to use for test.
resource: A generic external resource, provided by a pytest fixture.
runner: A Tap or Target runner instance, to use with this test.
Raises:
ValueError: if Test instance does not have `name` and `type` properties.
"""
if not self.name or not self.plugin_type:
raise ValueError("Test must have 'name' and 'type' properties.")
self.config = config
self.resource = resource
self.runner = runner
with contextlib.suppress(NotImplementedError):
self.setup()
try:
self.test()
with contextlib.suppress(NotImplementedError):
self.validate()
finally:
with contextlib.suppress(NotImplementedError):
self.teardown()
class TapTestTemplate(TestTemplate):
"""Base Tap test template."""
plugin_type = "tap"
@property
def id(self) -> str: # noqa: A003
"""Test ID.
Returns:
Test ID string.
"""
return f"tap__{self.name}"
def run( # type: ignore[override]
self,
config: SuiteConfig,
resource: Any,
runner: TapTestRunner,
) -> None:
"""Test main run method.
Args:
config: SuiteConfig instance, to use for test.
resource: A generic external resource, provided by a pytest fixture.
runner: A Tap or Target runner instance, to use with this test.
"""
self.tap = runner.new_tap()
super().run(config, resource, runner)
class StreamTestTemplate(TestTemplate):
"""Base Tap Stream test template."""
plugin_type = "stream"
required_kwargs = ["stream"]
@property
def id(self) -> str: # noqa: A003
"""Test ID.
Returns:
Test ID string.
"""
return f"{self.stream.name}__{self.name}"
def run( # type: ignore[override]
self,
config: SuiteConfig,
resource: Any,
runner: TapTestRunner,
stream: Stream,
) -> None:
"""Test main run method.
Args:
config: SuiteConfig instance, to use for test.
resource: A generic external resource, provided by a pytest fixture.
runner: A Tap runner instance, to use with this test.
stream: A Tap Stream instance, to use with this test.
"""
self.stream = stream
self.stream_records = runner.records[stream.name]
super().run(config, resource, runner)
class AttributeTestTemplate(TestTemplate):
"""Base Tap Stream Attribute template."""
plugin_type = "attribute"
@property
def id(self) -> str: # noqa: A003
"""Test ID.
Returns:
Test ID string.
"""
return f"{self.stream.name}__{self.attribute_name}__{self.name}"
def run( # type: ignore[override]
self,
config: SuiteConfig,
resource: Any,
runner: TapTestRunner,
stream: Stream,
attribute_name: str,
) -> None:
"""Test main run method.
Args:
config: SuiteConfig instance, to use for test.
resource: A generic external resource, provided by a pytest fixture.
runner: A Tap runner instance, to use with this test.
stream: A Tap Stream instance, to use with this test.
to use with this test.
attribute_name: The name of the attribute to test.
"""
self.stream = stream
self.stream_records = runner.records[stream.name]
self.attribute_name = attribute_name
super().run(config, resource, runner)
@property
def non_null_attribute_values(self) -> list[Any]:
"""Extract attribute values from stream records.
Returns:
A list of attribute values (excluding None values).
"""
values = [
r[self.attribute_name]
for r in self.stream_records
if r.get(self.attribute_name) is not None
]
if not values:
warnings.warn(
UserWarning("No records were available to test."),
stacklevel=2,
)
return values
@classmethod
def evaluate(
cls,
stream: Stream,
property_name: str,
property_schema: dict,
) -> bool:
"""Determine if this attribute test is applicable to the given property.
Args:
stream: Parent Stream of given attribute.
property_name: Name of given attribute.
property_schema: JSON Schema of given property, in dict form.
Raises:
NotImplementedError: if not implemented.
"""
raise NotImplementedError(
"The 'evaluate' method is required for attribute tests, "
"but not implemented.",
)
class TargetTestTemplate(TestTemplate):
"""Base Target test template."""
plugin_type = "target"
def run( # type: ignore[override]
self,
config: SuiteConfig,
resource: Any,
runner: TargetTestRunner,
) -> None:
"""Test main run method.
Args:
config: SuiteConfig instance, to use for test.
resource: A generic external resource, provided by a pytest fixture.
runner: A Tap runner instance, to use with this test.
"""
self.target = runner.new_target()
super().run(config, resource, runner)
@property
def id(self) -> str: # noqa: A003
"""Test ID.
Returns:
Test ID string.
"""
return f"target__{self.name}"
class TargetFileTestTemplate(TargetTestTemplate):
"""Base Target File Test Template.
Use this when sourcing Target test input from a .singer file.
"""
def run( # type: ignore[override]
self,
config: SuiteConfig,
resource: Any,
runner: TargetTestRunner,
) -> None:
"""Test main run method.
Args:
config: SuiteConfig instance, to use for test.
resource: A generic external resource, provided by a pytest fixture.
runner: A Tap runner instance, to use with this test.
"""
# get input from file
if getattr(self, "singer_filepath", None):
assert Path(
self.singer_filepath,
).exists(), f"Singer file {self.singer_filepath} does not exist."
runner.input_filepath = self.singer_filepath
super().run(config, resource, runner)
@property
def singer_filepath(self) -> Path:
"""Get path to singer JSONL formatted messages file.
Files will be sourced from `./target_test_streams/<test name>.singer`.
Returns:
The expected Path to this tests singer file.
"""
current_dir = Path(__file__).resolve().parent
return current_dir / "target_test_streams" / f"{self.name}.singer"
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/tap_tests.py | """Standard Tap Tests."""
from __future__ import annotations
import warnings
from typing import TYPE_CHECKING, Type, cast
from dateutil import parser
import singer_sdk.helpers._typing as th
from singer_sdk import Tap
from .templates import AttributeTestTemplate, StreamTestTemplate, TapTestTemplate
if TYPE_CHECKING:
from singer_sdk.streams.core import Stream
class TapCLIPrintsTest(TapTestTemplate):
"""Test that the tap is able to print standard metadata."""
name = "cli_prints"
def test(self) -> None:
"""Run test."""
self.tap.print_version()
self.tap.print_about()
self.tap.print_about(output_format="json")
class TapDiscoveryTest(TapTestTemplate):
"""Test that discovery mode generates a valid tap catalog."""
name = "discovery"
def test(self) -> None:
"""Run test."""
tap1 = self.tap
tap1.run_discovery()
catalog = tap1.catalog_dict
# Reset and re-initialize with discovered catalog
kwargs = {k: v for k, v in self.runner.default_kwargs.items() if k != "catalog"}
tap2: Tap = cast(Type[Tap], self.runner.singer_class)(
config=self.runner.config,
catalog=catalog,
**kwargs,
)
assert tap2
class TapStreamConnectionTest(TapTestTemplate):
"""Test that the tap can connect to each stream."""
name = "stream_connections"
def test(self) -> None:
"""Run test."""
self.tap.run_connection_test()
class StreamReturnsRecordTest(StreamTestTemplate):
"""Test that a stream sync returns at least 1 record."""
name = "returns_record"
def test(self) -> None:
"""Run test."""
no_records_message = f"No records returned in stream '{self.stream.name}'."
if (
self.config.ignore_no_records
or self.stream.name in self.config.ignore_no_records_for_streams
):
# only warn if this or all streams are set to ignore no records
warnings.warn(UserWarning(no_records_message), stacklevel=2)
else:
record_count = len(self.stream_records)
assert record_count > 0, no_records_message
class StreamCatalogSchemaMatchesRecordTest(StreamTestTemplate):
"""Test all attributes in the catalog schema are present in the record schema."""
name = "catalog_schema_matches_record"
def test(self) -> None:
"""Run test."""
stream_catalog_keys = set(self.stream.schema["properties"].keys())
stream_record_keys = set().union(*(d.keys() for d in self.stream_records))
diff = stream_catalog_keys - stream_record_keys
if diff:
warnings.warn(
UserWarning(f"Fields in catalog but not in records: ({diff})"),
stacklevel=2,
)
class StreamRecordSchemaMatchesCatalogTest(StreamTestTemplate):
"""Test all attributes in the record schema are present in the catalog schema."""
name = "record_schema_matches_catalog"
def test(self) -> None:
"""Run test."""
stream_catalog_keys = set(self.stream.schema["properties"].keys())
stream_record_keys = set().union(*(d.keys() for d in self.stream_records))
diff = stream_record_keys - stream_catalog_keys
assert not diff, f"Fields in records but not in catalog: ({diff})"
class StreamPrimaryKeysTest(StreamTestTemplate):
"""Test all records for a stream's primary key are unique and non-null."""
name = "primary_keys"
def test(self) -> None:
"""Run test.
Raises:
AssertionError: if record is missing primary key.
"""
primary_keys = self.stream.primary_keys
try:
record_ids = [
(r[k] for k in primary_keys or []) for r in self.stream_records
]
except KeyError as e:
raise AssertionError(f"Record missing primary key: {str(e)}") from e
count_unique_records = len(set(record_ids))
count_records = len(self.stream_records)
assert count_unique_records == count_records, (
f"Length of set of records IDs ({count_unique_records})"
f" is not equal to number of records ({count_records})."
)
assert all(
all(k is not None for k in pk) for pk in record_ids
), "Primary keys contain some key values that are null."
class AttributeIsDateTimeTest(AttributeTestTemplate):
"""Test a given attribute contains unique values (ignores null values)."""
name = "is_datetime"
def test(self) -> None:
"""Run test.
Raises:
AssertionError: if value cannot be parsed as a datetime.
"""
for v in self.non_null_attribute_values:
try:
error_message = f"Unable to parse value ('{v}') with datetime parser."
assert parser.parse(v), error_message
except parser.ParserError as e:
raise AssertionError(error_message) from e
@classmethod
def evaluate(
cls,
stream: Stream, # noqa: ARG003
property_name: str, # noqa: ARG003
property_schema: dict,
) -> bool:
"""Determine if this attribute test is applicable to the given property.
Args:
stream: Parent Stream of given attribute.
property_name: Name of given attribute.
property_schema: JSON Schema of given property, in dict form.
Returns:
True if this test is applicable, False if not.
"""
return bool(th.is_date_or_datetime_type(property_schema))
class AttributeIsBooleanTest(AttributeTestTemplate):
"""Test an attribute is of boolean datatype (or can be cast to it)."""
name = "is_boolean"
def test(self) -> None:
"""Run test."""
for v in self.non_null_attribute_values:
assert isinstance(v, bool) or str(v).lower() in {
"true",
"false",
}, f"Unable to cast value ('{v}') to boolean type."
@classmethod
def evaluate(
cls,
stream: Stream, # noqa: ARG003
property_name: str, # noqa: ARG003
property_schema: dict,
) -> bool:
"""Determine if this attribute test is applicable to the given property.
Args:
stream: Parent Stream of given attribute.
property_name: Name of given attribute.
property_schema: JSON Schema of given property, in dict form.
Returns:
True if this test is applicable, False if not.
"""
return bool(th.is_boolean_type(property_schema))
class AttributeIsObjectTest(AttributeTestTemplate):
"""Test that a given attribute is an object type."""
name = "is_object"
def test(self) -> None:
"""Run test."""
for v in self.non_null_attribute_values:
assert isinstance(v, dict), f"Unable to cast value ('{v}') to dict type."
@classmethod
def evaluate(
cls,
stream: Stream, # noqa: ARG003
property_name: str, # noqa: ARG003
property_schema: dict,
) -> bool:
"""Determine if this attribute test is applicable to the given property.
Args:
stream: Parent Stream of given attribute.
property_name: Name of given attribute.
property_schema: JSON Schema of given property, in dict form.
Returns:
True if this test is applicable, False if not.
"""
return bool(th.is_object_type(property_schema))
class AttributeIsIntegerTest(AttributeTestTemplate):
"""Test that a given attribute can be converted to an integer type."""
name = "is_integer"
def test(self) -> None:
"""Run test."""
for v in self.non_null_attribute_values:
assert isinstance(v, int) or isinstance(
int(v),
int,
), f"Unable to cast value ('{v}') to int type."
@classmethod
def evaluate(
cls,
stream: Stream, # noqa: ARG003
property_name: str, # noqa: ARG003
property_schema: dict,
) -> bool:
"""Determine if this attribute test is applicable to the given property.
Args:
stream: Parent Stream of given attribute.
property_name: Name of given attribute.
property_schema: JSON Schema of given property, in dict form.
Returns:
True if this test is applicable, False if not.
"""
return bool(th.is_integer_type(property_schema))
class AttributeIsNumberTest(AttributeTestTemplate):
"""Test that a given attribute can be converted to a floating point number type."""
name = "is_numeric"
def test(self) -> None:
"""Run test.
Raises:
AssertionError: if value cannot be cast to float type.
"""
for v in self.non_null_attribute_values:
error_message = f"Unable to cast value ('{v}') to float type."
if not isinstance(v, (float, int)):
raise AssertionError(error_message)
@classmethod
def evaluate(
cls,
stream: Stream, # noqa: ARG003
property_name: str, # noqa: ARG003
property_schema: dict,
) -> bool:
"""Determine if this attribute test is applicable to the given property.
Args:
stream: Parent Stream of given attribute.
property_name: Name of given attribute.
property_schema: JSON Schema of given property, in dict form.
Returns:
True if this test is applicable, False if not.
"""
return bool(th.is_number_type(property_schema))
class AttributeNotNullTest(AttributeTestTemplate):
"""Test that a given attribute does not contain any null values."""
name = "not_null"
def test(self) -> None:
"""Run test."""
for r in self.stream_records:
assert (
r.get(self.attribute_name) is not None
), f"Detected null values for attribute ('{self.attribute_name}')."
@classmethod
def evaluate(
cls,
stream: Stream, # noqa: ARG003
property_name: str, # noqa: ARG003
property_schema: dict,
) -> bool:
"""Determine if this attribute test is applicable to the given property.
Args:
stream: Parent Stream of given attribute.
property_name: Name of given attribute.
property_schema: JSON Schema of given property, in dict form.
Returns:
True if this test is applicable, False if not.
"""
return not bool(th.is_null_type(property_schema))
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/runners.py | """Utility object for running taps/targets, capturing sync output during testing."""
from __future__ import annotations
import abc
import io
import json
from collections import defaultdict
from contextlib import redirect_stderr, redirect_stdout
from pathlib import Path
from typing import IO, Any, cast
from singer_sdk import Tap, Target
from singer_sdk.testing.config import SuiteConfig
class SingerTestRunner(metaclass=abc.ABCMeta):
"""Base Singer Test Runner."""
raw_messages: list[dict] = []
schema_messages: list[dict] = []
record_messages: list[dict] = []
state_messages: list[dict] = []
records: defaultdict = defaultdict(list)
def __init__(
self,
singer_class: type[Tap] | type[Target],
config: dict | None = None,
suite_config: SuiteConfig | None = None,
**kwargs: Any,
) -> None:
"""Initialize the test runner object.
Args:
singer_class (type[PluginBase]): Singer class to be tested.
config (dict): Tap/Target configuration for testing.
suite_config (SuiteConfig): SuiteConfig instance to be used when
instantiating tests.
kwargs (dict): Default arguments to be passed to tap/target on create.
"""
self.singer_class = singer_class
self.config = config or {}
self.default_kwargs = kwargs
self.suite_config = suite_config or SuiteConfig()
@staticmethod
def _clean_sync_output(raw_records: str) -> list[dict]:
"""Clean sync output.
Args:
raw_records: String containing raw messages.
Returns:
A list of raw messages in dict form.
"""
lines = raw_records.strip().split("\n")
return [json.loads(ii) for ii in lines if ii]
def create(self, kwargs: dict | None = None) -> Tap | Target:
"""Create a new tap/target from the runner defaults.
Args:
kwargs (dict, optional): [description]. Defaults to None.
Returns:
An instantiated Tap or Target.
"""
if not kwargs:
kwargs = self.default_kwargs
return self.singer_class(config=self.config, **kwargs)
@abc.abstractmethod
def sync_all(self, **kwargs: Any) -> None:
"""Sync all records.
Args:
kwargs: Keyword arguments.
"""
class TapTestRunner(SingerTestRunner):
"""Utility class to simplify tap testing."""
def __init__(
self,
tap_class: type[Tap],
config: dict | None = None,
suite_config: SuiteConfig | None = None,
**kwargs: Any,
) -> None:
"""Initialize Tap instance.
Args:
tap_class: Tap class to run.
config: Config dict to pass to Tap class.
suite_config (SuiteConfig): SuiteConfig instance to be used when
instantiating tests.
kwargs: Default arguments to be passed to tap on create.
"""
super().__init__(
singer_class=tap_class,
config=config or {},
suite_config=suite_config,
**kwargs,
)
def new_tap(self) -> Tap:
"""Get new Tap instance.
Returns:
A configured Tap instance.
"""
return cast(Tap, self.create())
def run_discovery(self) -> str:
"""Run tap discovery.
Returns:
The catalog as a string.
"""
return self.new_tap().run_discovery()
def run_connection_test(self) -> bool:
"""Run tap connection test.
Returns:
True if connection test passes, else False.
"""
new_tap = self.new_tap()
return new_tap.run_connection_test()
def run_sync_dry_run(self) -> bool:
"""Run tap sync dry run.
Returns:
True if dry run test passes, else False.
"""
new_tap = self.new_tap()
dry_run_record_limit = None
if self.suite_config.max_records_limit is not None:
dry_run_record_limit = self.suite_config.max_records_limit
return new_tap.run_sync_dry_run(dry_run_record_limit=dry_run_record_limit)
def sync_all(self, **kwargs: Any) -> None: # noqa: ARG002
"""Run a full tap sync, assigning output to the runner object.
Args:
kwargs: Unused keyword arguments.
"""
stdout, stderr = self._execute_sync()
messages = self._clean_sync_output(stdout)
self._parse_records(messages)
def _parse_records(self, messages: list[dict]) -> None:
"""Save raw and parsed messages onto the runner object.
Args:
messages: A list of messages in dict form.
"""
self.raw_messages = messages
for message in messages:
if message:
if message["type"] == "STATE":
self.state_messages.append(message)
continue
if message["type"] == "SCHEMA":
self.schema_messages.append(message)
continue
if message["type"] == "RECORD":
stream_name = message["stream"]
self.record_messages.append(message)
self.records[stream_name].append(message["record"])
continue
def _execute_sync(self) -> tuple[str, str]:
"""Invoke a Tap object and return STDOUT and STDERR results in StringIO buffers.
Returns:
A 2-item tuple with StringIO buffers from the Tap's output: (stdout, stderr)
"""
stdout_buf = io.StringIO()
stderr_buf = io.StringIO()
with redirect_stdout(stdout_buf), redirect_stderr(stderr_buf):
self.run_sync_dry_run()
stdout_buf.seek(0)
stderr_buf.seek(0)
return stdout_buf.read(), stderr_buf.read()
class TargetTestRunner(SingerTestRunner):
"""Utility class to simplify target testing."""
def __init__(
self,
target_class: type[Target],
config: dict | None = None,
suite_config: SuiteConfig | None = None,
input_filepath: Path | None = None,
input_io: io.StringIO | None = None,
**kwargs: Any,
) -> None:
"""Initialize TargetTestRunner.
Args:
target_class: Target Class to instantiate.
config: Config to pass to instantiated Target.
suite_config: Config to pass to tests.
input_filepath: (optional) Path to a singer file containing records, to pass
to the Target during testing.
input_io: (optional) StringIO containing raw records to pass to the Target
during testing.
kwargs: Default arguments to be passed to tap/target on create.
"""
super().__init__(
singer_class=target_class,
config=config or {},
suite_config=suite_config,
**kwargs,
)
self.input_filepath = input_filepath
self.input_io = input_io
self._input: IO[str] | None = None
def new_target(self) -> Target:
"""Get new Target instance.
Returns:
A configured Target instance.
"""
return cast(Target, self.create())
@property
def target_input(self) -> IO[str]:
"""Input messages to pass to Target.
Returns:
A list of raw input messages in string form.
"""
if self._input is None:
if self.input_io:
self._input = self.input_io
elif self.input_filepath:
self._input = Path(self.input_filepath).open()
return cast(IO[str], self._input)
@target_input.setter
def target_input(self, value: IO[str]) -> None:
self._input = value
def sync_all(self, *, finalize: bool = True, **kwargs: Any) -> None: # noqa: ARG002
"""Run a full tap sync, assigning output to the runner object.
Args:
finalize: True to process as the end of stream as a completion signal;
False to keep the sink operation open for further records.
kwargs: Unused keyword arguments.
"""
target = self.new_target()
stdout, stderr = self._execute_sync(
target=target,
target_input=self.target_input,
finalize=finalize,
)
self.stdout, self.stderr = (stdout.read(), stderr.read())
self.state_messages.extend(self._clean_sync_output(self.stdout))
def _execute_sync(
self,
target: Target,
target_input: IO[str],
*,
finalize: bool = True,
) -> tuple[io.StringIO, io.StringIO]:
"""Invoke the target with the provided input.
Args:
target: Target to sync.
target_input: The input to process as if from STDIN.
finalize: True to process as the end of stream as a completion signal;
False to keep the sink operation open for further records.
Returns:
A 2-item tuple with StringIO buffers from the Target's output:
(stdout, stderr)
"""
stdout_buf = io.StringIO()
stderr_buf = io.StringIO()
with redirect_stdout(stdout_buf), redirect_stderr(stderr_buf):
if target_input is not None:
target._process_lines(target_input)
if finalize:
target._process_endofpipe()
stdout_buf.seek(0)
stderr_buf.seek(0)
return stdout_buf, stderr_buf
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/pytest_plugin.py | """Pytest Plugin."""
from __future__ import annotations
import pytest
def pytest_generate_tests(metafunc: pytest.Metafunc) -> None:
"""Pytest Hook, responsible for parameterizing tests.
Called once per each test function, this hook will check if the function name is
registered in the parent classes 'params' dict, and if so will parameterize
the given test function with the values therein.
Args:
metafunc: Pytest MetaFunc instance, representing a test function or method.
"""
if metafunc.cls and hasattr(metafunc.cls, "params"):
func_arg_list = metafunc.cls.params.get(metafunc.definition.name)
func_arg_ids = (
metafunc.cls.param_ids.get(metafunc.definition.name)
if hasattr(metafunc.cls, "param_ids")
else None
)
if func_arg_list:
arg_names = list(func_arg_list[0].keys())
parameters = [
pytest.param(*tuple(func_args[name] for name in arg_names))
for func_args in func_arg_list
]
metafunc.parametrize(
",".join(arg_names),
parameters,
ids=func_arg_ids,
)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/user_location_upsert_data.singer | {"type": "SCHEMA", "stream": "test_users", "key_properties": ["id"], "schema": {"required": ["id", "name"], "type": "object", "properties": {"id": {"type": "integer"}, "name": {"type": "string"}}}}
{"type": "RECORD", "stream": "test_users", "record": {"id": 1, "name": "Johny"}}
{"type": "RECORD", "stream": "test_users", "record": {"id": 2, "name": "George"}}
{"type": "RECORD", "stream": "test_users", "record": {"id": 5, "name": "Jim"}}
{"type": "RECORD", "stream": "test_users", "record": {"id": 8, "name": "Thomas"}}
{"type": "RECORD", "stream": "test_users", "record": {"id": 12, "name": "Paul"}}
{"type": "RECORD", "stream": "test_users", "record": {"id": 13, "name": "Mary"}}
{"type": "SCHEMA", "stream": "test_locations", "key_properties": ["id"], "schema": {"required": ["id", "name"], "type": "object", "properties": {"id": {"type": "integer"}, "name": {"type": "string"}}}}
{"type": "RECORD", "stream": "test_locations", "record": {"id": 1, "name": "Philly"}}
{"type": "RECORD", "stream": "test_locations", "record": {"id": 3, "name": "San Francisco"}}
{"type": "RECORD", "stream": "test_locations", "record": {"id": 6, "name": "Colorado"}}
{"type": "RECORD", "stream": "test_locations", "record": {"id": 8, "name": "Boston"}}
{"type": "SCHEMA", "stream": "test_user_in_location", "key_properties": ["id"], "schema": {"required": ["id", "user_id", "location_id"], "type": "object", "properties": {"id": {"type": "integer"}, "user_id": {"type": "integer"}, "location_id": {"type": "integer"}, "info": {"type": "object", "properties": {"weather": {"type": "string"}, "mood": {"type": "string"}}}}}}
{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 1, "user_id": 1, "location_id": 4, "info": {"weather": "rainy", "mood": "sad"}}}
{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 2, "user_id": 2, "location_id": 3, "info": {"weather": "sunny", "mood": "satisfied"}}}
{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 6, "user_id": 3, "location_id": 2, "info": {"weather": "sunny", "mood": "happy"}}}
{"type": "RECORD", "stream": "test_user_in_location", "record": {"id": 14, "user_id": 4, "location_id": 1, "info": {"weather": "cloudy", "mood": "ok"}}}
{"type": "STATE", "value": {"test_users": 13, "test_locations": 8, "test_user_in_location": 14}}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/invalid_schema.singer | {"type": "SCHEMA", "stream": "test_invalid_schema", "schema": {"type": "object"}, "key_properties": []}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/record_missing_required_property.singer | {"type": "SCHEMA", "stream": "test_record_missing_required_property", "key_properties": [], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}}
{"type": "RECORD", "stream": "test_record_missing_required_property", "record": {"metric": 3215}}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/optional_attributes.singer | {"type": "SCHEMA", "stream": "test_optional_attributes", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "optional": {"type": "string"}}}}
{"type": "RECORD", "stream": "test_optional_attributes", "record": {"id": 1, "optional": "This is optional"}}
{"type": "RECORD", "stream": "test_optional_attributes", "record": {"id": 2}}
{"type": "RECORD", "stream": "test_optional_attributes", "record": {"id": 3, "optional": "Also optional"}}
{"type": "RECORD", "stream": "test_optional_attributes", "record": {"id": 4}}
{"type": "STATE", "value": {"test_optional_attributes": 4}}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/array_data.singer | {"type": "SCHEMA", "stream": "test_array_data", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "fruits": {"type": "array","items": {"type": "string"}}}}}
{"type": "RECORD", "stream": "test_array_data", "record": {"id": 1, "fruits": [ "apple", "orange", "pear" ]}}
{"type": "RECORD", "stream": "test_array_data", "record": {"id": 2, "fruits": [ "banana", "apple" ]}}
{"type": "RECORD", "stream": "test_array_data", "record": {"id": 3, "fruits": [ "pear" ]}}
{"type": "RECORD", "stream": "test_array_data", "record": {"id": 4, "fruits": [ "orange", "banana", "apple", "pear" ]}}
{"type": "STATE", "value": {"test_array_data": 4}}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/no_primary_keys.singer | {"type": "SCHEMA", "stream": "test_no_pk", "key_properties": [], "schema": { "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}}
{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 1, "metric": 11}}
{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 2, "metric": 22}}
{"type": "RECORD", "stream": "test_no_pk", "record": {"id": 3, "metric": 33}}
{"type": "STATE", "value": {"test_no_pk": 3}}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/camelcase_complex_schema.singer | {"type": "SCHEMA", "stream": "ForecastingTypeToCategory", "schema": {"properties": {"Id": {"type": "string"}, "IsDeleted": {"type": ["null", "boolean"]}, "CreatedDate": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "CreatedById": {"type": ["null", "string"]}, "LastModifiedDate": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "LastModifiedById": {"type": ["null", "string"]}, "SystemModstamp": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "ForecastingTypeId": {"type": ["null", "string"]}, "ForecastingItemCategory": {"type": ["null", "string"]}, "DisplayPosition": {"type": ["null", "integer"]}, "IsAdjustable": {"type": ["null", "boolean"]}, "IsOwnerAdjustable": {"type": ["null", "boolean"]}}, "type": "object", "additionalProperties": false}, "key_properties": ["Id"]}
{"type": "SCHEMA", "stream": "ForecastingTypeToCategory", "schema": {"properties": {"Id": {"type": "string"}, "IsDeleted": {"type": ["null", "boolean"]}, "CreatedDate": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "CreatedById": {"type": ["null", "string"]}, "LastModifiedDate": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "LastModifiedById": {"type": ["null", "string"]}, "SystemModstamp": {"anyOf": [{"type": "string", "format": "date-time"}, {"type": ["string", "null"]}]}, "ForecastingTypeId": {"type": ["null", "string"]}, "ForecastingItemCategory": {"type": ["null", "string"]}, "DisplayPosition": {"type": ["null", "integer"]}, "IsAdjustable": {"type": ["null", "boolean"]}, "IsOwnerAdjustable": {"type": ["null", "boolean"]}, "age": {"type": "integer"}, "NewCamelCasedAttribute": {"type": "string"}}, "type": "object", "additionalProperties": false}, "key_properties": ["Id"]}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/encoded_string_data.singer | {"type": "SCHEMA", "stream": "test_strings", "key_properties": ["id"], "schema": {"required": ["id", "info"], "type": "object", "properties": {"id": {"type": "integer"}, "info": {"type": "string"}}}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 1, "info": "simple string 2837"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 2, "info": "απλή συμβολοσειρά"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 3, "info": "简单的字串"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 4, "info": "chaîne simple"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 5, "info": "quoted \"string\""}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 6, "info": "various \" \\ \/ \n escape sequences"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 7, "info": "\u006D"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 8, "info": "\u0101"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 9, "info": "\u0199"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 10, "info": "Double quoting: \\u0000 \\u0041 \\u0001"}}
{"type": "RECORD", "stream": "test_strings", "record": {"id": 11, "info": "Control Characters in string: \u0000 \u0041 \u0001"}}
{"type": "SCHEMA", "stream": "test_strings_in_objects", "key_properties": ["id"], "schema": {"required": ["id", "info"], "type": "object", "properties": {"id": {"type": "integer"}, "info": {"required": ["name"], "type": "object", "properties": {"name": {"type": "string"}, "value": {"type": "string"}}}}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 1, "info": {"name": "simple", "value": "simple string 2837"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 2, "info": {"name": "greek", "value": "απλή συμβολοσειρά"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 3, "info": {"name": "chinese", "value": "简单的字串"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 4, "info": {"name": "french", "value": "chaîne simple"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 5, "info": {"name": "quoted string", "value": "quoted \"string\""}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 6, "info": {"name": "escape sequences", "value": "various \" \\ \/ \n escape sequences"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 7, "info": {"name": "unicode", "value": "\u006D"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 8, "info": {"name": "unicode", "value": "\u0101"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 9, "info": {"name": "unicode", "value": "\u0199"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 10, "info": {"name": "Double quoting", "value": " \\u0000 \\u0041 \\u0001"}}}
{"type": "RECORD", "stream": "test_strings_in_objects", "record": {"id": 11, "info": {"name": "Control Characters in string", "value": "\u0000 \u0041 \u0001"}}}
{"type": "SCHEMA", "stream": "test_strings_in_arrays", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "strings": {"type": "array", "items": {"type": "string"}}}}}
{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 1, "strings": ["simple string", "απλή συμβολοσειρά", "简单的字串"]}}
{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 2, "strings": ["chaîne simple", "quoted \"string\""]}}
{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 3, "strings": ["various \" \\ \/ \n escape sequences"]}}
{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 4, "strings": ["\u006D", "\u0101", "\u0199"]}}
{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 5, "strings": ["aaa", "Double quoting: \\u0000 \\u0041 \\u0001"]}}
{"type": "RECORD", "stream": "test_strings_in_arrays", "record": {"id": 6, "strings": ["bbb", "Control Characters in string: \u0000 \u0041 \u0001"]}}
{"type": "STATE", "value": {"test_strings": 11, "test_strings_in_objects": 11, "test_strings_in_arrays": 6}}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/schema_updates.singer | {"type": "SCHEMA", "stream": "test_schema_updates", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "a1": {"type": "number"}, "a2": {"type": "string"}}}}
{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 1, "a1": 101, "a2": "string1"}}
{"type": "SCHEMA", "stream": "test_schema_updates", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "a1": {"type": "number"}, "a2": {"type": "string"}, "a3": {"type": "boolean"}}}}
{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 2, "a1": 102, "a2": "string2", "a3": true}}
{"type": "SCHEMA", "stream": "test_schema_updates", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "a1": {"type": "number"}, "a2": {"type": "string"}, "a3": {"type": "boolean"}, "a4": {"type": "object", "properties": {"id": {"type": "integer"}, "value": {"type": "integer"}}}, "a5": {"type": "array", "items": {"type": "string"}}}}}
{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 3, "a1": 103, "a2": "string3", "a3": false, "a4": {"id": 1, "value": 1}, "a5": [ "banana", "apple" ]}}
{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 4, "a1": 104, "a2": "string4", "a3": true, "a4": {"id": 2, "value": 22}, "a5": [ "orange", "pear" ]}}
{"type": "SCHEMA", "stream": "test_schema_updates", "key_properties": ["id"], "schema": {"type": "object", "properties": {"id": {"type": "integer"}, "a1": {"type": "number"}, "a2": {"type": "string"}, "a3": {"type": "boolean"}, "a4": {"type": "object", "properties": {"id": {"type": "integer"}, "value": {"type": "integer"}}}, "a5": {"type": "array", "items": {"type": "string"}}, "a6": {"type": "integer"}}}}
{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 5, "a1": 105, "a2": "string5", "a3": false, "a4": {"id": 3, "value": 33}, "a5": [ "apple" ], "a6": 985}}
{"type": "RECORD", "stream": "test_schema_updates", "record": {"id": 6, "a1": 106, "a2": "string6", "a3": true, "a4": {"id": 4, "value": 444}, "a5": [ "banana", "orange" ], "a6": 341}}
{"type": "STATE", "value": {"test_schema_updates": 6}}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/testing/target_test_streams/multiple_state_messages.singer | {"type": "SCHEMA", "stream": "test_multiple_state_messages_a", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}}
{"type": "SCHEMA", "stream": "test_multiple_state_messages_b", "key_properties": ["id"], "schema": {"required": ["id"], "type": "object", "properties": {"id": {"type": "integer"}, "metric": {"type": "integer"}}}}
{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 1, "metric": 100}}
{"type": "STATE", "value": {"test_multiple_state_messages_a": 1, "test_multiple_state_messages_b": 0}}
{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 2, "metric": 200}}
{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 1, "metric": 110}}
{"type": "STATE", "value": {"test_multiple_state_messages_a": 2, "test_multiple_state_messages_b": 1}}
{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 3, "metric": 300}}
{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 2, "metric": 220}}
{"type": "STATE", "value": {"test_multiple_state_messages_a": 3, "test_multiple_state_messages_b": 2}}
{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 4, "metric": 400}}
{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 5, "metric": 500}}
{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 3, "metric": 330}}
{"type": "STATE", "value": {"test_multiple_state_messages_a": 5, "test_multiple_state_messages_b": 3}}
{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 4, "metric": 440}}
{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 5, "metric": 550}}
{"type": "RECORD", "stream": "test_multiple_state_messages_b", "record": {"id": 6, "metric": 660}}
{"type": "STATE", "value": {"test_multiple_state_messages_a": 5, "test_multiple_state_messages_b": 6}}
{"type": "RECORD", "stream": "test_multiple_state_messages_a", "record": {"id": 6, "metric": 600}}
| 0 |