repo_id
stringlengths 32
150
| file_path
stringlengths 46
183
| content
stringlengths 1
290k
| __index_level_0__
int64 0
0
|
---|---|---|---|
/Users/nchebolu/work/raptor/taps/tap-zendesk/src/tap_zendesk | /Users/nchebolu/work/raptor/taps/tap-zendesk/src/tap_zendesk/schemas/user.json | {
"properties": {
"active": {
"type": [
"null",
"boolean"
]
},
"alias": {
"type": [
"null",
"string"
]
},
"created_at": {
"type": [
"null",
"string"
]
},
"custom_role_id": {
"type": [
"null",
"integer"
]
},
"default_group_id": {
"type": [
"null",
"integer"
]
},
"details": {
"type": [
"null",
"string"
]
},
"email": {
"type": [
"null",
"string"
]
},
"external_id": {
"type": [
"null",
"string"
]
},
"iana_time_zone": {
"type": [
"null",
"string"
]
},
"id": {
"type": [
"null",
"integer"
]
},
"last_login_at": {
"type": [
"null",
"string"
]
},
"locale": {
"type": [
"null",
"string"
]
},
"locale_id": {
"type": [
"null",
"integer"
]
},
"moderator": {
"type": [
"null",
"boolean"
]
},
"name": {
"type": [
"null",
"string"
]
},
"notes": {
"type": [
"null",
"string"
]
},
"only_private_comments": {
"type": [
"null",
"boolean"
]
},
"organization_id": {
"type": [
"integer",
"null"
]
},
"phone": {
"type": [
"null",
"string"
]
},
"photo": {
"properties": {
"content_type": {
"type": [
"null",
"string"
]
},
"content_url": {
"type": [
"null",
"string"
]
},
"deleted": {
"type": [
"null",
"boolean"
]
},
"file_name": {
"type": [
"null",
"string"
]
},
"height": {
"type": [
"null",
"integer"
]
},
"id": {
"type": [
"null",
"integer"
]
},
"inline": {
"type": [
"null",
"boolean"
]
},
"mapped_content_url": {
"type": [
"null",
"string"
]
},
"size": {
"type": [
"null",
"integer"
]
},
"thumbnails": {
"items": {
"properties": {
"content_type": {
"type": [
"null",
"string"
]
},
"content_url": {
"type": [
"null",
"string"
]
},
"deleted": {
"type": [
"null",
"boolean"
]
},
"file_name": {
"type": [
"null",
"string"
]
},
"height": {
"type": [
"null",
"integer"
]
},
"id": {
"type": [
"null",
"integer"
]
},
"inline": {
"type": [
"null",
"boolean"
]
},
"mapped_content_url": {
"type": [
"null",
"string"
]
},
"size": {
"type": [
"null",
"integer"
]
},
"url": {
"type": [
"null",
"string"
]
},
"width": {
"type": [
"null",
"integer"
]
}
},
"type": [
"null",
"object"
]
},
"type": [
"null",
"array"
]
},
"url": {
"type": [
"null",
"string"
]
},
"width": {
"type": [
"null",
"integer"
]
}
},
"type": [
"null",
"object"
]
},
"report_csv": {
"type": [
"null",
"boolean"
]
},
"restricted_agent": {
"type": [
"null",
"boolean"
]
},
"role": {
"type": [
"null",
"string"
]
},
"role_type": {
"type": [
"null",
"integer"
]
},
"shared": {
"type": [
"null",
"boolean"
]
},
"shared_agent": {
"type": [
"null",
"boolean"
]
},
"shared_phone_number": {
"type": [
"boolean",
"null"
]
},
"signature": {
"type": [
"null",
"string"
]
},
"suspended": {
"type": [
"null",
"boolean"
]
},
"tags": {
"items": {
"type": [
"null",
"string"
]
},
"type": [
"null",
"array"
]
},
"ticket_restriction": {
"type": [
"null",
"string"
]
},
"time_zone": {
"type": [
"null",
"string"
]
},
"two_factor_auth_enabled": {
"type": [
"boolean",
"null"
]
},
"updated_at": {
"type": [
"null",
"string"
]
},
"url": {
"type": [
"null",
"string"
]
},
"verified": {
"type": [
"null",
"boolean"
]
}
},
"type": [
"null",
"object"
]
}
| 0 |
/Users/nchebolu/work/raptor/taps | /Users/nchebolu/work/raptor/taps/tap-lucidchart/pyproject.toml | [build-system]
requires = ["setuptools", "setuptools-scm"]
build-backend = "setuptools.build_meta"
[project]
name = "tap-lucidchart"
description = "`tap-lucidchart` is a Singer tap for lucidchart, built with the Meltano SDK for Singer Taps."
readme = "README.md"
requires-python = ">=3.11"
keywords = ["meltano", "lucidchart"]
classifiers = [
"Framework :: Meltano",
"Programming Language :: Python :: 3",
]
dynamic = ["version"]
dependencies = [
"singer-sdk==0.25.0",
"requests==2.29.0",
"cached-property==1"
]
[project.optional-dependencies]
dev = [
"pre-commit==2.20.0",
"black[d]==22.12.0",
"aiohttp",
"keyring",
"meltano==2.18.0"
]
test = [
"pytest==7.2.0",
"responses==0.22.0",
"freezegun==1.2.2"
]
[project.scripts]
tap-lucidchart = "tap_lucidchart.tap:Taplucidchart.cli"
[tool.isort]
profile = "black"
[tool.black]
line-length = 120
target-version = ['py311']
[tool.pyright]
exclude = [".venv", "tests", "migrations"]
pythonVersion = "3.11"
include = ["src"]
venvPath = "."
venv = ".venv"
| 0 |
/Users/nchebolu/work/raptor/taps | /Users/nchebolu/work/raptor/taps/tap-lucidchart/README.md | # tap-lucidchart
`tap-lucidchart` is a Singer tap for lucidchart.
Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps.
## Installation
An example GitLab installation command:
```bash
pipx install git+https://gitlab.com/ORG_NAME/tap-lucidchart.git
```
## Configuration
### Accepted Config Options
| Setting | Required | Default | Description |
|:--------------------|:--------:|:-------:|:------------|
| client_id | True | None | The client's ID for the API service |
| client_secret | True | None | The client's secret for the API service |
| refresh_token | True | None | The refresh token to authenticate against the API service |
A full list of supported settings and capabilities for this
tap is available by running:
```bash
tap-lucidchart --about
```
### Configure using environment variables
This Singer tap will automatically import any environment variables within the working directory's
`.env` if the `--config=ENV` is provided, such that config values will be considered if a matching
environment variable is set either in the terminal context or in the `.env` file.
### Source Authentication and Authorization
1. Add a new OAuth2.0 client under Admin->App Integrations->Custom Oauth Apps (Click Settings button). This will give you a client ID and a client secret. Add a Redirect URI to your OAuth2.0 client with the following format:
- https://lucid.app/oauth2/clients/{clientID}/redirect
2. Then navigate to the following webpage and grant access:
- https://lucid.app/oauth2/authorizeAccount?client_id={clientID}&redirect_uri=https://lucid.app/oauth2/clients/{clientID}/redirect&scope=account.info%20account.user:readonly%20offline_access
3. Copy the authorization code and make a POST request with the following format:
- https://api.lucid.co/oauth2/token
- Headers:
```
"Accept" = "application/json"
```
- Body:
```
{
"code": "{authorizationCode}",
"client_id": "{clientID}",
"client_secret": "{clientSecret}",
"grant_type": "authorization_code",
"redirect_uri": "https://lucid.app/oauth2/clients/{clientID}/redirect"
}
```
4. Copy the returned refresh token into the configuration for tap-lucidchart.
### Refresh token workaround
Due to the implementation of LucidChart's API, we've had to use a workaround to ensure a refresh token can be kept long-term. Specifically, any call to LucidChart's API that generates a new access token also invalidates the refresh token that was used to create it (providing a new refresh token instead). Thus, a refresh token provided as an environment variable will become outdated after only a single invocation of the tap.
To address this, the first time the tap is run, the provided refresh token is used. But then, the newly returned refresh token is stored as a .txt file in `.secrets/RefreshStorage.txt`. The next time a refresh token is needed, the tap will default to the value stored in this file instead of using one provided as an environment variable. To fully reset the tap and use a fresh refresh token, it is necessary to delete the `RefreshStorage.txt` file.
You can check whether any given invocation of the tap is using a refresh token from environment variables or a refresh token from `RefreshStorage.txt` by way of logging statements which are automatically output to the terminal.
## Usage
You can easily run `tap-lucidchart` by itself or in a pipeline using [Meltano](https://meltano.com/).
### Executing the Tap Directly
```bash
tap-lucidchart --version
tap-lucidchart --help
tap-lucidchart --config CONFIG --discover > ./catalog.json
```
## Developer Resources
Follow these instructions to contribute to this project.
### Initialize your Development Environment
```bash
pipx install poetry
poetry install
```
### Create and Run Tests
Create tests within the `tests` subfolder and
then run:
```bash
poetry run pytest
```
You can also test the `tap-lucidchart` CLI interface directly using `poetry run`:
```bash
poetry run tap-lucidchart --help
```
### Testing with [Meltano](https://www.meltano.com)
_**Note:** This tap will work in any Singer environment and does not require Meltano.
Examples here are for convenience and to streamline end-to-end orchestration scenarios._
Next, install Meltano (if you haven't already) and any needed plugins:
```bash
# Install meltano
pipx install meltano
# Initialize meltano within this directory
cd tap-lucidchart
meltano install
```
Now you can test and orchestrate using Meltano:
```bash
# Test invocation:
meltano invoke tap-lucidchart --version
# OR run a test `elt` pipeline:
meltano elt tap-lucidchart target-jsonl
```
### SDK Dev Guide
See the [dev guide](https://sdk.meltano.com/en/latest/dev_guide.html) for more instructions on how to use the SDK to
develop your own taps and targets.
| 0 |
/Users/nchebolu/work/raptor/taps | /Users/nchebolu/work/raptor/taps/tap-lucidchart/meltano.yml | version: 1
send_anonymous_usage_stats: true
project_id: "tap-lucidchart"
default_environment: test
environments:
- name: test
plugins:
extractors:
- name: "tap-lucidchart"
namespace: "tap_lucidchart"
pip_url: -e .
capabilities:
- state
- catalog
- discover
- about
- stream-maps
settings:
- name: client_id
kind: password
- name: client_secret
kind: password
- name: refresh_token
kind: password
config:
loaders:
- name: target-jsonl
variant: andyh1203
pip_url: target-jsonl
| 0 |
/Users/nchebolu/work/raptor/taps/tap-lucidchart/src | /Users/nchebolu/work/raptor/taps/tap-lucidchart/src/tap_lucidchart/auth.py | """lucidchart Authentication."""
from __future__ import annotations
from datetime import datetime
from singer_sdk.authenticators import OAuthAuthenticator, SingletonMeta
from singer_sdk.streams import Stream as RESTStreamBase
import requests
from singer_sdk.helpers._util import utc_now
class lucidchartAuthenticator(OAuthAuthenticator, metaclass=SingletonMeta):
    """API Authenticator for OAuth 2.0 flows."""

    # Location where the rotated refresh token is persisted between runs.
    file_name = r".secrets/RefreshStorage.txt"

    @property
    def oauth_request_body(self) -> dict:
        """Define the OAuth request body for the LucidChart API.

        Returns:
            A dict with the request body.
        """
        # LucidChart rotates refresh tokens: using one invalidates it and a
        # replacement is issued. Prefer the token persisted by the previous
        # run over the (now stale) configured one. Reading from a plain file
        # is not the ideal way of doing things; see the project README.
        try:
            with open(self.file_name, "r") as refresh_storage:
                # strip() guards against a trailing newline in the stored file.
                refresh_token = refresh_storage.readline().strip()
            self.logger.info(
                "Using refresh token from file storage: %s", self.file_name
            )
        except OSError:
            # No stored token yet (first run, or file deleted to reset the
            # tap) -- fall back to the configured value.
            refresh_token = self.config["refresh_token"]
            self.logger.info("Using refresh token from environment variables")
        return {
            "client_id": self.config["client_id"],
            "client_secret": self.config["client_secret"],
            "grant_type": "refresh_token",
            "refresh_token": refresh_token,
        }

    # Authentication and refresh
    def update_access_token(self) -> None:
        """Update `access_token` along with: `last_refreshed` and `expires_in`.

        Raises:
            RuntimeError: When OAuth login fails.
        """
        request_time = utc_now()
        auth_request_payload = self.oauth_request_payload
        token_response = requests.post(
            self.auth_endpoint,
            data=auth_request_payload,
            timeout=60,
        )
        try:
            token_response.raise_for_status()
        except requests.HTTPError as ex:
            raise RuntimeError(
                f"Failed OAuth login, response was '{token_response.json()}'. {ex}",
            ) from ex
        self.logger.info("OAuth authorization attempt was successful.")
        token_json = token_response.json()
        self.access_token = token_json["access_token"]
        # Persist the newly issued refresh token: the one just used is now
        # invalid (LucidChart rotates tokens on every exchange). Storing it
        # in a plain file is not the ideal way of doing things.
        with open(self.file_name, "w") as refresh_storage:
            refresh_storage.write(token_json["refresh_token"])
        self.logger.info("Refresh token has been stored in %s", self.file_name)
        self.expires_in = token_json.get("expires_in", self._default_expiration)
        if self.expires_in is None:
            self.logger.debug(
                "No expires_in received in OAuth response and no "
                "default_expiration set. Token will be treated as if it never "
                "expires.",
            )
        self.last_refreshed = request_time

    @classmethod
    def create_for_stream(cls, stream) -> lucidchartAuthenticator:
        """Instantiate an authenticator for a specific Singer stream.

        Args:
            stream: The Singer stream instance.

        Returns:
            A new authenticator.
        """
        return cls(
            stream=stream,
            auth_endpoint="https://api.lucid.co/oauth2/token",
            # account.user:readonly is the only scope needed for current
            # operations. Expanding the tap may require additional scopes.
            oauth_scopes="account.user:readonly",
        )
| 0 |
/Users/nchebolu/work/raptor/taps/tap-lucidchart/src | /Users/nchebolu/work/raptor/taps/tap-lucidchart/src/tap_lucidchart/streams.py | """Stream type classes for tap-lucidchart."""
from __future__ import annotations
from pathlib import Path
from singer_sdk import typing as th # JSON Schema typing helpers
from tap_lucidchart.client import lucidchartStream, lucidchartPaginator
# Directory containing the JSON schema files bundled alongside this module.
SCHEMAS_DIR = Path(__file__).parent / Path("./schemas")
class UserStream(lucidchartStream):
    """Stream of LucidChart account users from the `/users` endpoint."""

    name = "user"
    path = "/users"
    primary_keys = ["id"]
    # Schema is loaded from the bundled JSON file named after the stream.
    schema_filepath = SCHEMAS_DIR / f"{name}.json"

    def get_new_paginator(self):
        """Return a fresh link-header paginator for this stream."""
        return lucidchartPaginator()
| 0 |
/Users/nchebolu/work/raptor/taps/tap-lucidchart/src | /Users/nchebolu/work/raptor/taps/tap-lucidchart/src/tap_lucidchart/client.py | """REST client handling, including lucidchartStream base class."""
from __future__ import annotations
import sys
from pathlib import Path
from typing import Any
from collections.abc import Callable, Iterable
from urllib.parse import parse_qsl
import requests
from singer_sdk.pagination import BaseAPIPaginator
from singer_sdk.helpers.jsonpath import extract_jsonpath
from singer_sdk.streams import RESTStream
from tap_lucidchart.auth import lucidchartAuthenticator
from functools import cached_property
# Type alias for a `requests`-style auth callable that signs a prepared request.
_Auth = Callable[[requests.PreparedRequest], requests.PreparedRequest]
class lucidchartPaginator(BaseAPIPaginator):
    """Custom paginator that extracts a HATEOAS link from the response header.

    The value returned by ``get_next`` is a list of (name, value) query
    parameter pairs that can be merged into the request's URL parameters
    with no further processing.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        """Create a new paginator.

        Args:
            args: Paginator positional arguments for base class.
            kwargs: Paginator keyword arguments for base class.
        """
        super().__init__(None, *args, **kwargs)

    def get_next(self, response: requests.Response):
        """Extract pagination parameters from the response's ``link`` header.

        The header value looks like::

            <https://api.lucid.co/users?pageSize=20&pageToken=eyJvIjoiMSIsImUiOjE2Mjg2OTc3OTF9>; rel="next"

        Only the query-string portion of the wrapped URL is parsed. Parsing
        the full ``<scheme://host/path?...`` string (as done previously)
        mangled the first parameter's name into
        ``<https://api.lucid.co/users?pageSize``, so the page size was
        silently dropped from subsequent requests.

        Args:
            response: The most recent API response.

        Returns:
            A list of (name, value) query parameter pairs that can be added
            directly to the URL parameters (page size is preserved), or
            ``None`` when there is no further page.
        """
        if "link" not in response.headers:
            return None
        url = response.headers["link"].split(">;", 1)[0].lstrip("<")
        # Keep only what follows the first "?" -- the query string proper.
        _, _, query = url.partition("?")
        return parse_qsl(query) or None
class lucidchartStream(RESTStream):
    """Base stream class for the LucidChart REST API."""

    url_base = "https://api.lucid.co"
    records_jsonpath = "$[*]"
    next_page_token_jsonpath = "$.next_page"

    @cached_property
    def authenticator(self) -> _Auth:
        """Return a new authenticator object.

        Returns:
            An authenticator instance.
        """
        return lucidchartAuthenticator.create_for_stream(self)

    @property
    def http_headers(self) -> dict:
        """Return the http headers needed.

        Returns:
            A dictionary of HTTP headers.
        """
        return {
            "Lucid-Api-Version": "1",
            "Accept": "application/json",
        }

    def get_url_params(
        self,
        context: dict | None,
        next_page_token: Any | None,
    ) -> dict[str, Any]:
        """Return a dictionary of values to be used in URL parameterization.

        Args:
            context: The stream context.
            next_page_token: The next page index or value.

        Returns:
            A dictionary of URL query parameters.
        """
        # The pagination token already carries the page-size parameter, so
        # pageSize is only set explicitly on the first request.
        if next_page_token:
            return dict(next_page_token)
        return {"pageSize": "200"}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-lucidchart/src | /Users/nchebolu/work/raptor/taps/tap-lucidchart/src/tap_lucidchart/__init__.py | """Tap for lucidchart."""
| 0 |
/Users/nchebolu/work/raptor/taps/tap-lucidchart/src | /Users/nchebolu/work/raptor/taps/tap-lucidchart/src/tap_lucidchart/tap.py | """lucidchart tap class."""
from __future__ import annotations
from singer_sdk import Tap
from singer_sdk import typing as th # JSON schema typing helpers
from tap_lucidchart import streams
class Taplucidchart(Tap):
    """Singer tap class for LucidChart."""

    name = "tap-lucidchart"

    # Every setting is a required OAuth credential; secret=True flags the
    # value as protected so it is redacted from logs and `--about` output.
    config_jsonschema = th.PropertiesList(
        th.Property(
            "client_id",
            th.StringType,
            required=True,
            secret=True,
            description="The client's ID for the API service",
        ),
        th.Property(
            "client_secret",
            th.StringType,
            required=True,
            secret=True,
            description="The client's secret for the API service",
        ),
        th.Property(
            "refresh_token",
            th.StringType,
            required=True,
            secret=True,
            description="The refresh token to authenticate against the API service",
        ),
    ).to_dict()

    def discover_streams(self) -> list[streams.lucidchartStream]:
        """Return a list of discovered streams.

        Returns:
            A list of discovered streams.
        """
        return [streams.UserStream(self)]
# Allow invoking the tap directly as a script (e.g. `python tap.py --about`).
if __name__ == "__main__":
    Taplucidchart.cli()
| 0 |
/Users/nchebolu/work/raptor/taps/tap-lucidchart/src/tap_lucidchart | /Users/nchebolu/work/raptor/taps/tap-lucidchart/src/tap_lucidchart/schemas/user.json | {
"properties": {
"accountId": {
"type": [
"null",
"integer"
]
},
"email": {
"type": [
"null",
"string"
]
},
"name": {
"type": [
"null",
"string"
]
},
"roles": {
"items": {
"type": [
"null",
"string"
]
},
"type": [
"null",
"array"
]
},
"userId": {
"type": [
"null",
"integer"
]
},
"username": {
"type": [
"null",
"string"
]
}
},
"type": [
"null",
"object"
]
}
| 0 |
/Users/nchebolu/work/raptor/taps | /Users/nchebolu/work/raptor/taps/tap-okta/pyproject.toml | [build-system]
requires = ["setuptools", "setuptools-scm"]
build-backend = "setuptools.build_meta"
[project]
name = "tap-okta"
description = "`tap-okta` is a Singer tap for okta, built with the Meltano SDK for Singer Taps."
readme = "README.md"
requires-python = ">=3.11"
keywords = ["meltano", "okta"]
classifiers = [
"Framework :: Meltano",
"Programming Language :: Python :: 3",
]
dynamic = ["version"]
dependencies = [
"singer-sdk==0.25.0",
"requests==2.29.0"
]
[project.optional-dependencies]
dev = [
"pre-commit==2.20.0",
"black[d]==22.12.0",
"aiohttp",
"keyring",
"meltano==2.18.0"
]
test = [
"pytest==7.2.0",
"responses==0.22.0",
"freezegun==1.2.2"
]
[project.scripts]
tap-okta = "tap_okta.tap:Tapokta.cli"
[tool.isort]
profile = "black"
[tool.black]
line-length = 120
target-version = ['py311']
[tool.pyright]
exclude = [".venv", "tests", "migrations"]
pythonVersion = "3.11"
include = ["src"]
venvPath = "."
venv = ".venv"
| 0 |
/Users/nchebolu/work/raptor/taps | /Users/nchebolu/work/raptor/taps/tap-okta/README.md | # tap-okta
`tap-okta` is a Singer tap for okta.
Built with the [Meltano Tap SDK](https://sdk.meltano.com) for Singer Taps.
## Contributing
### Pre-commit
Pre-commit ensures that we all use the same formatting, it can be installed through
```shell
poetry run pre-commit install
```
Which will run pre-commit before committing the repository.
### Commitizen
[Commitizen](https://www.npmjs.com/package/commitizen) is a tool which allows you to create formatted commit messages
that conform to the AngularJS styling. We use semantic-release.
## Running with Meltano
You can install Meltano through `pipx` although it can also be run through docker
```shell
docker run -it -v $(pwd):/project -w /project --entrypoint /bin/bash meltano/meltano
```
Once inside the container you can run any standard Meltano command
## Setup
Run
```shell
meltano install
```
## Configuration
There are 2 mandatory fields to run, `auth_token` and `api_url`. These are secret fields and should be set directly in the `.env` file
or set through the command line with
```shell
meltano config --format env tap-okta set api_url https://myokta.okta.com
```
Please make sure not to write secrets directly in the `meltano.yml` file and commit them into source control
## Discovery
Streams and fields can be discovered through running the `select` command
```shell
meltano select tap-okta --list --all
```
If you wish to select individual fields this can also be done at this point or directly in meltano.yml
## Run pipeline
The pipeline can be run with
```shell
meltano elt tap-okta target-jsonl --job_id=some_job_id
```
The output files can be found in the output directory
### SDK Dev Guide
See the [dev guide](https://sdk.meltano.com/en/latest/dev_guide.html) for more instructions on how to use the SDK to
develop your own taps and targets.
| 0 |
/Users/nchebolu/work/raptor/taps | /Users/nchebolu/work/raptor/taps/tap-okta/.env | # TAP_OKTA_API_URL=“https://mraval.okta.com”
# TAP_OKTA_AUTH_TOKEN=“00k1YygHgvwsZDelfm5Vqq_LCcNdow6NUGDc0KNXx6"
TAP_OKTA_API_URL=“https://mravaloie-admin.oktapreview.com”
TAP_OKTA_AUTH_TOKEN=“00x7rWKx7WAhV5h3Ck5lv661zXDix4ivfAodgzs7Lb” | 0 |
/Users/nchebolu/work/raptor/taps | /Users/nchebolu/work/raptor/taps/tap-okta/meltano.yml | version: 1
send_anonymous_usage_stats: false
project_id: tap-okta
plugins:
extractors:
- name: tap-okta
namespace: tap_okta
pip_url: -e .
capabilities:
- state
- catalog
- discover
settings:
- name: auth_token
kind: string
description: The authentication token for the Okta instance
- name: api_url
kind: string
description: The url for the Okta endpoint
- name: user_page_limit
kind: integer
value: 200
description: The page limit for the users stream
- name: log_page_limit
kind: integer
value: 200
description: The page limit for the system log stream
select:
- 'users.*'
loaders:
- name: target-jsonl
variant: andyh1203
pip_url: target-jsonl
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/analytics.json | {"client_id": "5a828c49-984f-47e6-8ade-81847fd042d1", "project_id": "1b2753f5-ed41-b72e-ca09-b1b84703f516", "send_anonymous_usage_stats": false} | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/cache/discovery.yml | # Increment this version number whenever the schema of discovery.yml is changed.
# See https://docs.meltano.com/contribute/plugins#discoveryyml-version for more information.
version: 22
extractors:
- name: tap-adwords
namespace: tap_adwords
label: Google Ads
description: Advertising Platform
variants:
- name: singer-io
docs: https://hub.meltano.com/extractors/adwords.html
repo: https://github.com/singer-io/tap-adwords
pip_url: git+https://github.com/singer-io/tap-adwords.git
capabilities:
- properties
- discover
- state
settings_group_validation:
- [developer_token, oauth_client_id, oauth_client_secret, refresh_token, user_agent,
customer_ids, start_date]
settings:
- name: developer_token
kind: password
label: Developer Token
description: Your Developer Token for Google AdWord Application
placeholder: Ex. *****************
- name: oauth_client_id
kind: password
label: OAuth Client ID
description: Your Google OAuth Client ID
placeholder: Ex. 123456789012345.apps.googleusercontent.com
- name: oauth_client_secret
kind: password
label: OAuth Client Secret
description: Your Google OAuth Client Secret
placeholder: Ex. *****************
- name: refresh_token
kind: oauth
label: Access Token
description: The Refresh Token generated through the OAuth flow run using your
OAuth Client and your Developer Token
oauth:
# https://oauth.svc.meltanodata.com/google-adwords/
provider: google-adwords
placeholder: Ex. *****************
- name: customer_ids
label: Account ID(s)
description: A comma-separated list of Ad Account IDs to replicate data from
placeholder: Ex. 1234567890,1234567891,1234567892
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: end_date
kind: date_iso8601
description: Date up to when historical data will be extracted.
- name: user_agent
value: tap-adwords via Meltano
label: User Agent for your OAuth Client
description: The User Agent for your OAuth Client (used in requests made to
the AdWords API)
placeholder: Ex. tap-adwords via Meltano <user@example.com>
- name: conversion_window_days
kind: integer
value: 0
label: Conversion Window Days
description: How many Days before the Start Date to fetch data for Performance
Reports
- name: primary_keys
kind: object
value:
KEYWORDS_PERFORMANCE_REPORT:
- customerID
- campaignID
- adGroupID
- keywordID
- day
- network
- device
AD_PERFORMANCE_REPORT:
- customerID
- campaignID
- adGroupID
- adID
- day
- network
- device
label: Primary Keys
description: Primary Keys for the selected Entities (Streams)
select:
- campaigns.*
- ad_groups.*
- ads.*
- accounts.*
- KEYWORDS_PERFORMANCE_REPORT.customerID
- KEYWORDS_PERFORMANCE_REPORT.account
- KEYWORDS_PERFORMANCE_REPORT.currency
- KEYWORDS_PERFORMANCE_REPORT.timeZone
- KEYWORDS_PERFORMANCE_REPORT.clientName
- KEYWORDS_PERFORMANCE_REPORT.campaign
- KEYWORDS_PERFORMANCE_REPORT.campaignID
- KEYWORDS_PERFORMANCE_REPORT.campaignState
- KEYWORDS_PERFORMANCE_REPORT.adGroup
- KEYWORDS_PERFORMANCE_REPORT.adGroupID
- KEYWORDS_PERFORMANCE_REPORT.adGroupState
- KEYWORDS_PERFORMANCE_REPORT.day
- KEYWORDS_PERFORMANCE_REPORT.network
- KEYWORDS_PERFORMANCE_REPORT.device
- KEYWORDS_PERFORMANCE_REPORT.clicks
- KEYWORDS_PERFORMANCE_REPORT.cost
- KEYWORDS_PERFORMANCE_REPORT.impressions
- KEYWORDS_PERFORMANCE_REPORT.interactions
- KEYWORDS_PERFORMANCE_REPORT.engagements
- KEYWORDS_PERFORMANCE_REPORT.conversions
- KEYWORDS_PERFORMANCE_REPORT.allConv
- KEYWORDS_PERFORMANCE_REPORT.views
- KEYWORDS_PERFORMANCE_REPORT.activeViewViewableImpressions
- KEYWORDS_PERFORMANCE_REPORT.activeViewMeasurableImpr
- KEYWORDS_PERFORMANCE_REPORT.activeViewMeasurableCost
- KEYWORDS_PERFORMANCE_REPORT.gmailClicksToWebsite
- KEYWORDS_PERFORMANCE_REPORT.gmailSaves
- KEYWORDS_PERFORMANCE_REPORT.gmailForwards
- KEYWORDS_PERFORMANCE_REPORT.keywordID
- KEYWORDS_PERFORMANCE_REPORT.keyword
- KEYWORDS_PERFORMANCE_REPORT.keywordState
- KEYWORDS_PERFORMANCE_REPORT.criterionServingStatus
- KEYWORDS_PERFORMANCE_REPORT.destinationURL
- KEYWORDS_PERFORMANCE_REPORT.matchType
- KEYWORDS_PERFORMANCE_REPORT.topOfPageCPC
- KEYWORDS_PERFORMANCE_REPORT.firstPageCPC
- KEYWORDS_PERFORMANCE_REPORT.imprAbsTop
- KEYWORDS_PERFORMANCE_REPORT.activeViewAvgCPM
- KEYWORDS_PERFORMANCE_REPORT.activeViewViewableCTR
- KEYWORDS_PERFORMANCE_REPORT.activeViewMeasurableImprImpr
- KEYWORDS_PERFORMANCE_REPORT.activeViewViewableImprMeasurableImpr
- KEYWORDS_PERFORMANCE_REPORT.allConvRate
- KEYWORDS_PERFORMANCE_REPORT.allConvValue
- KEYWORDS_PERFORMANCE_REPORT.avgCost
- KEYWORDS_PERFORMANCE_REPORT.avgCPC
- KEYWORDS_PERFORMANCE_REPORT.avgCPE
- KEYWORDS_PERFORMANCE_REPORT.avgCPM
- KEYWORDS_PERFORMANCE_REPORT.avgCPV
- KEYWORDS_PERFORMANCE_REPORT.avgPosition
- KEYWORDS_PERFORMANCE_REPORT.convRate
- KEYWORDS_PERFORMANCE_REPORT.totalConvValue
- KEYWORDS_PERFORMANCE_REPORT.costAllConv
- KEYWORDS_PERFORMANCE_REPORT.costConv
- KEYWORDS_PERFORMANCE_REPORT.costConvCurrentModel
- KEYWORDS_PERFORMANCE_REPORT.crossDeviceConv
- KEYWORDS_PERFORMANCE_REPORT.ctr
- KEYWORDS_PERFORMANCE_REPORT.conversionsCurrentModel
- KEYWORDS_PERFORMANCE_REPORT.convValueCurrentModel
- KEYWORDS_PERFORMANCE_REPORT.engagementRate
- KEYWORDS_PERFORMANCE_REPORT.interactionRate
- KEYWORDS_PERFORMANCE_REPORT.interactionTypes
- KEYWORDS_PERFORMANCE_REPORT.imprTop
- KEYWORDS_PERFORMANCE_REPORT.valueAllConv
- KEYWORDS_PERFORMANCE_REPORT.valueConv
- KEYWORDS_PERFORMANCE_REPORT.valueConvCurrentModel
- KEYWORDS_PERFORMANCE_REPORT.videoPlayedTo100
- KEYWORDS_PERFORMANCE_REPORT.videoPlayedTo25
- KEYWORDS_PERFORMANCE_REPORT.videoPlayedTo50
- KEYWORDS_PERFORMANCE_REPORT.videoPlayedTo75
- KEYWORDS_PERFORMANCE_REPORT.viewRate
- KEYWORDS_PERFORMANCE_REPORT.viewThroughConv
- KEYWORDS_PERFORMANCE_REPORT.searchAbsTopIS
- KEYWORDS_PERFORMANCE_REPORT.searchLostAbsTopISBudget
- KEYWORDS_PERFORMANCE_REPORT.searchLostTopISBudget
- KEYWORDS_PERFORMANCE_REPORT.searchExactMatchIS
- KEYWORDS_PERFORMANCE_REPORT.searchImprShare
- KEYWORDS_PERFORMANCE_REPORT.searchLostAbsTopISRank
- KEYWORDS_PERFORMANCE_REPORT.searchLostISRank
- KEYWORDS_PERFORMANCE_REPORT.searchLostTopISRank
- KEYWORDS_PERFORMANCE_REPORT.searchTopIS
- AD_PERFORMANCE_REPORT.customerID
- AD_PERFORMANCE_REPORT.account
- AD_PERFORMANCE_REPORT.currency
- AD_PERFORMANCE_REPORT.timeZone
- AD_PERFORMANCE_REPORT.clientName
- AD_PERFORMANCE_REPORT.campaign
- AD_PERFORMANCE_REPORT.campaignID
- AD_PERFORMANCE_REPORT.campaignState
- AD_PERFORMANCE_REPORT.adGroup
- AD_PERFORMANCE_REPORT.adGroupID
- AD_PERFORMANCE_REPORT.adGroupState
- AD_PERFORMANCE_REPORT.day
- AD_PERFORMANCE_REPORT.network
- AD_PERFORMANCE_REPORT.device
- AD_PERFORMANCE_REPORT.clicks
- AD_PERFORMANCE_REPORT.cost
- AD_PERFORMANCE_REPORT.impressions
- AD_PERFORMANCE_REPORT.interactions
- AD_PERFORMANCE_REPORT.engagements
- AD_PERFORMANCE_REPORT.conversions
- AD_PERFORMANCE_REPORT.allConv
- AD_PERFORMANCE_REPORT.views
- AD_PERFORMANCE_REPORT.activeViewViewableImpressions
- AD_PERFORMANCE_REPORT.activeViewMeasurableImpr
- AD_PERFORMANCE_REPORT.activeViewMeasurableCost
- AD_PERFORMANCE_REPORT.gmailClicksToWebsite
- AD_PERFORMANCE_REPORT.gmailSaves
- AD_PERFORMANCE_REPORT.gmailForwards
- AD_PERFORMANCE_REPORT.adID
- AD_PERFORMANCE_REPORT.adState
- AD_PERFORMANCE_REPORT.approvalStatus
- AD_PERFORMANCE_REPORT.adType
- AD_PERFORMANCE_REPORT.adStrength
- AD_PERFORMANCE_REPORT.autoAppliedAdSuggestion
- AD_PERFORMANCE_REPORT.ad
- AD_PERFORMANCE_REPORT.descriptionLine1
- AD_PERFORMANCE_REPORT.descriptionLine2
- AD_PERFORMANCE_REPORT.finalURL
- AD_PERFORMANCE_REPORT.displayURL
- AD_PERFORMANCE_REPORT.description
- AD_PERFORMANCE_REPORT.headline1
- AD_PERFORMANCE_REPORT.headline2
- AD_PERFORMANCE_REPORT.path1
- AD_PERFORMANCE_REPORT.businessName
- AD_PERFORMANCE_REPORT.callToActionTextResponsive
- AD_PERFORMANCE_REPORT.shortHeadline
- AD_PERFORMANCE_REPORT.longHeadline
- AD_PERFORMANCE_REPORT.promotionTextResponsive
- AD_PERFORMANCE_REPORT.responsiveSearchAdPath1
- AD_PERFORMANCE_REPORT.responsiveSearchAdHeadlines
- AD_PERFORMANCE_REPORT.responsiveSearchAdDescriptions
- AD_PERFORMANCE_REPORT.gmailAdBusinessName
- AD_PERFORMANCE_REPORT.gmailAdHeadline
- AD_PERFORMANCE_REPORT.gmailAdDescription
- AD_PERFORMANCE_REPORT.imageAdName
- AD_PERFORMANCE_REPORT.businessNameMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.longHeadlineMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.headlinesMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.callToActionTextMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.promotionTextMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.imprAbsTop
- AD_PERFORMANCE_REPORT.activeViewAvgCPM
- AD_PERFORMANCE_REPORT.activeViewViewableCTR
- AD_PERFORMANCE_REPORT.activeViewMeasurableImprImpr
- AD_PERFORMANCE_REPORT.activeViewViewableImprMeasurableImpr
- AD_PERFORMANCE_REPORT.allConvRate
- AD_PERFORMANCE_REPORT.allConvValue
- AD_PERFORMANCE_REPORT.avgCost
- AD_PERFORMANCE_REPORT.avgCPC
- AD_PERFORMANCE_REPORT.avgCPE
- AD_PERFORMANCE_REPORT.avgCPM
- AD_PERFORMANCE_REPORT.avgCPV
- AD_PERFORMANCE_REPORT.avgPosition
- AD_PERFORMANCE_REPORT.convRate
- AD_PERFORMANCE_REPORT.totalConvValue
- AD_PERFORMANCE_REPORT.costAllConv
- AD_PERFORMANCE_REPORT.costConv
- AD_PERFORMANCE_REPORT.costConvCurrentModel
- AD_PERFORMANCE_REPORT.crossDeviceConv
- AD_PERFORMANCE_REPORT.ctr
- AD_PERFORMANCE_REPORT.conversionsCurrentModel
- AD_PERFORMANCE_REPORT.convValueCurrentModel
- AD_PERFORMANCE_REPORT.engagementRate
- AD_PERFORMANCE_REPORT.interactionRate
- AD_PERFORMANCE_REPORT.interactionTypes
- AD_PERFORMANCE_REPORT.imprTop
- AD_PERFORMANCE_REPORT.valueAllConv
- AD_PERFORMANCE_REPORT.valueConv
- AD_PERFORMANCE_REPORT.valueConvCurrentModel
- AD_PERFORMANCE_REPORT.videoPlayedTo100
- AD_PERFORMANCE_REPORT.videoPlayedTo25
- AD_PERFORMANCE_REPORT.videoPlayedTo50
- AD_PERFORMANCE_REPORT.videoPlayedTo75
- AD_PERFORMANCE_REPORT.viewRate
- AD_PERFORMANCE_REPORT.viewThroughConv
- name: meltano
repo: https://gitlab.com/meltano/tap-adwords
pip_url: git+https://gitlab.com/meltano/tap-adwords.git
capabilities:
- properties
- discover
- state
settings_group_validation:
- [developer_token, oauth_client_id, oauth_client_secret, refresh_token, user_agent,
customer_ids, start_date]
settings:
- name: developer_token
kind: password
label: Developer Token
    description: Your Developer Token for the Google AdWords application
placeholder: Ex. *****************
- name: oauth_client_id
kind: password
label: OAuth Client ID
description: Your Google OAuth Client ID
placeholder: Ex. 123456789012345.apps.googleusercontent.com
- name: oauth_client_secret
kind: password
label: OAuth Client Secret
description: Your Google OAuth Client Secret
placeholder: Ex. *****************
- name: refresh_token
kind: oauth
    label: Refresh Token
description: The Refresh Token generated through the OAuth flow run using your
OAuth Client and your Developer Token
oauth:
# https://oauth.svc.meltanodata.com/google-adwords/
provider: google-adwords
placeholder: Ex. *****************
- name: customer_ids
label: Account ID(s)
description: A comma-separated list of Ad Account IDs to replicate data from
placeholder: Ex. 1234567890,1234567891,1234567892
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: end_date
kind: date_iso8601
description: Date up to when historical data will be extracted.
- name: user_agent
value: tap-adwords via Meltano
label: User Agent for your OAuth Client
description: The User Agent for your OAuth Client (used in requests made to
the AdWords API)
placeholder: Ex. tap-adwords via Meltano <user@example.com>
- name: conversion_window_days
kind: integer
value: 0
label: Conversion Window Days
description: How many Days before the Start Date to fetch data for Performance
Reports
- name: primary_keys
kind: object
value:
KEYWORDS_PERFORMANCE_REPORT:
- customerID
- campaignID
- adGroupID
- keywordID
- day
- network
- device
AD_PERFORMANCE_REPORT:
- customerID
- campaignID
- adGroupID
- adID
- day
- network
- device
label: Primary Keys
description: Primary Keys for the selected Entities (Streams)
hidden: true
select:
- campaigns.*
- ad_groups.*
- ads.*
- accounts.*
- KEYWORDS_PERFORMANCE_REPORT.customerID
- KEYWORDS_PERFORMANCE_REPORT.account
- KEYWORDS_PERFORMANCE_REPORT.currency
- KEYWORDS_PERFORMANCE_REPORT.timeZone
- KEYWORDS_PERFORMANCE_REPORT.clientName
- KEYWORDS_PERFORMANCE_REPORT.campaign
- KEYWORDS_PERFORMANCE_REPORT.campaignID
- KEYWORDS_PERFORMANCE_REPORT.campaignState
- KEYWORDS_PERFORMANCE_REPORT.adGroup
- KEYWORDS_PERFORMANCE_REPORT.adGroupID
- KEYWORDS_PERFORMANCE_REPORT.adGroupState
- KEYWORDS_PERFORMANCE_REPORT.day
- KEYWORDS_PERFORMANCE_REPORT.network
- KEYWORDS_PERFORMANCE_REPORT.device
- KEYWORDS_PERFORMANCE_REPORT.clicks
- KEYWORDS_PERFORMANCE_REPORT.cost
- KEYWORDS_PERFORMANCE_REPORT.impressions
- KEYWORDS_PERFORMANCE_REPORT.interactions
- KEYWORDS_PERFORMANCE_REPORT.engagements
- KEYWORDS_PERFORMANCE_REPORT.conversions
- KEYWORDS_PERFORMANCE_REPORT.allConv
- KEYWORDS_PERFORMANCE_REPORT.views
- KEYWORDS_PERFORMANCE_REPORT.activeViewViewableImpressions
- KEYWORDS_PERFORMANCE_REPORT.activeViewMeasurableImpr
- KEYWORDS_PERFORMANCE_REPORT.activeViewMeasurableCost
- KEYWORDS_PERFORMANCE_REPORT.gmailClicksToWebsite
- KEYWORDS_PERFORMANCE_REPORT.gmailSaves
- KEYWORDS_PERFORMANCE_REPORT.gmailForwards
- KEYWORDS_PERFORMANCE_REPORT.keywordID
- KEYWORDS_PERFORMANCE_REPORT.keyword
- KEYWORDS_PERFORMANCE_REPORT.keywordState
- KEYWORDS_PERFORMANCE_REPORT.criterionServingStatus
- KEYWORDS_PERFORMANCE_REPORT.destinationURL
- KEYWORDS_PERFORMANCE_REPORT.matchType
- KEYWORDS_PERFORMANCE_REPORT.topOfPageCPC
- KEYWORDS_PERFORMANCE_REPORT.firstPageCPC
- KEYWORDS_PERFORMANCE_REPORT.imprAbsTop
- KEYWORDS_PERFORMANCE_REPORT.activeViewAvgCPM
- KEYWORDS_PERFORMANCE_REPORT.activeViewViewableCTR
- KEYWORDS_PERFORMANCE_REPORT.activeViewMeasurableImprImpr
- KEYWORDS_PERFORMANCE_REPORT.activeViewViewableImprMeasurableImpr
- KEYWORDS_PERFORMANCE_REPORT.allConvRate
- KEYWORDS_PERFORMANCE_REPORT.allConvValue
- KEYWORDS_PERFORMANCE_REPORT.avgCost
- KEYWORDS_PERFORMANCE_REPORT.avgCPC
- KEYWORDS_PERFORMANCE_REPORT.avgCPE
- KEYWORDS_PERFORMANCE_REPORT.avgCPM
- KEYWORDS_PERFORMANCE_REPORT.avgCPV
- KEYWORDS_PERFORMANCE_REPORT.avgPosition
- KEYWORDS_PERFORMANCE_REPORT.convRate
- KEYWORDS_PERFORMANCE_REPORT.totalConvValue
- KEYWORDS_PERFORMANCE_REPORT.costAllConv
- KEYWORDS_PERFORMANCE_REPORT.costConv
- KEYWORDS_PERFORMANCE_REPORT.costConvCurrentModel
- KEYWORDS_PERFORMANCE_REPORT.crossDeviceConv
- KEYWORDS_PERFORMANCE_REPORT.ctr
- KEYWORDS_PERFORMANCE_REPORT.conversionsCurrentModel
- KEYWORDS_PERFORMANCE_REPORT.convValueCurrentModel
- KEYWORDS_PERFORMANCE_REPORT.engagementRate
- KEYWORDS_PERFORMANCE_REPORT.interactionRate
- KEYWORDS_PERFORMANCE_REPORT.interactionTypes
- KEYWORDS_PERFORMANCE_REPORT.imprTop
- KEYWORDS_PERFORMANCE_REPORT.valueAllConv
- KEYWORDS_PERFORMANCE_REPORT.valueConv
- KEYWORDS_PERFORMANCE_REPORT.valueConvCurrentModel
- KEYWORDS_PERFORMANCE_REPORT.videoPlayedTo100
- KEYWORDS_PERFORMANCE_REPORT.videoPlayedTo25
- KEYWORDS_PERFORMANCE_REPORT.videoPlayedTo50
- KEYWORDS_PERFORMANCE_REPORT.videoPlayedTo75
- KEYWORDS_PERFORMANCE_REPORT.viewRate
- KEYWORDS_PERFORMANCE_REPORT.viewThroughConv
- KEYWORDS_PERFORMANCE_REPORT.searchAbsTopIS
- KEYWORDS_PERFORMANCE_REPORT.searchLostAbsTopISBudget
- KEYWORDS_PERFORMANCE_REPORT.searchLostTopISBudget
- KEYWORDS_PERFORMANCE_REPORT.searchExactMatchIS
- KEYWORDS_PERFORMANCE_REPORT.searchImprShare
- KEYWORDS_PERFORMANCE_REPORT.searchLostAbsTopISRank
- KEYWORDS_PERFORMANCE_REPORT.searchLostISRank
- KEYWORDS_PERFORMANCE_REPORT.searchLostTopISRank
- KEYWORDS_PERFORMANCE_REPORT.searchTopIS
- AD_PERFORMANCE_REPORT.customerID
- AD_PERFORMANCE_REPORT.account
- AD_PERFORMANCE_REPORT.currency
- AD_PERFORMANCE_REPORT.timeZone
- AD_PERFORMANCE_REPORT.clientName
- AD_PERFORMANCE_REPORT.campaign
- AD_PERFORMANCE_REPORT.campaignID
- AD_PERFORMANCE_REPORT.campaignState
- AD_PERFORMANCE_REPORT.adGroup
- AD_PERFORMANCE_REPORT.adGroupID
- AD_PERFORMANCE_REPORT.adGroupState
- AD_PERFORMANCE_REPORT.day
- AD_PERFORMANCE_REPORT.network
- AD_PERFORMANCE_REPORT.device
- AD_PERFORMANCE_REPORT.clicks
- AD_PERFORMANCE_REPORT.cost
- AD_PERFORMANCE_REPORT.impressions
- AD_PERFORMANCE_REPORT.interactions
- AD_PERFORMANCE_REPORT.engagements
- AD_PERFORMANCE_REPORT.conversions
- AD_PERFORMANCE_REPORT.allConv
- AD_PERFORMANCE_REPORT.views
- AD_PERFORMANCE_REPORT.activeViewViewableImpressions
- AD_PERFORMANCE_REPORT.activeViewMeasurableImpr
- AD_PERFORMANCE_REPORT.activeViewMeasurableCost
- AD_PERFORMANCE_REPORT.gmailClicksToWebsite
- AD_PERFORMANCE_REPORT.gmailSaves
- AD_PERFORMANCE_REPORT.gmailForwards
- AD_PERFORMANCE_REPORT.adID
- AD_PERFORMANCE_REPORT.adState
- AD_PERFORMANCE_REPORT.approvalStatus
- AD_PERFORMANCE_REPORT.adType
- AD_PERFORMANCE_REPORT.adStrength
- AD_PERFORMANCE_REPORT.autoAppliedAdSuggestion
- AD_PERFORMANCE_REPORT.ad
- AD_PERFORMANCE_REPORT.descriptionLine1
- AD_PERFORMANCE_REPORT.descriptionLine2
- AD_PERFORMANCE_REPORT.finalURL
- AD_PERFORMANCE_REPORT.displayURL
- AD_PERFORMANCE_REPORT.description
- AD_PERFORMANCE_REPORT.headline1
- AD_PERFORMANCE_REPORT.headline2
- AD_PERFORMANCE_REPORT.path1
- AD_PERFORMANCE_REPORT.businessName
- AD_PERFORMANCE_REPORT.callToActionTextResponsive
- AD_PERFORMANCE_REPORT.shortHeadline
- AD_PERFORMANCE_REPORT.longHeadline
- AD_PERFORMANCE_REPORT.promotionTextResponsive
- AD_PERFORMANCE_REPORT.responsiveSearchAdPath1
- AD_PERFORMANCE_REPORT.responsiveSearchAdHeadlines
- AD_PERFORMANCE_REPORT.responsiveSearchAdDescriptions
- AD_PERFORMANCE_REPORT.gmailAdBusinessName
- AD_PERFORMANCE_REPORT.gmailAdHeadline
- AD_PERFORMANCE_REPORT.gmailAdDescription
- AD_PERFORMANCE_REPORT.imageAdName
- AD_PERFORMANCE_REPORT.businessNameMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.longHeadlineMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.headlinesMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.callToActionTextMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.promotionTextMultiAssetResponsiveDisplay
- AD_PERFORMANCE_REPORT.imprAbsTop
- AD_PERFORMANCE_REPORT.activeViewAvgCPM
- AD_PERFORMANCE_REPORT.activeViewViewableCTR
- AD_PERFORMANCE_REPORT.activeViewMeasurableImprImpr
- AD_PERFORMANCE_REPORT.activeViewViewableImprMeasurableImpr
- AD_PERFORMANCE_REPORT.allConvRate
- AD_PERFORMANCE_REPORT.allConvValue
- AD_PERFORMANCE_REPORT.avgCost
- AD_PERFORMANCE_REPORT.avgCPC
- AD_PERFORMANCE_REPORT.avgCPE
- AD_PERFORMANCE_REPORT.avgCPM
- AD_PERFORMANCE_REPORT.avgCPV
- AD_PERFORMANCE_REPORT.avgPosition
- AD_PERFORMANCE_REPORT.convRate
- AD_PERFORMANCE_REPORT.totalConvValue
- AD_PERFORMANCE_REPORT.costAllConv
- AD_PERFORMANCE_REPORT.costConv
- AD_PERFORMANCE_REPORT.costConvCurrentModel
- AD_PERFORMANCE_REPORT.crossDeviceConv
- AD_PERFORMANCE_REPORT.ctr
- AD_PERFORMANCE_REPORT.conversionsCurrentModel
- AD_PERFORMANCE_REPORT.convValueCurrentModel
- AD_PERFORMANCE_REPORT.engagementRate
- AD_PERFORMANCE_REPORT.interactionRate
- AD_PERFORMANCE_REPORT.interactionTypes
- AD_PERFORMANCE_REPORT.imprTop
- AD_PERFORMANCE_REPORT.valueAllConv
- AD_PERFORMANCE_REPORT.valueConv
- AD_PERFORMANCE_REPORT.valueConvCurrentModel
- AD_PERFORMANCE_REPORT.videoPlayedTo100
- AD_PERFORMANCE_REPORT.videoPlayedTo25
- AD_PERFORMANCE_REPORT.videoPlayedTo50
- AD_PERFORMANCE_REPORT.videoPlayedTo75
- AD_PERFORMANCE_REPORT.viewRate
- AD_PERFORMANCE_REPORT.viewThroughConv
- name: tap-ask-nicely
namespace: tap_ask_nicely
label: AskNicely
description: Customer Experience Platform
variant: mashey
docs: https://hub.meltano.com/extractors/ask-nicely
repo: https://github.com/Mashey/tap-ask-nicely
pip_url: git+https://github.com/Mashey/tap-ask-nicely.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [subdomain, api_key]
settings:
- name: subdomain
label: Subdomain
description: The subdomain of your Ask Nicely account.
- name: api_key
kind: password
label: API Key
documentation: https://asknicely.asknice.ly/help/apidocs/auth
description: The API Key generated via your Ask Nicely account.
- name: tap-bigquery
namespace: tap_bigquery
label: BigQuery
description: BigQuery data warehouse extractor
variant: anelendata
docs: https://hub.meltano.com/extractors/bigquery.html
repo: https://github.com/anelendata/tap-bigquery
pip_url: tap-bigquery
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [streams, start_datetime, credentials_path]
settings:
- name: streams
kind: array
description: Array holding objects describing streams (tables) to extract, with
`name`, `table`, `columns`, `datetime_key`, and `filters` keys. See docs for
details.
- name: credentials_path
value: $MELTANO_PROJECT_ROOT/client_secrets.json
description: Fully qualified path to `client_secrets.json` for your service account.
- name: start_datetime
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: end_datetime
kind: date_iso8601
description: Date up to when historical data will be extracted.
- name: limit
kind: integer
description: Limits the number of records returned in each stream, applied as
a limit in the query.
- name: start_always_inclusive
kind: boolean
value: true
description: When replicating incrementally, disable to only select records whose
`datetime_key` is greater than the maximum value replicated in the last run,
by excluding records whose timestamps match exactly. This could cause records
to be missed that were created after the last run finished, but during the same
second and with the same timestamp.
- name: tap-bing-ads
namespace: tap_bing_ads
label: Bing Ads
description: Advertising Platform
variant: singer-io
docs: https://hub.meltano.com/extractors/bing-ads.html
repo: https://github.com/singer-io/tap-bing-ads
pip_url: tap-bing-ads
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [customer_id, account_ids, oauth_client_id, oauth_client_secret, refresh_token,
developer_token, start_date]
settings:
- name: developer_token
kind: password
- name: oauth_client_id
kind: password
label: OAuth Client ID
- name: oauth_client_secret
kind: password
label: OAuth Client Secret
- name: refresh_token
kind: password
- name: customer_id
label: Customer ID
- name: account_ids
label: Account ID(s)
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
metadata:
ad_group_performance_report:
AbsoluteTopImpressionSharePercent:
inclusion: available
selected: false
AbsoluteTopImpressionRatePercent:
inclusion: available
selected: false
AbsoluteTopImpressionShareLostToBudgetPercent:
inclusion: available
selected: false
AbsoluteTopImpressionShareLostToRankPercent:
inclusion: available
selected: false
AudienceImpressionLostToBudgetPercent:
inclusion: available
selected: false
AudienceImpressionLostToRankPercent:
inclusion: available
selected: false
AudienceImpressionSharePercent:
inclusion: available
selected: false
ClickSharePercent:
inclusion: available
selected: false
ExactMatchImpressionSharePercent:
inclusion: available
selected: false
ImpressionLostToAdRelevancePercent:
inclusion: available
selected: false
ImpressionLostToBidPercent:
inclusion: available
selected: false
ImpressionLostToBudgetPercent:
inclusion: available
selected: false
ImpressionLostToExpectedCtrPercent:
inclusion: available
selected: false
ImpressionLostToRankPercent:
inclusion: available
selected: false
ImpressionLostToRankAggPercent:
inclusion: available
selected: false
ImpressionSharePercent:
inclusion: available
selected: false
TopImpressionRatePercent:
inclusion: available
selected: false
TopImpressionShareLostToBudgetPercent:
inclusion: available
selected: false
TopImpressionShareLostToRankPercent:
inclusion: available
selected: false
TopImpressionSharePercent:
inclusion: available
selected: false
campaign_performance_report:
AbsoluteTopImpressionSharePercent:
inclusion: available
selected: false
AbsoluteTopImpressionRatePercent:
inclusion: available
selected: false
AbsoluteTopImpressionShareLostToBudgetPercent:
inclusion: available
selected: false
AbsoluteTopImpressionShareLostToRankPercent:
inclusion: available
selected: false
AudienceImpressionLostToBudgetPercent:
inclusion: available
selected: false
AudienceImpressionLostToRankPercent:
inclusion: available
selected: false
AudienceImpressionSharePercent:
inclusion: available
selected: false
ClickSharePercent:
inclusion: available
selected: false
ExactMatchImpressionSharePercent:
inclusion: available
selected: false
ImpressionLostToAdRelevancePercent:
inclusion: available
selected: false
ImpressionLostToBidPercent:
inclusion: available
selected: false
ImpressionLostToBudgetPercent:
inclusion: available
selected: false
ImpressionLostToExpectedCtrPercent:
inclusion: available
selected: false
ImpressionLostToRankPercent:
inclusion: available
selected: false
ImpressionLostToRankAggPercent:
inclusion: available
selected: false
ImpressionSharePercent:
inclusion: available
selected: false
TopImpressionRatePercent:
inclusion: available
selected: false
TopImpressionShareLostToBudgetPercent:
inclusion: available
selected: false
TopImpressionShareLostToRankPercent:
inclusion: available
selected: false
TopImpressionSharePercent:
inclusion: available
selected: false
- name: tap-chargebee
namespace: tap_chargebee
label: Chargebee
description: Subscription billing software
variant: hotgluexyz
docs: https://hub.meltano.com/extractors/chargebee.html
repo: https://github.com/hotgluexyz/tap-chargebee
pip_url: git+https://github.com/hotgluexyz/tap-chargebee.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- - api_key
- site
- product_catalog
- start_date
settings:
- name: api_key
kind: password
label: API Key
- name: site
label: Chargebee Site
- name: product_catalog
label: Chargebee Product Catalog
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: select_fields_by_default
kind: boolean
value: true
    description: Select by default any new fields discovered in Chargebee objects
- name: state_message_threshold
kind: integer
value: 1000
description: Generate a STATE message every N records
- name: max_workers
kind: integer
value: 8
label: Maximum number of threads to use
- name: tap-csv
namespace: tap_csv
label: Comma Separated Values (CSV)
description: Generic data extractor of CSV (comma separated value) files
variants:
- name: meltanolabs
docs: https://hub.meltano.com/extractors/csv.html
repo: https://github.com/MeltanoLabs/tap-csv
pip_url: git+https://github.com/MeltanoLabs/tap-csv.git
capabilities:
- discover
- catalog
- state
settings_group_validation:
- [files]
- [csv_files_definition]
settings:
- name: files
kind: array
description: Array of objects with `entity`, `path`, and `keys` keys
- name: csv_files_definition
label: CSV Files Definition
documentation: https://github.com/MeltanoLabs/tap-csv#settings
description: Project-relative path to JSON file holding array of objects with
`entity`, `path`, and `keys` keys
placeholder: Ex. files-def.json
- name: meltano
repo: https://gitlab.com/meltano/tap-csv
pip_url: git+https://gitlab.com/meltano/tap-csv.git
capabilities:
- discover
- catalog
- state
settings_group_validation:
- [files]
- [csv_files_definition]
settings:
- name: files
kind: array
description: Array of objects with `entity`, `file`, and `keys` keys
- name: csv_files_definition
label: CSV Files Definition
documentation: https://gitlab.com/meltano/tap-csv#run
description: Project-relative path to JSON file holding array of objects with
`entity`, `file`, and `keys` keys
placeholder: Ex. files-def.json
hidden: true
- name: tap-facebook
namespace: tap_facebook
label: Facebook Ads
description: Advertising Platform
variants:
- name: singer-io
docs: https://hub.meltano.com/extractors/facebook.html
repo: https://github.com/singer-io/tap-facebook
pip_url: git+https://github.com/singer-io/tap-facebook.git
capabilities:
- properties
- discover
- state
settings_group_validation:
- [account_id, access_token, start_date]
settings:
- name: account_id
label: Account ID
description: Your Facebook Ads Account ID
placeholder: Ex. 123456789012345
- name: access_token
kind: oauth
label: Access Token
description: User Token generated by Facebook OAuth handshake
oauth:
provider: facebook
placeholder: Ex. *****************
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: end_date
kind: date_iso8601
description: Date up to when historical data will be extracted.
- name: insights_buffer_days
kind: integer
value: 0
label: Ads Insights Buffer Days
description: How many Days before the Start Date to fetch Ads Insights for
- name: include_deleted
kind: boolean
label: Include Deleted Objects
description: Determines if it should include deleted objects or not.
- name: meltano
repo: https://gitlab.com/meltano/tap-facebook
pip_url: git+https://gitlab.com/meltano/tap-facebook.git
capabilities:
- properties
- discover
- state
settings_group_validation:
- [account_id, access_token, start_date]
settings:
- name: account_id
label: Account ID
description: Your Facebook Ads Account ID
placeholder: Ex. 123456789012345
- name: access_token
kind: oauth
label: Access Token
description: User Token generated by Facebook OAuth handshake
oauth:
provider: facebook
placeholder: Ex. *****************
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: end_date
kind: date_iso8601
description: Date up to when historical data will be extracted.
- name: insights_buffer_days
kind: integer
value: 0
label: Ads Insights Buffer Days
description: How many Days before the Start Date to fetch Ads Insights for
hidden: true
- name: tap-fastly
namespace: tap_fastly
label: Fastly
description: Edge cloud computing services provider
variants:
- name: splitio
docs: https://hub.meltano.com/extractors/fastly.html
repo: https://github.com/splitio/tap-fastly
pip_url: git+https://github.com/splitio/tap-fastly.git
capabilities:
- catalog
- discover
- state
settings:
- name: api_token
kind: password
label: API Token
description: Fastly token for authenticating with the API.
placeholder: Ex. *****************
- name: start_date
kind: date_iso8601
label: Start Date
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: meltano
docs: https://hub.meltano.com/extractors/fastly.html
repo: https://gitlab.com/meltano/tap-fastly
pip_url: git+https://gitlab.com/meltano/tap-fastly.git
capabilities:
- catalog
- discover
- state
settings:
- name: api_token
kind: password
label: API Token
placeholder: Ex. *****************
- name: start_date
kind: date_iso8601
label: Start Date
- name: tap-gitlab
namespace: tap_gitlab
label: GitLab
description: Single application for the entire DevOps lifecycle
variants:
- name: meltanolabs
docs: https://hub.meltano.com/extractors/gitlab.html
repo: https://github.com/MeltanoLabs/tap-gitlab
pip_url: git+https://github.com/MeltanoLabs/tap-gitlab.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [api_url, groups, start_date]
- [api_url, projects, start_date]
settings:
- name: api_url
value: https://gitlab.com
label: GitLab Instance
description: GitLab API/instance URL. When an API path is omitted, `/api/v4/`
is assumed.
protected: true
- name: private_token
kind: password
value: ''
label: Access Token
description: GitLab personal access token or other API token.
placeholder: Ex. *****************
- name: groups
value: ''
label: Groups
description: Space-separated names of groups to extract data from. Leave empty
and provide a project name if you'd like to pull data from a project in a
personal user namespace.
placeholder: Ex. my-organization
- name: projects
value: ''
label: Project
description: Space-separated `namespace/project` paths of projects to extract
data from. Leave empty and provide a group name to extract data from all group
projects.
placeholder: Ex. my-organization/project-1
- name: ultimate_license
kind: boolean
value: false
description: Enable to pull in extra data (like Epics, Epic Issues and other
entities) only available to GitLab Ultimate and GitLab.com Gold accounts.
- name: fetch_merge_request_commits
kind: boolean
value: false
description: For each Merge Request, also fetch the MR's commits and create
the join table `merge_request_commits` with the Merge Request and related
Commit IDs. This can slow down extraction considerably because of the many
API calls required.
- name: fetch_pipelines_extended
kind: boolean
value: false
description: For every Pipeline, also fetch extended details of each of these
pipelines. This can slow down extraction considerably because of the many
API calls required.
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: meltano
repo: https://gitlab.com/meltano/tap-gitlab
pip_url: git+https://gitlab.com/meltano/tap-gitlab.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [api_url, groups, start_date]
- [api_url, projects, start_date]
settings:
- name: api_url
value: https://gitlab.com
label: GitLab Instance
description: GitLab API/instance URL. When an API path is omitted, `/api/v4/`
is assumed.
protected: true
- name: private_token
kind: password
value: ''
label: Access Token
description: GitLab personal access token or other API token.
placeholder: Ex. *****************
- name: groups
value: ''
label: Groups
description: Space-separated names of groups to extract data from. Leave empty
and provide a project name if you'd like to pull data from a project in a
personal user namespace.
placeholder: Ex. my-organization
- name: projects
value: ''
label: Project
description: Space-separated `namespace/project` paths of projects to extract
data from. Leave empty and provide a group name to extract data from all group
projects.
placeholder: Ex. my-organization/project-1
- name: ultimate_license
kind: boolean
value: false
description: Enable to pull in extra data (like Epics, Epic Issues and other
entities) only available to GitLab Ultimate and GitLab.com Gold accounts.
- name: fetch_merge_request_commits
kind: boolean
value: false
description: For each Merge Request, also fetch the MR's commits and create
the join table `merge_request_commits` with the Merge Request and related
Commit IDs. This can slow down extraction considerably because of the many
API calls required.
- name: fetch_pipelines_extended
kind: boolean
value: false
description: For every Pipeline, also fetch extended details of each of these
pipelines. This can slow down extraction considerably because of the many
API calls required.
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
hidden: true
- name: tap-github
namespace: tap_github
label: GitHub
description: Code hosting platform
variant: singer-io
docs: https://hub.meltano.com/extractors/github
repo: https://github.com/singer-io/tap-github
pip_url: git+https://github.com/singer-io/tap-github.git
capabilities:
- properties
- discover
- state
settings_group_validation:
- [access_token, repository, start_date]
settings:
- name: access_token
kind: password
label: Personal Access Tokens
description: Personal access token used to authenticate with GitHub. The token
can be generated by going to the [Personal Access Token settings page](https://github.com/settings/tokens).
docs: https://github.com/settings/tokens
- name: repository
label: Repositories
description: Space-separated list of repositories. Each repository must be prefaced
by the user/organization name, e.g. `"meltano/meltano meltano/sdk meltano/hub"`
placeholder: Ex. "meltano/meltano meltano/sdk meltano/hub"
- name: start_date
kind: date_iso8601
label: Start Date
description: Defines how far into the past to pull data for the provided repositories.
- name: tap-google-analytics
namespace: tap_google_analytics
label: Google Analytics
description: App and website analytics platform hosted by Google
variants:
- name: meltanolabs
docs: https://hub.meltano.com/extractors/google-analytics.html
repo: https://github.com/MeltanoLabs/tap-google-analytics
pip_url: git+https://github.com/MeltanoLabs/tap-google-analytics.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [key_file_location, view_id, start_date]
- [client_secrets, view_id, start_date]
- [oauth_credentials.client_id, oauth_credentials.client_secret, oauth_credentials.access_token,
oauth_credentials.refresh_token, view_id, start_date]
settings:
- name: key_file_location
kind: file
value: $MELTANO_PROJECT_ROOT/client_secrets.json
label: Client Secrets File Location
description: A file that contains the Google Analytics client secrets json.
placeholder: Ex. client_secrets.json
- name: client_secrets
kind: object
label: Client Secrets JSON
description: An object that contains the Google Analytics client secrets.
placeholder: Ex. client_secrets.json
- name: oauth_credentials.client_id
kind: password
label: OAuth Client ID
description: The Google Analytics oauth client ID.
- name: oauth_credentials.client_secret
kind: password
label: OAuth Client Secret
description: The Google Analytics oauth client secret.
- name: oauth_credentials.access_token
kind: password
label: OAuth Access Token
description: The Google Analytics oauth access token.
- name: oauth_credentials.refresh_token
kind: password
label: OAuth Refresh Token
description: The Google Analytics oauth refresh token.
- name: view_id
label: View ID
description: The ID for the view to fetch data from.
placeholder: Ex. 198343027
- name: reports
label: Reports
description: The reports definition of which fields to retrieve from the view.
placeholder: Ex. my_report_definition.json
- name: start_date
kind: date_iso8601
label: Start Date
description: This property determines how much historical data will be extracted.
Please be aware that the larger the time period and amount of data, the longer
the initial extraction can be expected to take.
- name: end_date
kind: date_iso8601
label: End Date
description: Date up to when historical data will be extracted.
- name: meltano
docs: https://hub.meltano.com/extractors/google-analytics.html
repo: https://gitlab.com/meltano/tap-google-analytics
pip_url: git+https://gitlab.com/meltano/tap-google-analytics.git
capabilities:
- catalog
- discover
settings_group_validation:
- [key_file_location, view_id, start_date]
- [oauth_credentials.client_id, oauth_credentials.client_secret, oauth_credentials.access_token,
oauth_credentials.refresh_token, view_id, start_date]
settings:
- name: key_file_location
kind: file
value: $MELTANO_PROJECT_ROOT/client_secrets.json
label: Client Secrets
placeholder: Ex. client_secrets.json
- name: oauth_credentials.client_id
kind: password
label: OAuth Client ID
- name: oauth_credentials.client_secret
kind: password
label: OAuth Client Secret
- name: oauth_credentials.access_token
kind: password
label: OAuth Access Token
- name: oauth_credentials.refresh_token
kind: password
label: OAuth Refresh Token
- name: view_id
label: View ID
placeholder: Ex. 198343027
- name: reports
label: Reports
placeholder: Ex. my_report_definition.json
- name: start_date
kind: date_iso8601
description: This property determines how much historical data will be extracted.
Please be aware that the larger the time period and amount of data, the longer
the initial extraction can be expected to take.
- name: end_date
kind: date_iso8601
description: Date up to when historical data will be extracted.
hidden: true
- name: tap-google-sheets
namespace: tap_google_sheets
label: Google Sheets
description: Cloud Spreadsheets
variant: singer-io
docs: https://hub.meltano.com/extractors/google-sheets
repo: https://github.com/singer-io/tap-google-sheets
pip_url: git+https://github.com/singer-io/tap-google-sheets.git
capabilities:
- discover
- catalog
- state
settings_group_validation:
- [client_id, client_secret, refresh_token, spreadsheet_id, start_date, user_agent]
settings:
- name: client_id
label: Client ID
documentation: https://drive.google.com/open?id=1FojlvtLwS0-BzGS37R0jEXtwSHqSiO1Uw-7RKQQO-C4
description: This is the ID setup via the Google Cloud API.
- name: client_secret
kind: password
label: Client Secret
documentation: https://drive.google.com/open?id=1FojlvtLwS0-BzGS37R0jEXtwSHqSiO1Uw-7RKQQO-C4
description: This is generated when the client ID is created via the Google Cloud
API.
- name: refresh_token
kind: password
label: Refresh Token
documentation: https://drive.google.com/open?id=1FojlvtLwS0-BzGS37R0jEXtwSHqSiO1Uw-7RKQQO-C4
description: This is the token used to generate new access_tokens. It is manually
generated by making an API call to the Google Cloud API. See the [documentation](https://drive.google.com/open?id=1FojlvtLwS0-BzGS37R0jEXtwSHqSiO1Uw-7RKQQO-C4)
for more information.
- name: spreadsheet_id
label: Spreadsheet ID
description: The unique identifier for a spreadsheet.
- name: start_date
kind: date_iso8601
label: Start Date
description: The absolute minimum start date to check if a file was modified.
placeholder: Ex. "2019-01-01T00:00:00Z"
- name: user_agent
value: tap-google-sheets via Meltano
label: User Agent
description: Used to identify the tap in the Google Remote API logs.
    placeholder: Ex. "tap-google-sheets <api_user_email@example.com>"
- name: tap-hubspot
namespace: tap_hubspot
label: Hubspot
description: Inbound Marketing software
variant: singer-io
docs: https://hub.meltano.com/extractors/hubspot
repo: https://github.com/singer-io/tap-hubspot
pip_url: git+https://github.com/singer-io/tap-hubspot.git
capabilities:
- discover
- properties
- state
settings_group_validation:
- [redirect_uri, client_id, client_secret, refresh_token, start_date]
settings:
- name: redirect_uri
label: Redirect URI
documentation: https://legacydocs.hubspot.com/docs/methods/oauth2/oauth2-quickstart
description: This is the URL that the user will be redirected to after they authorize
your app for the requested scopes
- name: client_id
label: Client ID
documentation: https://legacydocs.hubspot.com/docs/methods/oauth2/oauth2-quickstart
description: This identifies the app used to connect to HubSpot.
- name: client_secret
kind: password
label: Client Secret
description: The client secret used for authentication.
- name: refresh_token
kind: password
label: Refresh Token
description: This is the refresh token provided by HubSpot.
- name: start_date
kind: date_iso8601
label: Start Date
description: This is the cutoff date for syncing historical data.
- name: tap-intacct
namespace: tap_intacct
label: Sage Intacct
description: Financial management software
variant: hotgluexyz
docs: https://hub.meltano.com/extractors/intacct.html
repo: https://github.com/hotgluexyz/tap-intacct
pip_url: git+https://github.com/hotgluexyz/tap-intacct.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- - company_id
- sender_id
- sender_password
- user_id
- user_password
- start_date
settings:
- name: company_id
label: Company Id
- name: sender_id
label: Intacct Sender Id
- name: sender_password
kind: password
label: Intacct Sender Password
- name: user_id
label: Intacct User Id
- name: user_password
kind: password
label: Intacct User Password
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: select_fields_by_default
kind: boolean
value: true
    description: Select by default any new fields discovered in Intacct objects
- name: state_message_threshold
kind: integer
value: 1000
description: Generate a STATE message every N records
- name: max_workers
kind: integer
value: 8
label: Maximum number of threads to use
- name: tap-jira
namespace: tap_jira
label: Jira
description: Issue and Project Tracking Software
variant: singer-io
docs: https://hub.meltano.com/extractors/jira
repo: https://github.com/singer-io/tap-jira
pip_url: git+https://github.com/singer-io/tap-jira.git
capabilities:
- discover
- properties
- state
settings_group_validation:
- [username, password, base_url, start_date, user_agent]
- [oauth_client_secret, oauth_client_id, access_token, cloud_id, refresh_token,
start_date, user_agent]
settings:
- name: username
label: Username
description: Your Jira username.
- name: password
kind: password
label: Password
description: Your Jira password.
- name: base_url
label: Base URL
description: The base URL for your Jira instance.
placeholder: Ex. "https://mycompany.atlassian.net"
- name: oauth_client_secret
kind: password
label: OAuth Client Secret
description: The client secret value used for OAuth authentication.
- name: oauth_client_id
label: OAuth Client ID
description: The client ID used for OAuth authentication.
- name: access_token
kind: password
label: Access Token
description: The access token generated for your account.
- name: cloud_id
label: Cloud ID
description: The cloud ID of your JIRA instance.
- name: refresh_token
kind: password
label: Refresh Token
description: The refresh token generated for your account.
- name: start_date
kind: date_iso8601
label: Start Date
    description: Specifies the date at which the tap will begin pulling data. This
      works only for the streams that support it.
- name: user_agent
value: tap-jira via Meltano
label: User Agent
- name: tap-marketo
namespace: tap_marketo
label: Marketo
description: Marketing automation for account-based marketing
variants:
- name: singer-io
docs: https://hub.meltano.com/extractors/marketo.html
repo: https://github.com/singer-io/tap-marketo
pip_url: git+https://github.com/singer-io/tap-marketo.git
settings:
- name: endpoint
label: Endpoint
- name: client_id
label: Client ID
- name: client_secret
kind: password
label: Client Secret
- name: start_date
kind: date_iso8601
label: Start Date
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: meltano
repo: https://gitlab.com/meltano/tap-marketo
pip_url: git+https://gitlab.com/meltano/tap-marketo.git
settings:
- name: endpoint
- name: identity
- name: client_id
label: Client ID
- name: client_secret
kind: password
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
hidden: true
- name: tap-mongodb
namespace: tap_mongodb
label: MongoDB
description: General purpose, document-based, distributed database
variant: singer-io
docs: https://hub.meltano.com/extractors/mongodb.html
repo: https://github.com/singer-io/tap-mongodb
pip_url: tap-mongodb
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [host, port, user, password, database]
settings:
- name: host
value: localhost
label: Host URL
- name: port
kind: integer
value: 27017
- name: user
- name: password
kind: password
- name: database
label: Database Name
- name: replica_set
- name: ssl
kind: boolean
value: false
label: SSL
value_post_processor: stringify
- name: verify_mode
kind: boolean
value: true
description: SSL Verify Mode
value_post_processor: stringify
- name: include_schemas_in_destination_stream_name
kind: boolean
value: false
description: Forces the stream names to take the form `<database_name>_<collection_name>`
instead of `<collection_name>`
- name: tap-mysql
namespace: tap_mysql
label: MySQL / MariaDB
description: MySQL / MariaDB database extractor
variant: transferwise
docs: https://hub.meltano.com/extractors/mysql.html
repo: https://github.com/transferwise/pipelinewise-tap-mysql
pip_url: pipelinewise-tap-mysql
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [host, port, user, password]
settings:
- name: host
value: localhost
- name: port
kind: integer
value: 3306
- name: user
- name: password
kind: password
- name: database
- name: ssl
kind: boolean
value: false
value_post_processor: stringify
- name: filter_dbs
description: Comma separated list of schemas to extract tables only from particular
schemas and to improve data extraction performance
- name: session_sqls
kind: array
value:
- SET @@session.time_zone="+0:00"
- SET @@session.wait_timeout=28800
- SET @@session.net_read_timeout=3600
- SET @@session.innodb_lock_wait_timeout=3600
    description: List of SQL commands to run when a connection is made. This allows
      you to set session variables dynamically, like timeouts.
- name: tap-pendo
namespace: tap_pendo
label: Pendo
description: Product Experience and Digital Adoption Solutions
variant: singer-io
docs: https://hub.meltano.com/extractors/pendo
repo: https://github.com/singer-io/tap-pendo
pip_url: git+https://github.com/singer-io/tap-pendo.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [x_pendo_integration_key, period, start_date]
settings:
- name: x_pendo_integration_key
kind: password
label: Integration Key
description: 'This is the integration key generated via the Pendo website: Settings
-> Integrations -> Integration Keys.'
- name: period
kind: options
label: Period
description: This defines how data is aggregated, either on a daily or hourly
basis.
options:
- label: Daily
value: dayRange
- label: Hourly
value: hourRange
- name: lookback_window
kind: integer
value: 0
label: Lookback Window
description: The number of days to use as the lookback window.
- name: include_anonymous_visitors
kind: boolean
value: false
label: Include Anonymous Visitors
description: Defines whether or not to include anonymous visitors in the results.
value_post_processor: stringify
- name: start_date
kind: date_iso8601
label: Start Date
description: This is the default start date value to use if no bookmark is present.
- name: tap-postgres
namespace: tap_postgres
label: PostgreSQL
description: PostgreSQL database extractor
variant: transferwise
docs: https://hub.meltano.com/extractors/postgres.html
repo: https://github.com/transferwise/pipelinewise-tap-postgres
pip_url: pipelinewise-tap-postgres
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [host, port, user, password, dbname]
settings:
- name: host
value: localhost
description: PostgreSQL host
- name: port
kind: integer
value: 5432
description: PostgreSQL port
- name: user
description: PostgreSQL user
- name: password
kind: password
description: PostgreSQL password
- name: dbname
description: PostgreSQL database name
- name: ssl
kind: boolean
value: false
description: Using SSL via postgres `sslmode='require'` option. If the server
does not accept SSL connections or the client certificate is not recognized
the connection will fail
value_post_processor: stringify
- name: filter_schemas
description: Scan only the specified comma-separated schemas to improve the performance
of data extraction
- name: default_replication_method
kind: options
options:
- label: Log-based Incremental Replication
value: LOG_BASED
- label: Key-based Incremental Replication
value: INCREMENTAL
- label: Full Table Replication
value: FULL_TABLE
- name: max_run_seconds
kind: integer
value: 43200
description: Stop running the tap after certain number of seconds
- name: logical_poll_total_seconds
kind: integer
value: 10800
description: Stop running the tap when no data received from wal after certain
number of seconds
- name: break_at_end_lsn
kind: boolean
value: true
description: Stop running the tap if the newly received lsn is after the max lsn
that was detected when the tap started
- name: tap-recharge
namespace: tap_recharge
label: ReCharge
description: Subscription payments platform
variant: singer-io
docs: https://hub.meltano.com/extractors/recharge.html
repo: https://github.com/singer-io/tap-recharge
pip_url: tap-recharge==1.0.3
capabilities:
- catalog
- discover
- state
settings:
- name: access_token
kind: password
description: Private API Token
placeholder: Ex. 1a2b3c4d5e6f
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: user_agent
value: tap-recharge via Meltano
description: User agent to send to ReCharge along with API requests. Typically
includes name of integration and an email address you can be reached at
placeholder: Ex. tap-recharge via Meltano <user@example.com>
- name: tap-salesforce
namespace: tap_salesforce
label: Salesforce
description: Customer-relationship management & customer success platform
variant: meltano
docs: https://hub.meltano.com/extractors/salesforce.html
repo: https://gitlab.com/meltano/tap-salesforce
pip_url: git+https://gitlab.com/meltano/tap-salesforce.git@v1.5.0
capabilities:
- properties
- discover
- state
settings_group_validation:
- [username, password, security_token, start_date]
- [client_id, client_secret, refresh_token, start_date]
settings:
- name: username
placeholder: Ex. me@my-organization.com
- name: password
kind: password
label: Password
placeholder: Ex. *****************
- name: security_token
kind: password
label: Security Token
documentation: https://hub.meltano.com/extractors/salesforce.html#salesforce-setup
description: Your Salesforce Account access token
placeholder: Ex. *****************
- name: client_id
label: Client ID
- name: client_secret
kind: password
- name: refresh_token
kind: password
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: is_sandbox
kind: boolean
value: false
description: Use Salesforce Sandbox
- name: api_type
kind: options
value: REST
label: API Type
options:
- label: REST
value: REST
- label: BULK
value: BULK
- name: select_fields_by_default
kind: boolean
value: true
description: Select by default any new fields discovered in Salesforce objects
- name: state_message_threshold
kind: integer
value: 1000
description: Generate a STATE message every N records
- name: max_workers
kind: integer
value: 8
label: Maximum number of threads to use
select:
- Lead.*
- Contact.*
- User.*
- OpportunityHistory.*
- Account.*
- Opportunity.*
- name: tap-quickbooks
namespace: tap_quickbooks
label: Quickbooks
description: Accounting management platform
variant: hotgluexyz
docs: https://hub.meltano.com/extractors/quickbooks.html
repo: https://github.com/hotgluexyz/tap-quickbooks
pip_url: git+https://github.com/hotgluexyz/tap-quickbooks.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- - client_id
- client_secret
- refresh_token
- realmId
- start_date
settings:
- name: realmId
label: Realm ID
- name: client_id
kind: password
label: Client ID
- name: client_secret
kind: password
- name: refresh_token
kind: password
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: is_sandbox
kind: boolean
value: false
description: Use Quickbooks Sandbox
- name: select_fields_by_default
kind: boolean
value: true
description: Select by default any new fields discovered in Quickbooks objects
- name: state_message_threshold
kind: integer
value: 1000
description: Generate a STATE message every N records
- name: max_workers
kind: integer
value: 8
label: Maximum number of threads to use
- name: tap-shopify
namespace: tap_shopify
label: Shopify
description: Ecommerce platform
variant: singer-io
docs: https://hub.meltano.com/extractors/shopify.html
repo: https://github.com/singer-io/tap-shopify
pip_url: tap-shopify
capabilities:
- catalog
- discover
- state
settings:
- name: shop
label: Store Subdomain
placeholder: Ex. my-first-store
- name: api_key
kind: password
label: Private App API Password
placeholder: Ex. shppa_1a2b3c4d5e6f
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
select:
# Select everything except for `metafields.*`.
# Selecting `*.*` and `!metafields.*` doesn't work because `*.*` takes
# precedence at the stream level, so we would still get `metafields.id`
# and `metafields.updated_at` since these are `inclusion: available`
- abandoned_checkouts.*
- collects.*
- custom_collections.*
- customers.*
- order_refunds.*
- orders.*
- products.*
- transactions.*
- name: tap-slack
namespace: tap_slack
label: Slack
description: Team communication tool
variant: mashey
docs: https://hub.meltano.com/extractors/slack.html
repo: https://github.com/Mashey/tap-slack
pip_url: git+https://github.com/Mashey/tap-slack.git
capabilities:
- catalog
- discover
- state
settings:
- name: token
kind: password
label: API Token
documentation: https://slack.com/help/articles/215770388-Create-and-regenerate-API-tokens
- name: start_date
kind: date_iso8601
label: Sync Start Date
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: channels
kind: array
label: Channels to Sync
    description: By default the tap will sync all channels it has been invited to,
      but this can be overridden to limit it to specific channels. Note this needs
      to be the channel ID, not the name, as recommended by the Slack API. To get
      the ID for a channel, either use the Slack API or find it in the URL.
placeholder: Ex. ["abc123", "def456"]
- name: private_channels
kind: boolean
value: true
label: Join Private Channels
description: Specifies whether to sync private channels or not. Default is true.
- name: join_public_channels
kind: boolean
value: false
label: Join Public Channels
    description: Specifies whether to have the tap auto-join all public channels in
      your organization. Default is false.
- name: archived_channels
kind: boolean
value: false
label: Sync Archived Channels
description: Specifies whether the tap will sync archived channels or not. Note
that a bot cannot join an archived channel, so unless the bot was added to the
channel prior to it being archived it will not be able to sync the data from
that channel. Default is false.
- name: date_window_size
kind: integer
value: 7
label: Date Window Size
description: Specifies the window size for syncing certain streams (messages,
files, threads). The default is 7 days.
- name: tap-spreadsheets-anywhere
namespace: tap_spreadsheets_anywhere
label: Spreadsheets Anywhere
description: Data extractor for CSV and Excel files from any smart_open supported
transport (S3, SFTP, localhost, etc...)
variant: ets
docs: https://hub.meltano.com/extractors/spreadsheets-anywhere.html
repo: https://github.com/ets/tap-spreadsheets-anywhere
pip_url: git+https://github.com/ets/tap-spreadsheets-anywhere.git
capabilities:
- catalog
- discover
- state
settings:
- name: tables
kind: array
description: An array holding json objects that each describe a set of targeted
source files. See docs for details.
- name: tap-stripe
namespace: tap_stripe
label: Stripe
description: Online payment processing for internet businesses
variants:
- name: singer-io
docs: https://hub.meltano.com/extractors/stripe.html
repo: https://github.com/singer-io/tap-stripe
pip_url: git+https://github.com/singer-io/tap-stripe.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- - account_id
- client_secret
- start_date
settings:
- name: account_id
label: Account ID
placeholder: Ex. acct_1a2b3c4d5e
- name: client_secret
kind: password
label: Secret API Key
placeholder: Ex. sk_live_1a2b3c4d5e
- name: start_date
kind: date_iso8601
label: Start Date
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: prratek
repo: https://github.com/prratek/tap-stripe
pip_url: git+https://github.com/prratek/tap-stripe.git
capabilities:
- catalog
- discover
- state
settings_group_validation:
- - api_key
- start_date
settings:
- name: api_key
kind: password
label: Secret API Key
placeholder: Ex. sk_live_1a2b3c4d5e
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
# Optional
- name: account_id
label: Account ID
placeholder: Ex. acct_1a2b3c4d5e
- name: meltano
repo: https://github.com/meltano/tap-stripe
pip_url: git+https://github.com/meltano/tap-stripe.git
capabilities:
- catalog
- discover
- state
settings:
- name: account_id
label: Account ID
placeholder: Ex. acct_1a2b3c4d5e
- name: client_secret
kind: password
label: Secret API Key
placeholder: Ex. sk_live_1a2b3c4d5e
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
hidden: true
- name: tap-twilio
namespace: tap_twilio
label: Twilio
description: Cloud communications platform as a service
variant: transferwise
docs: https://hub.meltano.com/extractors/twilio
repo: https://github.com/transferwise/pipelinewise-tap-twilio
pip_url: git+https://github.com/transferwise/pipelinewise-tap-twilio.git
capabilities:
- discover
- catalog
- state
settings_group_validation:
- [account_sid, auth_token, start_date, user_agent]
settings:
- name: account_sid
label: Account String ID
description: This is the String ID of your account which can be found in the account
console at twilio.com/console.
- name: auth_token
kind: password
label: Auth Token
description: This is the authorization token for your account which can be found
in the account console at twilio.com/console.
- name: date_window_days
kind: integer
value: 30
label: Date Window Days
description: This is the integer number of days (between the from and to dates)
for date-windowing through the date-filtered endpoints.
- name: start_date
kind: date_iso8601
label: Start Date
description: This is the absolute beginning date from which incremental loading
on the initial load will start
- name: user_agent
value: tap-twilio via Meltano
label: User Agent
description: This is used to identify the process running the tap.
placeholder: Ex. "tap-twilio <api_user_email@your_company.com>"
- name: tap-zendesk
namespace: tap_zendesk
label: Zendesk
description: Support ticketing system & customer service platform
variants:
- name: twilio-labs
docs: https://hub.meltano.com/extractors/zendesk.html
repo: https://github.com/twilio-labs/twilio-tap-zendesk
pip_url: twilio-tap-zendesk
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [email, api_token, subdomain, start_date]
- [access_token, subdomain, start_date]
settings:
- name: email
kind: email
label: Email
description: This is the email you use to login to your Zendesk dashboard. For
API Authentication, `/token` is automatically appended to the email address
and is not required in the configuration.
placeholder: Ex. me@my-organization.com
- name: api_token
kind: password
label: API Token
documentation: https://support.zendesk.com/hc/en-us/articles/226022787-Generating-a-new-API-token-
description: You can use the API Token authentication which can be generated
from the Zendesk Admin page.
placeholder: Ex. *****************
- name: access_token
kind: password
label: Access Token
documentation: https://support.zendesk.com/hc/en-us/articles/203663836
description: To use OAuth, you will need to fetch an `access_token` from a configured
Zendesk integration.
- name: subdomain
label: Zendesk Subdomain
documentation: https://support.zendesk.com/hc/en-us/articles/221682747-Where-can-I-find-my-Zendesk-subdomain-
description: >
When visiting your Zendesk instance, the URL is structured as follows: `SUBDOMAIN.zendesk.com`.
For example, if the URL is `meltano.zendesk.com`, then the subdomain is `meltano`.
placeholder: Ex. my-subdomain.zendesk.com
- name: start_date
kind: date_iso8601
label: Start Date
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: singer-io
docs: https://hub.meltano.com/extractors/zendesk.html
repo: https://github.com/singer-io/tap-zendesk
pip_url: tap-zendesk
capabilities:
- catalog
- discover
- state
settings_group_validation:
- [email, api_token, subdomain, start_date]
- [access_token, subdomain, start_date]
settings:
- name: email
kind: email
placeholder: Ex. me@my-organization.com
- name: api_token
kind: password
label: API Token
documentation: https://support.zendesk.com/hc/en-us/articles/226022787-Generating-a-new-API-token-
placeholder: Ex. *****************
- name: access_token
kind: password
documentation: https://support.zendesk.com/hc/en-us/articles/203663836
description: OAuth Access Token
- name: subdomain
label: Zendesk Subdomain
documentation: https://support.zendesk.com/hc/en-us/articles/221682747-Where-can-I-find-my-Zendesk-subdomain-
placeholder: Ex. my-subdomain.zendesk.com
- name: start_date
kind: date_iso8601
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
- name: tap-zoom
namespace: tap_zoom
label: Zoom
description: Video conferencing software
variant: mashey
docs: https://hub.meltano.com/extractors/zoom.html
repo: https://github.com/mashey/tap-zoom
pip_url: git+https://github.com/mashey/tap-zoom.git
capabilities:
- catalog
- discover
settings_group_validation:
- [jwt]
- [client_id, client_secret, refresh_token]
settings:
- name: jwt
kind: password
label: JSON Web Token
documentation: https://marketplace.zoom.us/docs/guides/auth/jwt
- name: client_id
documentation: https://marketplace.zoom.us/docs/guides/auth/oauth
- name: client_secret
kind: password
documentation: https://marketplace.zoom.us/docs/guides/auth/oauth
- name: refresh_token
kind: password
documentation: https://marketplace.zoom.us/docs/guides/auth/oauth
- name: tap-solarvista
namespace: tap_solarvista
label: Solarvista Live
description: Solarvista Live is a Field Service Management platform to manage, plan,
and mobilise your engineers.
variant: matatika
docs: https://hub.meltano.com/extractors/solarvista.html
repo: https://github.com/Matatika/tap-solarvista
pip_url: git+https://github.com/Matatika/tap-solarvista.git
capabilities:
- state
- catalog
- discover
settings_group_validation:
- - start_date
- clientId
- code
- account
settings:
- name: datasources
kind: array
label: Datasources
description: The Datasources to sync from Solarvista. Leave list blank to use
the default list of Datasources.
- name: account
label: Account
description: The AccountId for your Solarvista account.
- name: clientId
label: Client ID
description: Your Solarvista Client ID to create your access token and credentials.
- name: code
kind: password
label: Authorization Code
description: Your API authorization code retrieved using your account id, client
id, and access token.
- name: start_date
kind: date_iso8601
label: Start Date
description: Determines how much historical data will be extracted. Please be
aware that the larger the time period and amount of data, the longer the initial
extraction can be expected to take.
# Hidden taps:
- name: tap-carbon-intensity
namespace: tap_carbon_intensity
label: Carbon Emissions Intensity
description: National Grid ESO's Carbon Emissions Intensity API
variant: meltano
repo: https://gitlab.com/meltano/tap-carbon-intensity
pip_url: git+https://gitlab.com/meltano/tap-carbon-intensity.git
capabilities:
- discover
hidden: true
loaders:
- name: target-bigquery
namespace: target_bigquery
label: BigQuery
description: BigQuery loader
variant: adswerve
docs: https://hub.meltano.com/loaders/bigquery.html
repo: https://github.com/adswerve/target-bigquery
pip_url: git+https://github.com/adswerve/target-bigquery.git@0.11.3
settings_group_validation:
- [project_id, dataset_id, location, credentials_path]
settings:
- name: project_id
label: Project Id
description: BigQuery project
- name: dataset_id
value: $MELTANO_EXTRACT__LOAD_SCHEMA
label: Dataset Id
description: BigQuery dataset
- name: location
value: US
label: Location
description: Dataset location
- name: credentials_path
value: $MELTANO_PROJECT_ROOT/client_secrets.json
label: Credentials Path
description: Fully qualified path to `client_secrets.json` for your service account.
- name: validate_records
kind: boolean
value: false
label: Validate Records
description: Validate records
- name: add_metadata_columns
kind: boolean
value: false
label: Add Metadata Columns
description: Add `_time_extracted` and `_time_loaded` metadata columns
- name: replication_method
kind: options
value: append
label: Replication Method
description: Replication method, `append` or `truncate`
options:
- label: Append
value: append
- label: Truncate
value: truncate
- name: table_prefix
label: Table Prefix
description: Add prefix to table name
- name: table_suffix
label: Table Suffix
description: Add suffix to table name
- name: max_cache
value: 50
label: Max Cache
description: Maximum cache size in MB
- name: merge_state_messages
kind: boolean
value: false
label: Merge State Messages
    description: Whether to merge multiple state messages from the tap into the state
      file or use the last state message as the state file.
- name: table_config
label: Table Config
description: A path to a file containing the definition of partitioning and clustering.
dialect: bigquery
target_schema: $TARGET_BIGQUERY_DATASET_ID
- name: target-csv
namespace: target_csv
label: Comma Separated Values (CSV)
description: CSV loader
variants:
- name: hotgluexyz
docs: https://hub.meltano.com/loaders/csv.html
repo: https://github.com/hotgluexyz/target-csv
pip_url: git+https://github.com/hotgluexyz/target-csv.git@0.3.3
settings:
- name: destination_path
value: output
description: Sets the destination path the CSV files are written to, relative
to the project root. The directory needs to exist already, it will not be
created automatically. To write CSV files to the project root, set an empty
string (`""`).
- name: delimiter
kind: options
value: ','
description: A one-character string used to separate fields. It defaults to
a comma (,).
options:
- label: Comma (,)
value: ','
- label: Tab ( )
value: \t
- label: Semi-colon (;)
value: ;
- label: Pipe (|)
value: '|'
- name: quotechar
kind: options
value: "'"
description: A one-character string used to quote fields containing special
characters, such as the delimiter or quotechar, or which contain new-line
characters. It defaults to single quote (').
options:
- label: Single Quote (')
value: "'"
- label: Double Quote (")
value: '"'
- name: singer-io
docs: https://hub.meltano.com/loaders/csv.html
repo: https://github.com/singer-io/target-csv
pip_url: target-csv
settings:
- name: destination_path
value: output
description: Sets the destination path the CSV files are written to, relative
to the project root. The directory needs to exist already, it will not be
created automatically. To write CSV files to the project root, set an empty
string (`""`).
- name: delimiter
kind: options
value: ','
description: A one-character string used to separate fields. It defaults to
a comma (,).
options:
- label: Comma (,)
value: ','
- label: Tab ( )
value: \t
- label: Semi-colon (;)
value: ;
- label: Pipe (|)
value: '|'
- name: quotechar
kind: options
value: "'"
description: A one-character string used to quote fields containing special
characters, such as the delimiter or quotechar, or which contain new-line
characters. It defaults to single quote (').
options:
- label: Single Quote (')
value: "'"
- label: Double Quote (")
value: '"'
- name: target-jsonl
namespace: target_jsonl
label: JSON Lines (JSONL)
description: JSONL loader
variant: andyh1203
docs: https://hub.meltano.com/loaders/jsonl.html
repo: https://github.com/andyh1203/target-jsonl
pip_url: target-jsonl
settings:
- name: destination_path
value: output
description: Sets the destination path the JSONL files are written to, relative
to the project root. The directory needs to exist already, it will not be created
automatically. To write JSONL files to the project root, set an empty string
(`""`).
- name: do_timestamp_file
kind: boolean
value: false
label: Include timestamp in file names
description: Specifies if the files should get timestamped
- name: target-postgres
namespace: target_postgres
label: PostgreSQL
description: PostgreSQL database loader
dialect: postgres
target_schema: $TARGET_POSTGRES_SCHEMA
variants:
- name: transferwise
docs: https://hub.meltano.com/loaders/postgres.html
repo: https://github.com/transferwise/pipelinewise-target-postgres
pip_url: pipelinewise-target-postgres
settings_group_validation:
- [host, port, user, password, dbname, default_target_schema]
settings:
- name: host
value: localhost
label: Host
description: PostgreSQL host
- name: port
kind: integer
value: 5432
label: Port
description: PostgreSQL port
- name: user
label: User
description: PostgreSQL user
- name: password
kind: password
label: Password
description: PostgreSQL password
- name: dbname
label: Database Name
description: PostgreSQL database name
- name: ssl
kind: boolean
value: false
label: SSL
value_post_processor: stringify
- name: default_target_schema
aliases: [schema]
env: TARGET_POSTGRES_SCHEMA
value: $MELTANO_EXTRACT__LOAD_SCHEMA
label: Default Target Schema
# Optional settings
description: Name of the schema where the tables will be created. If `schema_mapping`
is not defined then every stream sent by the tap is loaded into this schema.
- name: batch_size_rows
kind: integer
value: 100000
label: Batch Size Rows
description: Maximum number of rows in each batch. At the end of each batch,
the rows in the batch are loaded into Postgres.
- name: flush_all_streams
kind: boolean
value: false
label: Flush All Streams
description: 'Flush and load every stream into Postgres when one batch is full.
Warning: This may trigger the COPY command to use files with low number of
records.'
- name: parallelism
kind: integer
value: 0
label: Parallelism
description: The number of threads used to flush tables. 0 will create a thread
for each stream, up to parallelism_max. -1 will create a thread for each CPU
core. Any other positive number will create that number of threads, up to
parallelism_max.
- name: parallelism_max
kind: integer
value: 16
label: Max Parallelism
description: Max number of parallel threads to use when flushing tables.
- name: default_target_schema_select_permission
label: Default Target Schema Select Permission
description: Grant USAGE privilege on newly created schemas and grant SELECT
privilege on newly created tables to a specific role or a list of roles. If
`schema_mapping` is not defined then every stream sent by the tap is granted
accordingly.
- name: schema_mapping
kind: object
label: Schema Mapping
description: >
Useful if you want to load multiple streams from one tap to multiple Postgres
schemas.
If the tap sends the `stream_id` in `<schema_name>-<table_name>` format then
this option overwrites the `default_target_schema` value.
Note, that using `schema_mapping` you can overwrite the `default_target_schema_select_permission`
value to grant SELECT permissions to different groups per schemas or optionally
you can create indices automatically for the replicated tables.
- name: add_metadata_columns
kind: boolean
value: false
label: Add Metadata Columns
description: Metadata columns add extra row level information about data ingestions,
(i.e. when was the row read in source, when was inserted or deleted in postgres
        etc.) Metadata columns are created automatically by adding extra columns
to the tables with a column prefix `_SDC_`. The column names are following
the stitch naming conventions documented at https://www.stitchdata.com/docs/data-structure/integration-schemas#sdc-columns.
Enabling metadata columns will flag the deleted rows by setting the `_SDC_DELETED_AT`
metadata column. Without the `add_metadata_columns` option the deleted rows
        from singer taps will not be recognisable in Postgres.
- name: hard_delete
kind: boolean
value: false
label: Hard Delete
description: When `hard_delete` option is true then DELETE SQL commands will
be performed in Postgres to delete rows in tables. It's achieved by continuously
        checking the `_SDC_DELETED_AT` metadata column sent by the singer tap. Because
        deleting rows requires metadata columns, the `hard_delete` option automatically
enables the `add_metadata_columns` option as well.
- name: data_flattening_max_level
kind: integer
value: 0
label: Data Flattening Max Level
description: Object type RECORD items from taps can be transformed to flattened
columns by creating columns automatically. When value is 0 (default) then
flattening functionality is turned off.
- name: primary_key_required
kind: boolean
value: true
label: Primary Key Required
description: Log based and Incremental replications on tables with no Primary
Key cause duplicates when merging UPDATE events. When set to true, stop loading
data if no Primary Key is defined.
- name: validate_records
kind: boolean
value: false
label: Validate Records
description: Validate every single record message to the corresponding JSON
schema. This option is disabled by default and invalid RECORD messages will
fail only at load time by Postgres. Enabling this option will detect invalid
records earlier but could cause performance degradation.
- name: temp_dir
label: Temporary Directory
description: '(Default: platform-dependent) Directory of temporary CSV files
with RECORD messages.'
- name: datamill-co
docs: https://hub.meltano.com/loaders/postgres--datamill-co.html
repo: https://github.com/datamill-co/target-postgres
pip_url: singer-target-postgres
settings_group_validation:
- [postgres_host, postgres_port, postgres_database, postgres_username, postgres_password,
postgres_schema]
settings:
- name: postgres_host
value: localhost
- name: postgres_port
kind: integer
value: 5432
- name: postgres_database
- name: postgres_username
- name: postgres_password
kind: password
- name: postgres_schema
aliases: [schema]
value: $MELTANO_EXTRACT__LOAD_SCHEMA
- name: postgres_sslmode
value: prefer
description: 'Refer to the libpq docs for more information about SSL: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-PARAMKEYWORDS'
- name: postgres_sslcert
value: ~/.postgresql/postgresql.crt
description: Only used if a SSL request w/ a client certificate is being made
- name: postgres_sslkey
value: ~/.postgresql/postgresql.key
description: Only used if a SSL request w/ a client certificate is being made
- name: postgres_sslrootcert
value: ~/.postgresql/root.crt
description: Used for authentication of a server SSL certificate
- name: postgres_sslcrl
value: ~/.postgresql/root.crl
description: Used for authentication of a server SSL certificate
- name: invalid_records_detect
kind: boolean
value: true
description: Include `false` in your config to disable `target-postgres` from
crashing on invalid records
- name: invalid_records_threshold
kind: integer
value: 0
description: Include a positive value `n` in your config to allow for `target-postgres`
to encounter at most `n` invalid records per stream before giving up.
- name: disable_collection
kind: boolean
value: false
description: 'Include `true` in your config to disable Singer Usage Logging:
https://github.com/datamill-co/target-postgres#usage-logging'
- name: logging_level
kind: options
value: INFO
description: The level for logging. Set to `DEBUG` to get things like queries
executed, timing of those queries, etc.
options:
- label: Debug
value: DEBUG
- label: Info
value: INFO
- label: Warning
value: WARNING
- label: Error
value: ERROR
- label: Critical
value: CRITICAL
- name: persist_empty_tables
kind: boolean
value: false
description: Whether the Target should create tables which have no records present
in Remote.
- name: max_batch_rows
kind: integer
value: 200000
description: The maximum number of rows to buffer in memory before writing to
the destination table in Postgres
- name: max_buffer_size
kind: integer
value: 104857600
description: 'The maximum number of bytes to buffer in memory before writing
to the destination table in Postgres. Default: 100MB in bytes'
- name: batch_detection_threshold
kind: integer
description: How often, in rows received, to count the buffered rows and bytes
to check if a flush is necessary. There's a slight performance penalty to
checking the buffered records count or bytesize, so this controls how often
this is polled in order to mitigate the penalty. This value is usually not
necessary to set as the default is dynamically adjusted to check reasonably
often.
- name: state_support
kind: boolean
value: true
description: Whether the Target should emit `STATE` messages to stdout for further
consumption. In this mode, which is on by default, STATE messages are buffered
in memory until all the records that occurred before them are flushed according
to the batch flushing schedule the target is configured with.
- name: add_upsert_indexes
kind: boolean
value: true
description: Whether the Target should create column indexes on the important
columns used during data loading. These indexes will make data loading slightly
slower but the deduplication phase much faster. Defaults to on for better
baseline performance.
- name: before_run_sql
description: Raw SQL statement(s) to execute as soon as the connection to Postgres
is opened by the target. Useful for setup like `SET ROLE` or other connection
state that is important.
    - name: after_run_sql
      description: Raw SQL statement(s) to execute on the connection to Postgres right
        before the target closes it, once the run has finished. Useful for tearing
        down any connection state established by `before_run_sql`.
- name: meltano
original: true
docs: https://hub.meltano.com/loaders/postgres--meltano.html
repo: https://github.com/meltano/target-postgres
pip_url: git+https://github.com/meltano/target-postgres.git
settings_group_validation:
- [url, schema]
- [user, password, host, port, dbname, schema]
settings:
- name: user
aliases: [username]
value: warehouse
- name: password
kind: password
value: warehouse
- name: host
aliases: [address]
value: localhost
- name: port
kind: integer
value: 5502
- name: dbname
aliases: [database]
value: warehouse
label: Database Name
- name: url
label: URL
description: Lets you set `user`, `password`, `host`, `port`, and `dbname` in
one go using a `postgresql://` URI. Takes precedence over the other settings
when set.
- name: schema
value: $MELTANO_EXTRACT__LOAD_SCHEMA
- name: target-snowflake
namespace: target_snowflake
label: Snowflake
description: Snowflake database loader
dialect: snowflake
target_schema: $TARGET_SNOWFLAKE_SCHEMA
variants:
- name: transferwise
docs: https://hub.meltano.com/loaders/snowflake.html
repo: https://github.com/transferwise/pipelinewise-target-snowflake
pip_url: pipelinewise-target-snowflake
settings_group_validation:
- [account, dbname, user, password, warehouse, file_format, default_target_schema]
settings:
- name: account
label: Account
description: Snowflake account name (i.e. rtXXXXX.eu-central-1)
placeholder: E.g. rtXXXXX.eu-central-1
- name: dbname
aliases: [database]
label: DB Name
description: Snowflake Database name
- name: user
aliases: [username]
label: User
description: Snowflake User
- name: password
kind: password
label: Password
description: Snowflake Password
- name: warehouse
label: Warehouse
description: Snowflake virtual warehouse name
- name: file_format
label: File Format
description: The Snowflake file format object name which needs to be manually
created as part of the pre-requirements section of the docs. Has to be the
fully qualified name including the schema. Refer to the docs for more details
https://github.com/transferwise/pipelinewise-target-snowflake#pre-requirements.
# Optional settings
- name: role
label: Role
description: Snowflake role to use. If not defined then the user's default role
will be used.
- name: aws_access_key_id
kind: password
label: AWS Access Key ID
description: S3 Access Key Id. If not provided, `AWS_ACCESS_KEY_ID` environment
variable or IAM role will be used
- name: aws_secret_access_key
kind: password
label: AWS Secret Access Key
description: S3 Secret Access Key. If not provided, `AWS_SECRET_ACCESS_KEY`
environment variable or IAM role will be used
- name: aws_session_token
kind: password
label: AWS Session Token
description: AWS Session token. If not provided, `AWS_SESSION_TOKEN` environment
variable will be used
- name: aws_profile
label: AWS Profile
description: AWS profile name for profile based authentication. If not provided,
`AWS_PROFILE` environment variable will be used.
- name: default_target_schema
aliases: [schema]
value: $MELTANO_EXTRACT__LOAD_SCHEMA
label: Default Target Schema
description: Name of the schema where the tables will be created, without database
prefix. If `schema_mapping` is not defined then every stream sent by the tap
is loaded into this schema.
value_processor: upcase_string
- name: s3_bucket
label: S3 Bucket
description: S3 Bucket name
- name: s3_key_prefix
label: S3 Key Prefix
description: A static prefix before the generated S3 key names. Using prefixes
you can upload files into specific directories in the S3 bucket.
- name: s3_endpoint_url
label: S3 Endpoint URL
      description: The complete URL to use for the constructed client. This allows
        the use of a non-native S3 account.
- name: s3_region_name
label: S3 Region Name
description: Default region when creating new connections
- name: s3_acl
label: S3 ACL
description: S3 ACL name to set on the uploaded files
- name: stage
label: Stage
description: Named external stage name created at pre-requirements section.
Has to be a fully qualified name including the schema name
- name: batch_size_rows
kind: integer
value: 100000
label: Batch Size Rows
description: Maximum number of rows in each batch. At the end of each batch,
the rows in the batch are loaded into Snowflake.
- name: batch_wait_limit_seconds
kind: integer
label: Batch Wait Limit Seconds
description: Maximum time to wait for batch to reach batch_size_rows.
- name: flush_all_streams
kind: boolean
value: false
label: Flush All Streams
description: 'Flush and load every stream into Snowflake when one batch is full.
Warning: This may trigger the COPY command to use files with low number of
records, and may cause performance problems.'
- name: parallelism
kind: integer
value: 0
label: Parallelism
description: The number of threads used to flush tables. 0 will create a thread
for each stream, up to parallelism_max. -1 will create a thread for each CPU
core. Any other positive number will create that number of threads, up to
parallelism_max.
- name: parallelism_max
kind: integer
value: 16
label: Parallelism Max
description: Max number of parallel threads to use when flushing tables.
- name: default_target_schema_select_permission
label: Default Target Schema Select Permission
description: Grant USAGE privilege on newly created schemas and grant SELECT
privilege on newly created tables to a specific role or a list of roles. If
`schema_mapping` is not defined then every stream sent by the tap is granted
accordingly.
- name: schema_mapping
kind: object
label: Schema Mapping
description: >
Useful if you want to load multiple streams from one tap to multiple Snowflake
schemas.
If the tap sends the `stream_id` in `<schema_name>-<table_name>` format then
this option overwrites the `default_target_schema` value.
Note, that using `schema_mapping` you can overwrite the `default_target_schema_select_permission`
value to grant SELECT permissions to different groups per schemas or optionally
you can create indices automatically for the replicated tables.
- name: disable_table_cache
kind: boolean
value: false
label: Disable Table Cache
description: By default the connector caches the available table structures
in Snowflake at startup. In this way it doesn't need to run additional queries
when ingesting data to check if altering the target tables is required. With
`disable_table_cache` option you can turn off this caching. You will always
see the most recent table structures but will cause an extra query runtime.
- name: client_side_encryption_master_key
kind: password
label: Client Side Encryption Master Key
description: When this is defined, Client-Side Encryption is enabled. The data
in S3 will be encrypted, No third parties, including Amazon AWS and any ISPs,
can see data in the clear. Snowflake COPY command will decrypt the data once
it's in Snowflake. The master key must be 256-bit length and must be encoded
as base64 string.
- name: client_side_encryption_stage_object
label: Client Side Encryption Stage Object
description: Required when `client_side_encryption_master_key` is defined. The
        name of the encrypted stage object in Snowflake that was created separately
        using the same encryption master key.
- name: add_metadata_columns
kind: boolean
value: false
label: Add Metadata Columns
description: Metadata columns add extra row level information about data ingestions,
(i.e. when was the row read in source, when was inserted or deleted in snowflake
        etc.) Metadata columns are created automatically by adding extra columns
to the tables with a column prefix `_SDC_`. The column names are following
the stitch naming conventions documented at https://www.stitchdata.com/docs/data-structure/integration-schemas#sdc-columns.
Enabling metadata columns will flag the deleted rows by setting the `_SDC_DELETED_AT`
metadata column. Without the `add_metadata_columns` option the deleted rows
        from singer taps will not be recognisable in Snowflake.
- name: hard_delete
kind: boolean
value: false
label: Hard Delete
description: When `hard_delete` option is true then DELETE SQL commands will
be performed in Snowflake to delete rows in tables. It's achieved by continuously
        checking the `_SDC_DELETED_AT` metadata column sent by the singer tap. Because
        deleting rows requires metadata columns, the `hard_delete` option automatically
enables the `add_metadata_columns` option as well.
- name: data_flattening_max_level
kind: integer
value: 0
label: Data Flattening Max Level
description: Object type RECORD items from taps can be loaded into VARIANT columns
as JSON (default) or we can flatten the schema by creating columns automatically.
When value is 0 (default) then flattening functionality is turned off.
- name: primary_key_required
kind: boolean
value: true
label: Primary Key Required
description: Log based and Incremental replications on tables with no Primary
Key cause duplicates when merging UPDATE events. When set to true, stop loading
data if no Primary Key is defined.
- name: validate_records
kind: boolean
value: false
label: Validate Records
description: Validate every single record message to the corresponding JSON
schema. This option is disabled by default and invalid RECORD messages will
fail only at load time by Snowflake. Enabling this option will detect invalid
records earlier but could cause performance degradation.
- name: temp_dir
label: Temporary Directory
description: '(Default: platform-dependent) Directory of temporary CSV files
with RECORD messages.'
- name: no_compression
kind: boolean
value: false
label: No Compression
description: Generate uncompressed CSV files when loading to Snowflake. Normally,
by default GZIP compressed files are generated.
- name: query_tag
label: Query Tag
description: Optional string to tag executed queries in Snowflake. Replaces
tokens `schema` and `table` with the appropriate values. The tags are displayed
in the output of the Snowflake `QUERY_HISTORY`, `QUERY_HISTORY_BY_*` functions.
- name: archive_load_files
kind: boolean
value: false
label: Archive Load Files
description: When enabled, the files loaded to Snowflake will also be stored
in archive_load_files_s3_bucket under the key /{archive_load_files_s3_prefix}/{schema_name}/{table_name}/.
All archived files will have tap, schema, table and archived-by as S3 metadata
keys. When incremental replication is used, the archived files will also have
the following S3 metadata keys - incremental-key, incremental-key-min and
incremental-key-max.
- name: archive_load_files_s3_prefix
label: Archive Load Files S3 Prefix
description: When archive_load_files is enabled, the archived files will be
placed in the archive S3 bucket under this prefix.
- name: archive_load_files_s3_bucket
label: Archive Load Files S3 Bucket
description: When archive_load_files is enabled, the archived files will be
placed in this bucket.
- name: datamill-co
docs: https://hub.meltano.com/loaders/snowflake--datamill-co.html
repo: https://github.com/datamill-co/target-snowflake
pip_url: target-snowflake
settings_group_validation:
- [snowflake_account, snowflake_username, snowflake_password, snowflake_database,
snowflake_warehouse]
settings:
- name: snowflake_account
label: Snowflake Account
description: >
`ACCOUNT` might require the `region` and `cloud` platform where your account
is located, in the form of: `<your_account_name>.<region_id>.<cloud>` (e.g.
`xy12345.east-us-2.azure`)
Refer to Snowflake's documentation about Account: https://docs.snowflake.net/manuals/user-guide/connecting.html#your-snowflake-account-name-and-url
- name: snowflake_username
label: Snowflake Username
- name: snowflake_password
kind: password
label: Snowflake Password
- name: snowflake_role
label: Snowflake Role
description: If not specified, Snowflake will use the user's default role.
- name: snowflake_database
label: Snowflake Database
- name: snowflake_authenticator
value: snowflake
label: Snowflake Authenticator
      description: Specifies the authentication provider for Snowflake to use. Valid
options are the internal one ("snowflake"), a browser session ("externalbrowser"),
or Okta ("https://<your_okta_account_name>.okta.com"). See the snowflake docs
for more details.
- name: snowflake_warehouse
label: Snowflake Warehouse
# Optional settings
- name: invalid_records_detect
kind: boolean
value: true
label: Invalid Records Detect
description: Include `false` in your config to disable crashing on invalid records
- name: invalid_records_threshold
kind: integer
value: 0
label: Invalid Records Threshold
description: Include a positive value `n` in your config to allow at most `n`
invalid records per stream before giving up.
- name: disable_collection
kind: boolean
value: false
label: Disable Collection
description: 'Include `true` in your config to disable Singer Usage Logging:
https://github.com/datamill-co/target-snowflake#usage-logging'
- name: logging_level
kind: options
value: INFO
label: Logging Level
description: The level for logging. Set to `DEBUG` to get things like queries
executed, timing of those queries, etc.
options:
- label: Debug
value: DEBUG
- label: Info
value: INFO
- label: Warning
value: WARNING
- label: Error
value: ERROR
- label: Critical
value: CRITICAL
- name: persist_empty_tables
kind: boolean
value: false
label: Persist Empty Tables
description: Whether the Target should create tables which have no records present
in Remote.
- name: snowflake_schema
aliases: [schema]
value: $MELTANO_EXTRACT__LOAD_SCHEMA
label: Snowflake Schema
value_processor: upcase_string
- name: state_support
kind: boolean
value: true
label: State Support
description: Whether the Target should emit `STATE` messages to stdout for further
consumption. In this mode, which is on by default, STATE messages are buffered
in memory until all the records that occurred before them are flushed according
to the batch flushing schedule the target is configured with.
- name: target_s3.bucket
label: Target S3 Bucket
description: When included, use S3 to stage files. Bucket where staging files
should be uploaded to.
- name: target_s3.key_prefix
label: Target S3 Key Prefix
description: Prefix for staging file uploads to allow for better delineation
of tmp files
- name: target_s3.aws_access_key_id
kind: password
label: Target S3 AWS Access Key ID
- name: target_s3.aws_secret_access_key
kind: password
label: Target S3 AWS Secret Access Key
- name: meltano
original: true
docs: https://hub.meltano.com/loaders/snowflake--meltano.html
repo: https://gitlab.com/meltano/target-snowflake
pip_url: git+https://gitlab.com/meltano/target-snowflake.git
settings_group_validation:
- [account, username, password, role, database, warehouse, schema]
settings:
- name: account
label: Account
description: Account Name in Snowflake (https://XXXXX.snowflakecomputing.com)
- name: username
label: Username
description: The username you use for logging in
- name: password
kind: password
label: Password
description: The password you use for logging in
- name: role
label: Role
description: Role to be used for loading the data, e.g. `LOADER`. Also this
role is GRANTed usage to all tables and schemas created
- name: database
label: Database
description: The name of the Snowflake database you want to use
- name: warehouse
label: Warehouse
description: The name of the Snowflake warehouse you want to use
- name: schema
value: $MELTANO_EXTRACT__LOAD_SCHEMA
label: Schema
value_processor: upcase_string
- name: batch_size
kind: integer
value: 5000
label: Batch Size
description: How many records are sent to Snowflake at a time?
- name: timestamp_column
value: __loaded_at
label: Timestamp Column
description: Name of the column used for recording the timestamp when Data are
uploaded to Snowflake.
hidden: true
- name: target-sqlite
namespace: target_sqlite
label: SQLite
description: SQLite database loader
variants:
- name: meltanolabs
docs: https://hub.meltano.com/loaders/sqlite.html
repo: https://github.com/MeltanoLabs/target-sqlite
pip_url: git+https://github.com/MeltanoLabs/target-sqlite.git
settings_group_validation:
- [batch_size]
settings:
- name: database
value: warehouse
label: Database Name
description: Name of the SQLite database file to be used or created, relative
to the project root. The `.db` extension is optional and will be added automatically
when omitted.
- name: batch_size
kind: integer
value: 50
label: Batch Size
description: How many records are sent to SQLite at a time?
- name: timestamp_column
value: __loaded_at
label: Timestamp Column
description: Name of the column used for recording the timestamp when Data are
loaded to SQLite.
dialect: sqlite
- name: meltano
repo: https://gitlab.com/meltano/target-sqlite
pip_url: git+https://gitlab.com/meltano/target-sqlite.git
settings_group_validation:
- [batch_size]
settings:
- name: database
value: warehouse
label: Database Name
description: Name of the SQLite database file to be used or created, relative
to the project root. The `.db` extension is optional and will be added automatically
when omitted.
- name: batch_size
kind: integer
value: 50
description: How many records are sent to SQLite at a time?
- name: timestamp_column
value: __loaded_at
description: Name of the column used for recording the timestamp when Data are
loaded to SQLite.
hidden: true
dialect: sqlite
- name: target-redshift
namespace: target_redshift
label: Amazon Redshift
description: Amazon Redshift loader
variant: transferwise
docs: https://hub.meltano.com/loaders/redshift.html
repo: https://github.com/transferwise/pipelinewise-target-redshift
pip_url: pipelinewise-target-redshift
executable: target-redshift
capabilities:
- activate-version
- soft-delete
- hard-delete
- datatype-failsafe
- record-flattening
settings_group_validation:
- [host, port, user, password, dbname, s3_bucket, default_target_schema, aws_profile]
- [host, port, user, password, dbname, s3_bucket, default_target_schema, aws_access_key_id,
aws_secret_access_key]
- [host, port, user, password, dbname, s3_bucket, default_target_schema, aws_session_token]
settings:
- name: host
description: Redshift host
- name: port
kind: integer
value: 5439
description: Redshift port
- name: dbname
label: Database Name
description: Redshift database name
- name: user
label: User name
description: Redshift user name
- name: password
kind: password
description: Redshift password
- name: s3_bucket
label: S3 Bucket name
description: AWS S3 bucket name
- name: default_target_schema
value: $MELTANO_EXTRACT__LOAD_SCHEMA
description: Name of the schema where the tables will be created. If schema_mapping
is not defined then every stream sent by the tap is loaded into this schema.
# Optional settings
- name: aws_profile
label: AWS profile name
description: AWS profile name for profile based authentication. If not provided,
AWS_PROFILE environment variable will be used.
- name: aws_access_key_id
kind: password
label: AWS S3 Access Key ID
description: S3 Access Key Id. Used for S3 and Redshift copy operations. If not
provided, AWS_ACCESS_KEY_ID environment variable will be used.
- name: aws_secret_access_key
kind: password
label: AWS S3 Secret Access Key
description: S3 Secret Access Key. Used for S3 and Redshift copy operations. If
not provided, AWS_SECRET_ACCESS_KEY environment variable will be used.
- name: aws_session_token
kind: password
label: AWS S3 Session Token
description: S3 AWS STS token for temporary credentials. If not provided, AWS_SESSION_TOKEN
environment variable will be used.
- name: aws_redshift_copy_role_arn
label: AWS Redshift COPY role ARN
description: AWS Role ARN to be used for the Redshift COPY operation. Used instead
of the given AWS keys for the COPY operation if provided - the keys are still
used for other S3 operations
- name: s3_acl
label: AWS S3 ACL
description: S3 Object ACL
- name: s3_key_prefix
label: S3 Key Prefix
description: A static prefix before the generated S3 key names. Using prefixes
you can upload files into specific directories in the S3 bucket. Default(None)
- name: copy_options
value: EMPTYASNULL BLANKSASNULL TRIMBLANKS TRUNCATECOLUMNS TIMEFORMAT 'auto' COMPUPDATE
OFF STATUPDATE OFF
label: COPY options
description: >
Parameters to use in the COPY command when loading data to Redshift. Some basic
      file formatting parameters are fixed values and overriding them with custom values
      is not recommended. They are like: `CSV GZIP DELIMITER ',' REMOVEQUOTES ESCAPE`.
- name: batch_size_rows
kind: integer
value: 100000
description: Maximum number of rows in each batch. At the end of each batch, the
rows in the batch are loaded into Redshift.
- name: flush_all_streams
kind: boolean
value: false
description: Flush and load every stream into Redshift when one batch is full.
Warning - This may trigger the COPY command to use files with low number of
records, and may cause performance problems.
- name: parallelism
kind: integer
value: 0
description: The number of threads used to flush tables. 0 will create a thread
for each stream, up to parallelism_max. -1 will create a thread for each CPU
core. Any other positive number will create that number of threads, up to parallelism_max.
- name: max_parallelism
kind: integer
value: 16
description: Max number of parallel threads to use when flushing tables.
- name: default_target_schema_select_permissions
description: Grant USAGE privilege on newly created schemas and grant SELECT privilege
on newly created tables to a specific list of users or groups. If schema_mapping
is not defined then every stream sent by the tap is granted accordingly.
- name: schema_mapping
kind: object
description: Useful if you want to load multiple streams from one tap to multiple
Redshift schemas. If the tap sends the stream_id in <schema_name>-<table_name>
format then this option overwrites the default_target_schema value. Note, that
using schema_mapping you can overwrite the default_target_schema_select_permissions
value to grant SELECT permissions to different groups per schemas or optionally
you can create indices automatically for the replicated tables.
- name: disable_table_cache
kind: boolean
value: false
description: By default the connector caches the available table structures in
Redshift at startup. In this way it doesn't need to run additional queries when
ingesting data to check if altering the target tables is required. With disable_table_cache
option you can turn off this caching. You will always see the most recent table
structures but will cause an extra query runtime.
- name: add_metadata_columns
kind: boolean
value: false
description: Metadata columns add extra row level information about data ingestions,
(i.e. when was the row read in source, when was inserted or deleted in redshift
    etc.) Metadata columns are created automatically by adding extra columns to
the tables with a column prefix _SDC_. The metadata columns are documented at
https://transferwise.github.io/pipelinewise/data_structure/sdc-columns.html.
Enabling metadata columns will flag the deleted rows by setting the _SDC_DELETED_AT
metadata column. Without the add_metadata_columns option the deleted rows from
    singer taps will not be recognisable in Redshift.
- name: hard_delete
kind: boolean
value: false
description: When hard_delete option is true then DELETE SQL commands will be
performed in Redshift to delete rows in tables. It's achieved by continuously
checking the _SDC_DELETED_AT metadata column sent by the singer tap. Due to
deleting rows requires metadata columns, hard_delete option automatically enables
the add_metadata_columns option as well.
- name: data_flattening_max_level
kind: integer
value: 0
description: Object type RECORD items from taps can be loaded into VARIANT columns
as JSON (default) or we can flatten the schema by creating columns automatically.
When value is 0 (default) then flattening functionality is turned off.
- name: primary_key_required
kind: boolean
value: true
description: Log based and Incremental replications on tables with no Primary
Key cause duplicates when merging UPDATE events. When set to true, stop loading
data if no Primary Key is defined.
- name: validate_records
kind: boolean
value: false
description: Validate every single record message to the corresponding JSON schema.
This option is disabled by default and invalid RECORD messages will fail only
at load time by Redshift. Enabling this option will detect invalid records earlier
but could cause performance degradation.
- name: skip_updates
kind: boolean
value: false
description: Do not update existing records when Primary Key is defined. Useful
to improve performance when records are immutable, e.g. events
- name: compression
kind: options
description: The compression method to use when writing files to S3 and running
Redshift COPY.
options:
- label: None
value: ''
- label: gzip
value: gzip
- label: bzip2
value: bzip2
- name: slices
kind: integer
value: 1
description: The number of slices to split files into prior to running COPY on
Redshift. This should be set to the number of Redshift slices. The number of
slices per node depends on the node size of the cluster - run SELECT COUNT(DISTINCT
slice) slices FROM stv_slices to calculate this. Defaults to 1.
- name: temp_dir
label: Temp directory
description: '(Default: platform-dependent) Directory of temporary CSV files with
RECORD messages.'
dialect: redshift
target_schema: $TARGET_REDSHIFT_SCHEMA
transforms:
- name: tap-adwords
namespace: tap_adwords
vars:
schema: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_adwords') }}"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-adwords
pip_url: https://gitlab.com/meltano/dbt-tap-adwords.git@config-version-2
- name: tap-carbon-intensity
namespace: tap_carbon_intensity
vars:
entry_table: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_carbon_intensity') }}.entry"
generationmix_table: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_carbon_intensity')\
\ }}.generationmix"
region_table: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_carbon_intensity') }}.region"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-carbon-intensity
pip_url: https://gitlab.com/meltano/dbt-tap-carbon-intensity.git@config-version-2
- name: tap-facebook
namespace: tap_facebook
vars:
schema: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_facebook') }}"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-facebook
pip_url: https://gitlab.com/meltano/dbt-tap-facebook.git@config-version-2
- name: tap-gitlab
namespace: tap_gitlab
vars:
schema: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_gitlab') }}"
ultimate_license: "{{ env_var('GITLAB_API_ULTIMATE_LICENSE', False) }}"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-gitlab
pip_url: https://gitlab.com/meltano/dbt-tap-gitlab.git@config-version-2
- name: tap-google-analytics
namespace: tap_google_analytics
vars:
schema: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_google_analytics') }}"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-google-analytics
pip_url: https://gitlab.com/meltano/dbt-tap-google-analytics.git@config-version-2
- name: tap-salesforce
namespace: tap_salesforce
vars:
schema: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_salesforce') }}"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-salesforce
pip_url: https://gitlab.com/meltano/dbt-tap-salesforce.git@config-version-2
- name: tap-shopify
namespace: tap_shopify
vars:
schema: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_shopify') }}"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-shopify
pip_url: https://gitlab.com/meltano/dbt-tap-shopify.git@config-version-2
- name: tap-stripe
namespace: tap_stripe
vars:
livemode: false
schema: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_stripe') }}"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-stripe
pip_url: https://gitlab.com/meltano/dbt-tap-stripe.git@config-version-2
- name: tap-zendesk
namespace: tap_zendesk
vars:
schema: "{{ env_var('DBT_SOURCE_SCHEMA', 'tap_zendesk') }}"
variant: meltano
repo: https://gitlab.com/meltano/dbt-tap-zendesk
pip_url: https://gitlab.com/meltano/dbt-tap-zendesk.git@config-version-2
orchestrators:
- name: airflow
namespace: airflow
docs: https://docs.meltano.com/guide/orchestration
repo: https://github.com/apache/airflow
pip_url: apache-airflow==2.1.2 --constraint https://raw.githubusercontent.com/apache/airflow/constraints-2.1.2/constraints-${MELTANO__PYTHON_VERSION}.txt
settings:
- name: core.dags_folder
env: AIRFLOW__CORE__DAGS_FOLDER
value: $MELTANO_PROJECT_ROOT/orchestrate/dags
- name: core.plugins_folder
env: AIRFLOW__CORE__PLUGINS_FOLDER
value: $MELTANO_PROJECT_ROOT/orchestrate/plugins
- name: core.sql_alchemy_conn
env: AIRFLOW__CORE__SQL_ALCHEMY_CONN
value: sqlite:///$MELTANO_PROJECT_ROOT/.meltano/orchestrators/airflow/airflow.db
- name: core.load_examples
env: AIRFLOW__CORE__LOAD_EXAMPLES
value: false
- name: core.dags_are_paused_at_creation
env: AIRFLOW__CORE__DAGS_ARE_PAUSED_AT_CREATION
value: false
requires:
files:
- name: airflow
variant: meltano
transformers:
- name: dbt
namespace: dbt
label: dbt
variant: dbt-labs
docs: https://docs.meltano.com/guide/transformation
repo: https://github.com/dbt-labs/dbt-core
pip_url: dbt-core~=1.0.0 dbt-postgres~=1.0.0 dbt-redshift~=1.0.0 dbt-snowflake~=1.0.0
dbt-bigquery~=1.0.0
settings:
- name: project_dir
value: $MELTANO_PROJECT_ROOT/transform
label: Project Directory
- name: profiles_dir
env: DBT_PROFILES_DIR
value: $MELTANO_PROJECT_ROOT/transform/profile
label: Profiles Directory
- name: target
value: $MELTANO_LOAD__DIALECT
label: Target
- name: source_schema
value: $MELTANO_LOAD__TARGET_SCHEMA
label: Source Schema
- name: target_schema
value: analytics
label: Target Schema
- name: models
value: $MELTANO_TRANSFORM__PACKAGE_NAME $MELTANO_EXTRACTOR_NAMESPACE my_meltano_project
label: Models
commands:
clean:
args: clean
description: Delete all folders in the clean-targets list (usually the dbt_modules
and target directories.)
compile:
args: compile --models $DBT_MODELS
description: Generates executable SQL from source model, test, and analysis
files. Compiled SQL files are written to the target/ directory.
deps:
args: deps
description: Pull the most recent version of the dependencies listed in packages.yml
run:
args: run --models $DBT_MODELS
description: Compile SQL and execute against the current target database.
seed:
args: seed
description: Load data from csv files into your data warehouse.
snapshot:
args: snapshot
description: Execute snapshots defined in your project.
test:
args: test
description: Runs tests on data in deployed models.
requires:
files:
- name: dbt
variant: meltano
- name: dbt-snowflake
namespace: dbt_snowflake
variant: dbt-labs
docs: https://docs.meltano.com/guide/transformation
repo: https://github.com/dbt-labs/dbt-snowflake
pip_url: dbt-core~=1.0.0 dbt-snowflake~=1.0.0
executable: dbt
settings:
- name: project_dir
value: $MELTANO_PROJECT_ROOT/transform
    label: Project Directory
- name: profiles_dir
env: DBT_PROFILES_DIR
value: $MELTANO_PROJECT_ROOT/transform/profiles/snowflake
label: Profiles Directory
- name: account
kind: string
label: Account
description: The snowflake account to connect to.
- name: user
kind: string
label: User
description: The user to connect as.
- name: password
kind: password
label: Password
description: The user password to authenticate with.
- name: role
kind: string
label: Role
description: The user role to assume.
- name: warehouse
kind: string
label: Warehouse
description: The compute warehouse to use when building models.
- name: database
kind: string
label: Database
description: The database to create models in.
- name: schema
kind: string
label: Schema
description: The schema to build models into by default.
commands:
clean:
args: clean
description: Delete all folders in the clean-targets list (usually the dbt_modules
and target directories.)
compile:
args: compile
description: Generates executable SQL from source model, test, and analysis
files. Compiled SQL files are written to the target/ directory.
deps:
args: deps
description: Pull the most recent version of the dependencies listed in packages.yml
run:
args: run
description: Compile SQL and execute against the current target database.
seed:
args: seed
description: Load data from csv files into your data warehouse.
snapshot:
args: snapshot
description: Execute snapshots defined in your project.
test:
args: test
description: Runs tests on data in deployed models.
requires:
files:
- name: files-dbt-snowflake
variant: meltano
- name: dbt-postgres
namespace: dbt_postgres
label: dbt Postgres
variant: dbt-labs
docs: https://docs.meltano.com/guide/transformation
repo: https://github.com/dbt-labs/dbt-core
pip_url: dbt-core~=1.0.0 dbt-postgres~=1.0.0
executable: dbt
settings:
- name: project_dir
value: $MELTANO_PROJECT_ROOT/transform
    label: Project Directory
- name: profiles_dir
env: DBT_PROFILES_DIR
# Postgres connection settings are set via `config:` blocks and mapped to `profiles.yml`
value: $MELTANO_PROJECT_ROOT/transform/profiles/postgres
label: Profiles Directory
- name: host
kind: string
label: Host
description: |
The postgres host to connect to.
- name: user
kind: string
label: User
description: |
The user to connect as.
- name: password
kind: password
label: Password
description: |
The password to connect with.
- name: port
kind: integer
label: Port
description: |
The port to connect to.
- name: dbname
aliases: [database]
kind: string
label: Database
description: |
The db to connect to.
- name: schema
kind: string
label: Schema
description: |
The schema to use.
- name: keepalives_idle
kind: integer
label: Keep Alives Idle
description: |
Seconds between TCP keepalive packets.
- name: search_path
kind: string
label: Search Path
description: |
Overrides the default search path.
- name: role
kind: string
label: Role
description: |
Role for dbt to assume when executing queries.
- name: sslmode
kind: array
label: SSL Mode
description: |
SSL Mode used to connect to the database.
commands:
clean:
args: clean
description: Delete all folders in the clean-targets list (usually the dbt_modules
and target directories.)
compile:
args: compile
description: Generates executable SQL from source model, test, and analysis
files. Compiled SQL files are written to the target/ directory.
deps:
args: deps
description: Pull the most recent version of the dependencies listed in packages.yml
run:
args: run
description: Compile SQL and execute against the current target database.
seed:
args: seed
description: Load data from csv files into your data warehouse.
snapshot:
args: snapshot
description: Execute snapshots defined in your project.
test:
args: test
description: Runs tests on data in deployed models.
requires:
files:
- name: files-dbt-postgres
variant: meltano
- name: dbt-redshift
namespace: dbt_redshift
label: dbt Redshift
variant: dbt-labs
docs: https://docs.meltano.com/guide/transformation
repo: https://github.com/dbt-labs/dbt-redshift
pip_url: dbt-core~=1.0.0 dbt-redshift~=1.0.0
executable: dbt
settings:
- name: project_dir
value: $MELTANO_PROJECT_ROOT/transform
label: Project Directory
- name: profiles_dir
env: DBT_PROFILES_DIR
# Redshift connection settings are set via `config:` blocks and mapped to `profiles.yml`
value: $MELTANO_PROJECT_ROOT/transform/profiles/redshift
label: Profiles Directory
- name: auth_method
kind: string
label: Authentication Method
description: |
      The auth method to use (to use IAM, set to "iam";
      omit to use password-based auth).
- name: cluster_id
kind: string
label: Cluster ID
description: |
The cluster id.
- name: host
kind: string
label: Host
description: |
The host for the cluster.
- name: password
kind: password
label: Password
description: |
The password, if using password-based auth.
- name: user
kind: string
label: User
description: |
The user to connect as.
- name: port
kind: integer
label: Port
description: |
The port to connect to.
- name: dbname
aliases: [database]
kind: string
label: Database
description: |
The name of the db to connect to.
- name: schema
kind: string
label: Schema
description: |
The schema to use.
- name: iam_profile
kind: string
label: IAM Profile
description: |
The IAM profile to use.
- name: iam_duration_seconds
kind: integer
label: IAM Duration Seconds
description: |
Duration of the IAM session.
- name: autocreate
kind: boolean
label: Autocreate
description: |
Whether or not to automatically create entities.
- name: db_groups
kind: array
    label: Database Groups
description: |
Database groups to use.
- name: keepalives_idle
kind: integer
label: Keep Alives Idle
description: |
Seconds between TCP keepalive packets
- name: search_path
kind: string
label: Search Path
description: |
The search path to use (use of this setting is not recommended)
- name: sslmode
kind: array
label: SSL Mode
description: |
      SSL Mode used to connect to Redshift.
- name: ra3_node
kind: boolean
label: RA3 Node
description: |
Enables cross-database sources.
commands:
clean:
args: clean
description: Delete all folders in the clean-targets list (usually the dbt_modules
and target directories.)
compile:
args: compile
description: Generates executable SQL from source model, test, and analysis
files. Compiled SQL files are written to the target/ directory.
deps:
args: deps
description: Pull the most recent version of the dependencies listed in packages.yml
run:
args: run
description: Compile SQL and execute against the current target database.
seed:
args: seed
description: Load data from csv files into your data warehouse.
snapshot:
args: snapshot
description: Execute snapshots defined in your project.
test:
args: test
description: Runs tests on data in deployed models.
requires:
files:
- name: files-dbt-redshift
variant: meltano
- name: dbt-bigquery
namespace: dbt_bigquery
label: dbt BigQuery
variant: dbt-labs
docs: https://docs.meltano.com/guide/transformation
repo: https://github.com/dbt-labs/dbt-bigquery
pip_url: dbt-core~=1.0.0 dbt-bigquery~=1.0.0
executable: dbt
settings:
- name: project_dir
value: $MELTANO_PROJECT_ROOT/transform
label: Project Directory
- name: profiles_dir
env: DBT_PROFILES_DIR
# BigQuery connection settings are set via `config:` blocks and mapped to `profiles.yml`
value: $MELTANO_PROJECT_ROOT/transform/profiles/bigquery
label: Profiles Directory
- name: auth_method
kind: string
label: Authentication Method
description: |
The auth method to use. One of: "oauth", "oauth-secrets", or "service-account"
- name: project
aliases: [database]
kind: string
label: Project
description: |
The BigQuery project ID.
- name: dataset
aliases: [schema]
kind: string
description: |
The dataset to use.
- name: refresh_token
kind: password
label: Refresh Token
description: |
The refresh token, if authenticating via oauth-secrets method.
- name: client_id
aliases: [user]
kind: string
label: Client ID
description: |
The client id to use, if authenticating via oauth-secrets method.
- name: client_secret
aliases: [password]
kind: password
label: Client Secret
description: |
The client secret to use, if authenticating via oauth-secrets method.
- name: token_uri
kind: string
label: Token URI
description: |
The token redirect URI
- name: keyfile
kind: string
description: |
      The path to the `keyfile.json` to use, if authenticating via service-account method.
commands:
clean:
args: clean
description: Delete all folders in the clean-targets list (usually the dbt_modules
and target directories.)
compile:
args: compile
description: Generates executable SQL from source model, test, and analysis
files. Compiled SQL files are written to the target/ directory.
deps:
args: deps
description: Pull the most recent version of the dependencies listed in packages.yml
run:
args: run
description: Compile SQL and execute against the current target database.
seed:
args: seed
description: Load data from csv files into your data warehouse.
snapshot:
args: snapshot
description: Execute snapshots defined in your project.
test:
args: test
description: Runs tests on data in deployed models.
requires:
files:
- name: files-dbt-bigquery
variant: meltano
files:
- name: airflow
namespace: airflow
update:
orchestrate/dags/meltano.py: true
variant: meltano
repo: https://gitlab.com/meltano/files-airflow
pip_url: git+https://gitlab.com/meltano/files-airflow.git
- name: dbt
namespace: dbt
variant: meltano
repo: https://gitlab.com/meltano/files-dbt
pip_url: git+https://gitlab.com/meltano/files-dbt.git@3120-deprecate-env-aliases-config-v2
- name: docker
namespace: docker
variant: meltano
repo: https://gitlab.com/meltano/files-docker
pip_url: git+https://gitlab.com/meltano/files-docker.git
- name: docker-compose
namespace: docker_compose
variant: meltano
repo: https://gitlab.com/meltano/files-docker-compose
pip_url: git+https://gitlab.com/meltano/files-docker-compose.git
- name: gitlab-ci
namespace: gitlab_ci
variant: meltano
repo: https://gitlab.com/meltano/files-gitlab-ci
pip_url: git+https://gitlab.com/meltano/files-gitlab-ci.git
- name: great_expectations
namespace: great_expectations
variant: meltano
repo: https://gitlab.com/meltano/files-great-expectations
pip_url: git+https://gitlab.com/meltano/files-great-expectations
- name: dbt-snowflake
namespace: files_dbt_snowflake
variant: meltano
repo: https://gitlab.com/meltano/files-dbt-snowflake
pip_url: git+https://gitlab.com/meltano/files-dbt-snowflake
- name: dbt-postgres
namespace: files_dbt_postgres
variant: meltano
repo: https://gitlab.com/meltano/files-dbt-postgres
pip_url: git+https://gitlab.com/meltano/files-dbt-postgres
- name: dbt-redshift
namespace: files_dbt_redshift
variant: meltano
repo: https://gitlab.com/meltano/files-dbt-redshift
pip_url: git+https://gitlab.com/meltano/files-dbt-redshift
- name: dbt-bigquery
namespace: files_dbt_bigquery
variant: meltano
repo: https://gitlab.com/meltano/files-dbt-bigquery
pip_url: git+https://gitlab.com/meltano/files-dbt-bigquery
utilities:
- name: sqlfluff
namespace: sqlfluff
pip_url: sqlfluff[dbt]
commands:
lint:
args: lint
description: Lint SQL in transform models
- name: great_expectations
namespace: great_expectations
pip_url: great_expectations
executable: great_expectations
settings:
- name: ge_home
env: GE_HOME
value: $MELTANO_PROJECT_ROOT/utilities/great_expectations
- name: superset
namespace: superset
label: Superset
variant: apache
docs: https://docs.meltano.com/guide/analysis
repo: https://github.com/apache/superset
pip_url: apache-superset==1.5.0 markupsafe==2.0.1
description: A modern, enterprise-ready business intelligence web application.
logo_url: /assets/logos/utilities/superset.png
settings:
- name: ui.bind_host
value: 0.0.0.0
- name: ui.port
value: 8088
- name: ui.timeout
value: 60
- name: ui.workers
value: 4
- name: SQLALCHEMY_DATABASE_URI
value: sqlite:///$MELTANO_PROJECT_ROOT/.meltano/utilities/superset/superset.db
- name: SECRET_KEY
kind: password
value: thisisnotapropersecretkey
commands:
ui:
args: --bind $SUPERSET_UI_BIND_HOST:$SUPERSET_UI_PORT --timeout $SUPERSET_UI_TIMEOUT
--workers $SUPERSET_UI_WORKERS superset.app:create_app()
description: Start the Superset UI. Will be available on the configured `ui.bind_host`
        and `ui.port`, which default to `http://localhost:8088`
executable: gunicorn
create-admin:
args: fab create-admin
description: Create an admin user.
load_examples:
args: load_examples
description: Load examples.
mappers:
- name: transform-field
namespace: transform_field
variant: transferwise
repo: https://github.com/transferwise/pipelinewise-transform-field
pip_url: pipelinewise-transform-field
executable: transform-field
- name: meltano-map-transformer
namespace: meltano_map_transformer
variant: meltano
repo: https://github.com/MeltanoLabs/meltano-map-transform
pip_url: git+https://github.com/MeltanoLabs/meltano-map-transform.git
executable: meltano-map-transform
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203555--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203555--tap-okta--target-jsonl/5cbbad48-8a14-4e50-ad46-ba91469c20d4/elt.log | 2023-12-14T20:35:55.548396Z [info ] Running extract & load... name=meltano run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl
2023-12-14T20:35:55.640047Z [warning ] No state was found, complete import.
2023-12-14T20:35:56.318379Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Added 'applicationusers' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.318646Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Added 'applicationgroups' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.318723Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Added 'groupmembers' as child stream to 'groups' cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.318778Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Added 'factors' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.318853Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Added 'signonpolicyrules' as child stream to 'signonpolicies' cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.318902Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Added 'passwordpolicyrules' as child stream to 'passwordpolicies' cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.318979Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Added 'userroles' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319024Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'adminemailnotificationsettings'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319065Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'applicationgroups'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319107Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'applications'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319145Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'applicationusers'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319183Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'customroles'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319221Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'factors'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319262Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'features'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319319Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'groupmembers'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319360Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'groups'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319397Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'logstream'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319432Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'networkzones'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319468Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'orgprivacyoktasupport'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319503Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'passwordpolicies'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319537Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'passwordpolicyrules'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319573Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'securitynotificationsettings'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319610Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'signonpolicies'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319648Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'signonpolicyrules'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319684Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'thirdpartyadminsettings'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319721Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'threatsconfiguration'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319758Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Skipping deselected stream 'userroles'. cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319794Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Beginning full_table sync of 'users'... cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319835Z [info ] 2023-12-14 12:35:56,318 | INFO | tap-okta | Tap has custom mapper. Using 1 provided map(s). cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.319964Z [info ] 2023-12-14 12:35:56,318 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "http_request_count", "value": 0, "tags": {"stream": "users", "endpoint": "/api/v1/users"}} cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320091Z [info ] 2023-12-14 12:35:56,319 | INFO | singer_sdk.metrics | METRIC: {"type": "timer", "metric": "sync_duration", "value": 0.0002391338348388672, "tags": {"stream": "users", "context": {}, "status": "failed"}} cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320146Z [info ] 2023-12-14 12:35:56,319 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "record_count", "value": 0, "tags": {"stream": "users", "context": {}}} cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320197Z [info ] Traceback (most recent call last): cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320241Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 434, in prepare_url cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320289Z [info ] scheme, auth, host, port, path, query, fragment = parse_url(url) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320325Z [info ] ^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320360Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/util/url.py", line 397, in parse_url cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320398Z [info ] return six.raise_from(LocationParseError(source_url), None) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320434Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320466Z [info ] File "<string>", line 3, in raise_from cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320504Z [info ] urllib3.exceptions.LocationParseError: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/v1/users cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320553Z [info ] cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320602Z [info ] During handling of the above exception, another exception occurred: cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320641Z [info ] cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320800Z [info ] Traceback (most recent call last): cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320838Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/tap-okta", line 8, in <module> cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.320922Z [info ] sys.exit(Tapokta.cli()) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321004Z [info ] ^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321102Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1157, in __call__ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321153Z [info ] return self.main(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321198Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321238Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1078, in main cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321279Z [info ] rv = self.invoke(ctx) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321319Z [info ] ^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321358Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1434, in invoke cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321467Z [info ] return ctx.invoke(self.callback, **ctx.params) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321516Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321561Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 783, in invoke cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321606Z [info ] return __callback(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321643Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321678Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 533, in cli cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321782Z [info ] tap.sync_all() cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321863Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 423, in sync_all cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321918Z [info ] stream.sync() cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.321983Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1194, in sync cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322130Z [info ] for _ in self._sync_records(context=context): cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322175Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1093, in _sync_records cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322284Z [info ] for record_result in self.get_records(current_context): cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322334Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/okta.py", line 179, in get_records cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322647Z [info ] for record in self.request_records(context): cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322691Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 374, in request_records cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322790Z [info ] prepared_request = self.prepare_request( cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322837Z [info ] ^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322874Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/okta.py", line 87, in prepare_request cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322952Z [info ] return super().prepare_request(context, next_page_token) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.322993Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.323028Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 348, in prepare_request cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.323075Z [info ] return self.build_prepared_request( cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.323113Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.323148Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 320, in build_prepared_request cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.323216Z [info ] return self.requests_session.prepare_request(request) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.323260Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.323314Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/sessions.py", line 484, in prepare_request cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.323506Z [info ] p.prepare( cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.325901Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 368, in prepare cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.326048Z [info ] self.prepare_url(url, params) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.326132Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 436, in prepare_url cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.326419Z [info ] raise InvalidURL(*e.args) cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.326469Z [info ] requests.exceptions.InvalidURL: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/v1/users cmd_type=extractor name=tap-okta run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.364341Z [error ] Extraction failed code=1 message=requests.exceptions.InvalidURL: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/v1/users name=meltano run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl
2023-12-14T20:35:56.364652Z [info ] ELT could not be completed: Extractor failed. cmd_type=elt name=meltano run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.364720Z [info ] For more detailed log messages re-run the command using 'meltano --log-level=debug ...' CLI flag. cmd_type=elt name=meltano run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.364776Z [info ] Note that you can also check the generated log file at '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203555--tap-okta--target-jsonl/5cbbad48-8a14-4e50-ad46-ba91469c20d4/elt.log'. cmd_type=elt name=meltano run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:56.364825Z [info ] For more information on debugging and logging: https://docs.meltano.com/reference/command-line-interface#debugging cmd_type=elt name=meltano run_id=5cbbad48-8a14-4e50-ad46-ba91469c20d4 state_id=2023-12-14T203555--tap-okta--target-jsonl stdio=stderr
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203524--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203524--tap-okta--target-jsonl/dfd99928-512a-4e0f-941f-d792179c413b/elt.log | 2023-12-14T20:35:25.335365Z [info ] Running extract & load... name=meltano run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl
2023-12-14T20:35:25.429844Z [warning ] No state was found, complete import.
2023-12-14T20:35:26.154173Z [info ] 2023-12-14 12:35:26,153 | INFO | tap-okta | Added 'applicationusers' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154466Z [info ] 2023-12-14 12:35:26,153 | INFO | tap-okta | Added 'applicationgroups' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154545Z [info ] 2023-12-14 12:35:26,153 | INFO | tap-okta | Added 'groupmembers' as child stream to 'groups' cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154599Z [info ] 2023-12-14 12:35:26,153 | INFO | tap-okta | Added 'factors' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154665Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Added 'signonpolicyrules' as child stream to 'signonpolicies' cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154711Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Added 'passwordpolicyrules' as child stream to 'passwordpolicies' cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154762Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Added 'userroles' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154859Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'adminemailnotificationsettings'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154908Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'applicationgroups'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154950Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'applications'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.154988Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'applicationusers'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155025Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'customroles'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155061Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'factors'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155100Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'features'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155151Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'groupmembers'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155188Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'groups'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155230Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'logstream'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155273Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'networkzones'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155312Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'orgprivacyoktasupport'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155346Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'passwordpolicies'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155382Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'passwordpolicyrules'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155418Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'securitynotificationsettings'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155454Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'signonpolicies'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155492Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'signonpolicyrules'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155530Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'thirdpartyadminsettings'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155565Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'threatsconfiguration'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155601Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Skipping deselected stream 'userroles'. cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155636Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Beginning full_table sync of 'users'... cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.155678Z [info ] 2023-12-14 12:35:26,154 | INFO | tap-okta | Tap has custom mapper. Using 1 provided map(s). cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.257859Z [info ] 2023-12-14 12:35:26,257 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "http_request_count", "value": 0, "tags": {"stream": "users", "endpoint": "/api/v1/users"}} cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.257988Z [info ] 2023-12-14 12:35:26,257 | INFO | singer_sdk.metrics | METRIC: {"type": "timer", "metric": "sync_duration", "value": 0.10316991806030273, "tags": {"stream": "users", "context": {}, "status": "failed"}} cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.258043Z [info ] 2023-12-14 12:35:26,257 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "record_count", "value": 0, "tags": {"stream": "users", "context": {}}} cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.258215Z [info ] Traceback (most recent call last): cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.258347Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/tap-okta", line 8, in <module> cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.258488Z [info ] sys.exit(Tapokta.cli()) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.258551Z [info ] ^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.258596Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1157, in __call__ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.258961Z [info ] return self.main(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259033Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259071Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1078, in main cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259197Z [info ] rv = self.invoke(ctx) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259264Z [info ] ^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259301Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1434, in invoke cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259376Z [info ] return ctx.invoke(self.callback, **ctx.params) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259668Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259740Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 783, in invoke cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259852Z [info ] return __callback(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.259950Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260001Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 533, in cli cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260171Z [info ] tap.sync_all() cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260219Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 423, in sync_all cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260312Z [info ] stream.sync() cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260395Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1194, in sync cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260603Z [info ] for _ in self._sync_records(context=context): cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260649Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1093, in _sync_records cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260829Z [info ] for record_result in self.get_records(current_context): cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.260868Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/okta.py", line 179, in get_records cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261134Z [info ] for record in self.request_records(context): cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261195Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 378, in request_records cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261303Z [info ] resp = decorated_request(prepared_request, context) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261400Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261450Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_sync.py", line 105, in retry cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261537Z [info ] ret = target(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261597Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261639Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 265, in _request cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261714Z [info ] response = self.requests_session.send(prepared_request, timeout=self.timeout) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261758Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.261795Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/sessions.py", line 701, in send cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.262082Z [info ] r = adapter.send(request, **kwargs) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.262147Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.262188Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/adapters.py", line 487, in send cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.262266Z [info ] resp = conn.urlopen( cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.262314Z [info ] ^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.262376Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/connectionpool.py", line 715, in urlopen cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.262635Z [info ] httplib_response = self._make_request( cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.263426Z [info ] ^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.263480Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/connectionpool.py", line 416, in _make_request cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.263567Z [info ] conn.request(method, url, **httplib_request_kw) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.263722Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/connection.py", line 244, in request cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.263766Z [info ] super(HTTPConnection, self).request(method, url, body=body, headers=headers) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.263806Z [info ] File "/Users/nchebolu/.asdf/installs/python/3.11.1/lib/python3.11/http/client.py", line 1282, in request cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.263900Z [info ] self._send_request(method, url, body, headers, encode_chunked) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.263951Z [info ] File "/Users/nchebolu/.asdf/installs/python/3.11.1/lib/python3.11/http/client.py", line 1323, in _send_request cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.264000Z [info ] self.putheader(hdr, value) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.264041Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/connection.py", line 224, in putheader cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.264109Z [info ] _HTTPConnection.putheader(self, header, *values) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.264147Z [info ] File "/Users/nchebolu/.asdf/installs/python/3.11.1/lib/python3.11/http/client.py", line 1255, in putheader cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.264219Z [info ] values[i] = one_value.encode('latin-1') cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.264264Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.264300Z [info ] UnicodeEncodeError: 'latin-1' codec can't encode character '\u201c' in position 5: ordinal not in range(256) cmd_type=extractor name=tap-okta run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.309933Z [error ] Extraction failed code=1 message=UnicodeEncodeError: 'latin-1' codec can't encode character '\u201c' in position 5: ordinal not in range(256) name=meltano run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl
2023-12-14T20:35:26.310190Z [info ] ELT could not be completed: Extractor failed. cmd_type=elt name=meltano run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.310247Z [info ] For more detailed log messages re-run the command using 'meltano --log-level=debug ...' CLI flag. cmd_type=elt name=meltano run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.310296Z [info ] Note that you can also check the generated log file at '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203524--tap-okta--target-jsonl/dfd99928-512a-4e0f-941f-d792179c413b/elt.log'. cmd_type=elt name=meltano run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:26.310339Z [info ] For more information on debugging and logging: https://docs.meltano.com/reference/command-line-interface#debugging cmd_type=elt name=meltano run_id=dfd99928-512a-4e0f-941f-d792179c413b state_id=2023-12-14T203524--tap-okta--target-jsonl stdio=stderr
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203458--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203458--tap-okta--target-jsonl/9c38a32f-67f3-41de-8c00-fdbc34fd40f6/elt.log | 2023-12-14T20:34:59.588925Z [info ] Running extract & load... name=meltano run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl
2023-12-14T20:34:59.684061Z [warning ] No state was found, complete import.
2023-12-14T20:35:00.411057Z [info ] 2023-12-14 12:35:00,410 | INFO | tap-okta | Added 'applicationusers' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411346Z [info ] 2023-12-14 12:35:00,410 | INFO | tap-okta | Added 'applicationgroups' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411419Z [info ] 2023-12-14 12:35:00,410 | INFO | tap-okta | Added 'groupmembers' as child stream to 'groups' cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411472Z [info ] 2023-12-14 12:35:00,410 | INFO | tap-okta | Added 'factors' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411547Z [info ] 2023-12-14 12:35:00,410 | INFO | tap-okta | Added 'signonpolicyrules' as child stream to 'signonpolicies' cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411594Z [info ] 2023-12-14 12:35:00,410 | INFO | tap-okta | Added 'passwordpolicyrules' as child stream to 'passwordpolicies' cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411675Z [info ] 2023-12-14 12:35:00,410 | INFO | tap-okta | Added 'userroles' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411721Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'adminemailnotificationsettings'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411765Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'applicationgroups'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411804Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'applications'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411843Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'applicationusers'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411881Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'customroles'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411920Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'factors'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.411959Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'features'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412012Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'groupmembers'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412051Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'groups'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412092Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'logstream'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412137Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'networkzones'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412173Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'orgprivacyoktasupport'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412213Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'passwordpolicies'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412250Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'passwordpolicyrules'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412285Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'securitynotificationsettings'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412320Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'signonpolicies'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412359Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'signonpolicyrules'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412393Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'thirdpartyadminsettings'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412428Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'threatsconfiguration'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412461Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Skipping deselected stream 'userroles'. cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412496Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Beginning full_table sync of 'users'... cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.412537Z [info ] 2023-12-14 12:35:00,411 | INFO | tap-okta | Tap has custom mapper. Using 1 provided map(s). cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.538551Z [info ] 2023-12-14 12:35:00,538 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "http_request_count", "value": 0, "tags": {"stream": "users", "endpoint": "/api/v1/users"}} cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.538699Z [info ] 2023-12-14 12:35:00,538 | INFO | singer_sdk.metrics | METRIC: {"type": "timer", "metric": "sync_duration", "value": 0.12699413299560547, "tags": {"stream": "users", "context": {}, "status": "failed"}} cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.538759Z [info ] 2023-12-14 12:35:00,538 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "record_count", "value": 0, "tags": {"stream": "users", "context": {}}} cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.538847Z [info ] Traceback (most recent call last): cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.538892Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/tap-okta", line 8, in <module> cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.538935Z [info ] sys.exit(Tapokta.cli()) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.538974Z [info ] ^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.539016Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1157, in __call__ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.540308Z [info ] return self.main(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.540402Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.540452Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1078, in main cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.540558Z [info ] rv = self.invoke(ctx) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.540642Z [info ] ^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.540692Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1434, in invoke cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.540804Z [info ] return ctx.invoke(self.callback, **ctx.params) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.540987Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541027Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 783, in invoke cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541123Z [info ] return __callback(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541179Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541218Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 533, in cli cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541389Z [info ] tap.sync_all() cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541464Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 423, in sync_all cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541553Z [info ] stream.sync() cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541602Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1194, in sync cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.541925Z [info ] for _ in self._sync_records(context=context): cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542011Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1093, in _sync_records cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542104Z [info ] for record_result in self.get_records(current_context): cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542157Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/okta.py", line 179, in get_records cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542410Z [info ] for record in self.request_records(context): cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542467Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 378, in request_records cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542599Z [info ] resp = decorated_request(prepared_request, context) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542676Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542724Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_sync.py", line 105, in retry cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542891Z [info ] ret = target(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.542960Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.543006Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 265, in _request cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.543089Z [info ] response = self.requests_session.send(prepared_request, timeout=self.timeout) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.543134Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.543177Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/sessions.py", line 701, in send cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.543571Z [info ] r = adapter.send(request, **kwargs) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.543646Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.543696Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/adapters.py", line 487, in send cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.544719Z [info ] resp = conn.urlopen( cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.544761Z [info ] ^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.544803Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/connectionpool.py", line 715, in urlopen cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545031Z [info ] httplib_response = self._make_request( cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545071Z [info ] ^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545107Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/connectionpool.py", line 416, in _make_request cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545144Z [info ] conn.request(method, url, **httplib_request_kw) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545178Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/connection.py", line 244, in request cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545214Z [info ] super(HTTPConnection, self).request(method, url, body=body, headers=headers) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545248Z [info ] File "/Users/nchebolu/.asdf/installs/python/3.11.1/lib/python3.11/http/client.py", line 1282, in request cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545390Z [info ] self._send_request(method, url, body, headers, encode_chunked) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545457Z [info ] File "/Users/nchebolu/.asdf/installs/python/3.11.1/lib/python3.11/http/client.py", line 1323, in _send_request cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545613Z [info ] self.putheader(hdr, value) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545675Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/connection.py", line 224, in putheader cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545756Z [info ] _HTTPConnection.putheader(self, header, *values) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545798Z [info ] File "/Users/nchebolu/.asdf/installs/python/3.11.1/lib/python3.11/http/client.py", line 1255, in putheader cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.545980Z [info ] values[i] = one_value.encode('latin-1') cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.546055Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.546098Z [info ] UnicodeEncodeError: 'latin-1' codec can't encode character '\u201c' in position 5: ordinal not in range(256) cmd_type=extractor name=tap-okta run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.589785Z [error ] Extraction failed code=1 message=UnicodeEncodeError: 'latin-1' codec can't encode character '\u201c' in position 5: ordinal not in range(256) name=meltano run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl
2023-12-14T20:35:00.590037Z [info ] ELT could not be completed: Extractor failed. cmd_type=elt name=meltano run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.590097Z [info ] For more detailed log messages re-run the command using 'meltano --log-level=debug ...' CLI flag. cmd_type=elt name=meltano run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.590147Z [info ] Note that you can also check the generated log file at '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203458--tap-okta--target-jsonl/9c38a32f-67f3-41de-8c00-fdbc34fd40f6/elt.log'. cmd_type=elt name=meltano run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:35:00.590194Z [info ] For more information on debugging and logging: https://docs.meltano.com/reference/command-line-interface#debugging cmd_type=elt name=meltano run_id=9c38a32f-67f3-41de-8c00-fdbc34fd40f6 state_id=2023-12-14T203458--tap-okta--target-jsonl stdio=stderr
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203655--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203655--tap-okta--target-jsonl/84f8d218-4561-4035-b016-43b1c47fb583/elt.log | 2023-12-14T20:36:56.373694Z [info ] Running extract & load... name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl
2023-12-14T20:36:56.469331Z [warning ] No state was found, complete import.
2023-12-14T20:36:56.860920Z [info ] ELT could not be completed: Cannot start extractor: Catalog discovery failed: command ['/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/tap-okta', '--config', '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T203655--tap-okta--target-jsonl/84f8d218-4561-4035-b016-43b1c47fb583/tap.846d331a-e821-4703-a861-e434e4d9e389.config.json', '--discover'] returned 1 with stderr: cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861126Z [info ] Traceback (most recent call last): cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861202Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/tap-okta", line 5, in <module> cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861258Z [info ] from tap_okta.tap import Tapokta cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861306Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/tap.py", line 9, in <module> cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861357Z [info ] from tap_okta.streams import ( cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861406Z [info ] ImportError: cannot import name 'AuthenticatorsStream' from 'tap_okta.streams' (/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/streams.py) cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861456Z [info ] . cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861499Z [info ] For more detailed log messages re-run the command using 'meltano --log-level=debug ...' CLI flag. cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861540Z [info ] Note that you can also check the generated log file at '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T203655--tap-okta--target-jsonl/84f8d218-4561-4035-b016-43b1c47fb583/elt.log'. cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:36:56.861580Z [info ] For more information on debugging and logging: https://docs.meltano.com/reference/command-line-interface#debugging cmd_type=elt name=meltano run_id=84f8d218-4561-4035-b016-43b1c47fb583 state_id=2023-12-14T203655--tap-okta--target-jsonl stdio=stderr
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T202704--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T202704--tap-okta--target-jsonl/8bb68179-1c6a-4582-83bd-70aa2cb23805/elt.log | 2023-12-14T20:27:04.729268Z [info ] Running extract & load... name=meltano run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl
2023-12-14T20:27:04.825576Z [warning ] No state was found, complete import.
2023-12-14T20:27:05.578971Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Added 'applicationusers' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579281Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Added 'applicationgroups' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579360Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Added 'groupmembers' as child stream to 'groups' cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579416Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Added 'factors' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579469Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Added 'signonpolicyrules' as child stream to 'signonpolicies' cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579533Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Added 'passwordpolicyrules' as child stream to 'passwordpolicies' cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579587Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Added 'userroles' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579718Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Skipping deselected stream 'adminemailnotificationsettings'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579799Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Skipping deselected stream 'applicationgroups'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579856Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Skipping deselected stream 'applications'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579907Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Skipping deselected stream 'applicationusers'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579951Z [info ] 2023-12-14 12:27:05,578 | INFO | tap-okta | Skipping deselected stream 'customroles'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.579994Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'factors'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580035Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'features'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580092Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'groupmembers'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580132Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'groups'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580174Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'logstream'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580219Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'networkzones'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580280Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'orgprivacyoktasupport'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580314Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'passwordpolicies'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580350Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'passwordpolicyrules'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580384Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'securitynotificationsettings'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580422Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'signonpolicies'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580457Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'signonpolicyrules'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580498Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'thirdpartyadminsettings'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580532Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'threatsconfiguration'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580574Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Skipping deselected stream 'userroles'. cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580611Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Beginning full_table sync of 'users'... cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580660Z [info ] 2023-12-14 12:27:05,579 | INFO | tap-okta | Tap has custom mapper. Using 1 provided map(s). cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580840Z [info ] 2023-12-14 12:27:05,579 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "http_request_count", "value": 0, "tags": {"stream": "users", "endpoint": "/api/v1/users"}} cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580895Z [info ] 2023-12-14 12:27:05,579 | INFO | singer_sdk.metrics | METRIC: {"type": "timer", "metric": "sync_duration", "value": 0.0006389617919921875, "tags": {"stream": "users", "context": {}, "status": "failed"}} cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580943Z [info ] 2023-12-14 12:27:05,580 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "record_count", "value": 0, "tags": {"stream": "users", "context": {}}} cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.580985Z [info ] Traceback (most recent call last): cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581025Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 434, in prepare_url cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581063Z [info ] scheme, auth, host, port, path, query, fragment = parse_url(url) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581100Z [info ] ^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581134Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/util/url.py", line 397, in parse_url cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581181Z [info ] return six.raise_from(LocationParseError(source_url), None) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581217Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581251Z [info ] File "<string>", line 3, in raise_from cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581289Z [info ] urllib3.exceptions.LocationParseError: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/v1/users cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581338Z [info ] cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581380Z [info ] During handling of the above exception, another exception occurred: cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581414Z [info ] cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581598Z [info ] Traceback (most recent call last): cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581636Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/tap-okta", line 8, in <module> cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581727Z [info ] sys.exit(Tapokta.cli()) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581764Z [info ] ^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581798Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1157, in __call__ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581834Z [info ] return self.main(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581868Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.581913Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1078, in main cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582004Z [info ] rv = self.invoke(ctx) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582084Z [info ] ^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582133Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1434, in invoke cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582223Z [info ] return ctx.invoke(self.callback, **ctx.params) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582268Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582308Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 783, in invoke cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582379Z [info ] return __callback(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582437Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582483Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 533, in cli cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582715Z [info ] tap.sync_all() cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582768Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 423, in sync_all cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582847Z [info ] stream.sync() cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.582893Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1194, in sync cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.583163Z [info ] for _ in self._sync_records(context=context): cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.583210Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1093, in _sync_records cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.583312Z [info ] for record_result in self.get_records(current_context): cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.583356Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/okta.py", line 179, in get_records cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.583578Z [info ] for record in self.request_records(context): cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.583617Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 374, in request_records cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.583725Z [info ] prepared_request = self.prepare_request( cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.585666Z [info ] ^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.585742Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/okta.py", line 87, in prepare_request cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.585862Z [info ] return super().prepare_request(context, next_page_token) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586300Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586345Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 348, in prepare_request cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586383Z [info ] return self.build_prepared_request( cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586420Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586455Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 320, in build_prepared_request cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586491Z [info ] return self.requests_session.prepare_request(request) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586525Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586560Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/sessions.py", line 484, in prepare_request cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586596Z [info ] p.prepare( cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586631Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 368, in prepare cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586667Z [info ] self.prepare_url(url, params) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586702Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 436, in prepare_url cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586885Z [info ] raise InvalidURL(*e.args) cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.586948Z [info ] requests.exceptions.InvalidURL: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/v1/users cmd_type=extractor name=tap-okta run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.632667Z [error ] Extraction failed code=1 message=requests.exceptions.InvalidURL: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/v1/users name=meltano run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl
2023-12-14T20:27:05.632939Z [info ] ELT could not be completed: Extractor failed. cmd_type=elt name=meltano run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.633002Z [info ] For more detailed log messages re-run the command using 'meltano --log-level=debug ...' CLI flag. cmd_type=elt name=meltano run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.633047Z [info ] Note that you can also check the generated log file at '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T202704--tap-okta--target-jsonl/8bb68179-1c6a-4582-83bd-70aa2cb23805/elt.log'. cmd_type=elt name=meltano run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:27:05.633088Z [info ] For more information on debugging and logging: https://docs.meltano.com/reference/command-line-interface#debugging cmd_type=elt name=meltano run_id=8bb68179-1c6a-4582-83bd-70aa2cb23805 state_id=2023-12-14T202704--tap-okta--target-jsonl stdio=stderr
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T202312--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T202312--tap-okta--target-jsonl/68edba20-1414-453c-a925-72b7f1d0d175/elt.log | 2023-12-14T20:23:12.709765Z [info ] Running extract & load... name=meltano run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl
2023-12-14T20:23:12.805789Z [warning ] No state was found, complete import.
2023-12-14T20:23:14.307997Z [info ] 2023-12-14 12:23:14,307 | INFO | tap-okta | Added 'applicationusers' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308316Z [info ] 2023-12-14 12:23:14,307 | INFO | tap-okta | Added 'applicationgroups' as child stream to 'applications' cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308394Z [info ] 2023-12-14 12:23:14,307 | INFO | tap-okta | Added 'groupmembers' as child stream to 'groups' cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308448Z [info ] 2023-12-14 12:23:14,307 | INFO | tap-okta | Added 'factors' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308516Z [info ] 2023-12-14 12:23:14,307 | INFO | tap-okta | Added 'signonpolicyrules' as child stream to 'signonpolicies' cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308567Z [info ] 2023-12-14 12:23:14,307 | INFO | tap-okta | Added 'passwordpolicyrules' as child stream to 'passwordpolicies' cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308618Z [info ] 2023-12-14 12:23:14,307 | INFO | tap-okta | Added 'userroles' as child stream to 'users' cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308736Z [info ] 2023-12-14 12:23:14,307 | INFO | tap-okta | Skipping deselected stream 'adminemailnotificationsettings'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308851Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'applicationgroups'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308905Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'applications'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308947Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'applicationusers'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.308988Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'customroles'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309024Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'factors'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309064Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'features'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309114Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'groupmembers'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309150Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'groups'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309363Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'logstream'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309404Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'networkzones'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309440Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'orgauthenticators'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309476Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'orgprivacyoktasupport'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309509Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'passwordpolicies'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309541Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Skipping deselected stream 'passwordpolicyrules'. cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309574Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Beginning full_table sync of 'securitynotificationsettings'... cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309609Z [info ] 2023-12-14 12:23:14,308 | INFO | tap-okta | Tap has custom mapper. Using 1 provided map(s). cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309641Z [info ] 2023-12-14 12:23:14,308 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "http_request_count", "value": 0, "tags": {"stream": "securitynotificationsettings", "endpoint": "/api/internal/org/settings/security-notification-settings"}} cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309686Z [info ] 2023-12-14 12:23:14,308 | INFO | singer_sdk.metrics | METRIC: {"type": "timer", "metric": "sync_duration", "value": 0.0002970695495605469, "tags": {"stream": "securitynotificationsettings", "context": {}, "status": "failed"}} cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309796Z [info ] 2023-12-14 12:23:14,308 | INFO | singer_sdk.metrics | METRIC: {"type": "counter", "metric": "record_count", "value": 0, "tags": {"stream": "securitynotificationsettings", "context": {}}} cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.309895Z [info ] Traceback (most recent call last): cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310000Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 434, in prepare_url cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310057Z [info ] scheme, auth, host, port, path, query, fragment = parse_url(url) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310103Z [info ] ^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310145Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/urllib3/util/url.py", line 397, in parse_url cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310185Z [info ] return six.raise_from(LocationParseError(source_url), None) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310225Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310260Z [info ] File "<string>", line 3, in raise_from cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310295Z [info ] urllib3.exceptions.LocationParseError: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/internal/org/settings/security-notification-settings cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310346Z [info ] cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310389Z [info ] During handling of the above exception, another exception occurred: cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310424Z [info ] cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310459Z [info ] Traceback (most recent call last): cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310496Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/tap-okta", line 8, in <module> cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310534Z [info ] sys.exit(Tapokta.cli()) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310579Z [info ] ^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310615Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1157, in __call__ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310655Z [info ] return self.main(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310694Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310733Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1078, in main cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310773Z [info ] rv = self.invoke(ctx) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310810Z [info ] ^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310844Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 1434, in invoke cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310893Z [info ] return ctx.invoke(self.callback, **ctx.params) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310933Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.310969Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/click/core.py", line 783, in invoke cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311036Z [info ] return __callback(*args, **kwargs) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311073Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311110Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 533, in cli cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311144Z [info ] tap.sync_all() cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311180Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/tap_base.py", line 423, in sync_all cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311222Z [info ] stream.sync() cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311260Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1194, in sync cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311297Z [info ] for _ in self._sync_records(context=context): cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311334Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/core.py", line 1093, in _sync_records cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311369Z [info ] for record_result in self.get_records(current_context): cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311403Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/okta.py", line 179, in get_records cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311442Z [info ] for record in self.request_records(context): cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311476Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 374, in request_records cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311512Z [info ] prepared_request = self.prepare_request( cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311547Z [info ] ^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311581Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/src/tap_okta/okta.py", line 87, in prepare_request cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311655Z [info ] return super().prepare_request(context, next_page_token) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311691Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311725Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 348, in prepare_request cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311762Z [info ] return self.build_prepared_request( cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311807Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311842Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/singer_sdk/streams/rest.py", line 320, in build_prepared_request cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311878Z [info ] return self.requests_session.prepare_request(request) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311912Z [info ] ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311946Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/sessions.py", line 484, in prepare_request cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.311979Z [info ] p.prepare( cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.312013Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 368, in prepare cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.312047Z [info ] self.prepare_url(url, params) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.312083Z [info ] File "/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests/models.py", line 436, in prepare_url cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.312170Z [info ] raise InvalidURL(*e.args) cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.312263Z [info ] requests.exceptions.InvalidURL: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/internal/org/settings/security-notification-settings cmd_type=extractor name=tap-okta run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.497679Z [error ] Extraction failed code=1 message=requests.exceptions.InvalidURL: Failed to parse: https://“https://mravaloie-admin.oktapreview.com”/api/internal/org/settings/security-notification-settings name=meltano run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl
2023-12-14T20:23:14.497986Z [info ] ELT could not be completed: Extractor failed. cmd_type=elt name=meltano run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.498055Z [info ] For more detailed log messages re-run the command using 'meltano --log-level=debug ...' CLI flag. cmd_type=elt name=meltano run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.498108Z [info ] Note that you can also check the generated log file at '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/logs/elt/2023-12-14T202312--tap-okta--target-jsonl/68edba20-1414-453c-a925-72b7f1d0d175/elt.log'. cmd_type=elt name=meltano run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
2023-12-14T20:23:14.498156Z [info ] For more information on debugging and logging: https://docs.meltano.com/reference/command-line-interface#debugging cmd_type=elt name=meltano run_id=68edba20-1414-453c-a925-72b7f1d0d175 state_id=2023-12-14T202312--tap-okta--target-jsonl stdio=stderr
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/bin | #!/Users/nchebolu/work/raptor/.venv/bin/python3
# -*- coding: utf-8 -*-
import re
import sys
from meltano.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T203555--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T203555--tap-okta--target-jsonl/5cbbad48-8a14-4e50-ad46-ba91469c20d4/tap.properties.json | {
"streams": [
{
"tap_stream_id": "adminemailnotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "adminemailnotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationgroups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationgroups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applications",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applications",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationusers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationusers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "customroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "customroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "factors",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "factors",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "features",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "features",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groupmembers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groupmembers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "logstream",
"replication_key": "published",
"replication_method": "INCREMENTAL",
"key_properties": [
"uuid"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "logstream",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"uuid"
],
"valid-replication-keys": [
"published"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "networkzones",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "networkzones",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "orgprivacyoktasupport",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "orgprivacyoktasupport",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "securitynotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "securitynotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "thirdpartyadminsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "thirdpartyadminsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "threatsconfiguration",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "threatsconfiguration",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "userroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "userroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "users",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "users",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": true,
"table-key-properties": [
"id"
]
}
}
],
"selected": true
}
]
} | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T203524--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T203524--tap-okta--target-jsonl/dfd99928-512a-4e0f-941f-d792179c413b/tap.properties.json | {
"streams": [
{
"tap_stream_id": "adminemailnotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "adminemailnotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationgroups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationgroups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applications",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applications",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationusers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationusers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "customroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "customroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "factors",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "factors",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "features",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "features",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groupmembers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groupmembers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "logstream",
"replication_key": "published",
"replication_method": "INCREMENTAL",
"key_properties": [
"uuid"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "logstream",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"uuid"
],
"valid-replication-keys": [
"published"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "networkzones",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "networkzones",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "orgprivacyoktasupport",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "orgprivacyoktasupport",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "securitynotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "securitynotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "thirdpartyadminsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "thirdpartyadminsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "threatsconfiguration",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "threatsconfiguration",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "userroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "userroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "users",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "users",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": true,
"table-key-properties": [
"id"
]
}
}
],
"selected": true
}
]
} | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T203458--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T203458--tap-okta--target-jsonl/9c38a32f-67f3-41de-8c00-fdbc34fd40f6/tap.properties.json | {
"streams": [
{
"tap_stream_id": "adminemailnotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "adminemailnotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationgroups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationgroups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applications",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applications",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationusers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationusers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "customroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "customroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "factors",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "factors",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "features",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "features",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groupmembers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groupmembers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "logstream",
"replication_key": "published",
"replication_method": "INCREMENTAL",
"key_properties": [
"uuid"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "logstream",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"uuid"
],
"valid-replication-keys": [
"published"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "networkzones",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "networkzones",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "orgprivacyoktasupport",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "orgprivacyoktasupport",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "securitynotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "securitynotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "thirdpartyadminsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "thirdpartyadminsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "threatsconfiguration",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "threatsconfiguration",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "userroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "userroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "users",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "users",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": true,
"table-key-properties": [
"id"
]
}
}
],
"selected": true
}
]
} | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T202704--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T202704--tap-okta--target-jsonl/8bb68179-1c6a-4582-83bd-70aa2cb23805/tap.properties.json | {
"streams": [
{
"tap_stream_id": "adminemailnotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "adminemailnotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationgroups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationgroups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applications",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applications",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationusers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationusers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "customroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "customroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "factors",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "factors",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "features",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "features",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groupmembers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groupmembers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "logstream",
"replication_key": "published",
"replication_method": "INCREMENTAL",
"key_properties": [
"uuid"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "logstream",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"uuid"
],
"valid-replication-keys": [
"published"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "networkzones",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "networkzones",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "orgprivacyoktasupport",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "orgprivacyoktasupport",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "securitynotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "securitynotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "thirdpartyadminsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "thirdpartyadminsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "threatsconfiguration",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "threatsconfiguration",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "userroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "userroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "users",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "users",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": true,
"table-key-properties": [
"id"
]
}
}
],
"selected": true
}
]
} | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T202312--tap-okta--target-jsonl | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/run/elt/2023-12-14T202312--tap-okta--target-jsonl/68edba20-1414-453c-a925-72b7f1d0d175/tap.properties.json | {
"streams": [
{
"tap_stream_id": "adminemailnotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "adminemailnotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationgroups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationgroups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applications",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applications",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "applicationusers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "applicationusers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "customroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "customroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "factors",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "factors",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "features",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "features",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groupmembers",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groupmembers",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "groups",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "groups",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "logstream",
"replication_key": "published",
"replication_method": "INCREMENTAL",
"key_properties": [
"uuid"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "logstream",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"uuid"
],
"valid-replication-keys": [
"published"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "networkzones",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "networkzones",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "orgauthenticators",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "orgauthenticators",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "orgprivacyoktasupport",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "orgprivacyoktasupport",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "passwordpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "passwordpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "securitynotificationsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "securitynotificationsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": true,
"table-key-properties": []
}
}
],
"selected": true
},
{
"tap_stream_id": "signonpolicies",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicies",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "signonpolicyrules",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "signonpolicyrules",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "thirdpartyadminsettings",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "thirdpartyadminsettings",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "threatsconfiguration",
"replication_method": "FULL_TABLE",
"key_properties": [],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "threatsconfiguration",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": []
}
}
],
"selected": false
},
{
"tap_stream_id": "userroles",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "userroles",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": false,
"table-key-properties": [
"id"
]
}
}
],
"selected": false
},
{
"tap_stream_id": "users",
"replication_method": "FULL_TABLE",
"key_properties": [
"id"
],
"schema": {
"properties": {},
"type": "object",
"additionalProperties": true
},
"stream": "users",
"metadata": [
{
"breadcrumb": [],
"metadata": {
"inclusion": "available",
"selected": true,
"table-key-properties": [
"id"
]
}
}
],
"selected": true
}
]
} | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/.meltano_plugin_fingerprint | b20bc82186fca3d755c7404d6ad1a9e400d08e76f5fee81d11f76c0b63eb75fc | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/pyvenv.cfg | home = /Users/nchebolu/.asdf/installs/python/3.11.1/bin
implementation = CPython
version_info = 3.11.1.final.0
virtualenv = 20.25.0
include-system-site-packages = false
base-prefix = /Users/nchebolu/.asdf/installs/python/3.11.1
base-exec-prefix = /Users/nchebolu/.asdf/installs/python/3.11.1
base-executable = /Users/nchebolu/.asdf/installs/python/3.11.1/bin/python3.11
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/wheel3.11 | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/wheel3 | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/activate.ps1 | $script:THIS_PATH = $myinvocation.mycommand.path
$script:BASE_DIR = Split-Path (Resolve-Path "$THIS_PATH/..") -Parent
function global:deactivate([switch] $NonDestructive) {
if (Test-Path variable:_OLD_VIRTUAL_PATH) {
$env:PATH = $variable:_OLD_VIRTUAL_PATH
Remove-Variable "_OLD_VIRTUAL_PATH" -Scope global
}
if (Test-Path function:_old_virtual_prompt) {
$function:prompt = $function:_old_virtual_prompt
Remove-Item function:\_old_virtual_prompt
}
if ($env:VIRTUAL_ENV) {
Remove-Item env:VIRTUAL_ENV -ErrorAction SilentlyContinue
}
if ($env:VIRTUAL_ENV_PROMPT) {
Remove-Item env:VIRTUAL_ENV_PROMPT -ErrorAction SilentlyContinue
}
if (!$NonDestructive) {
# Self destruct!
Remove-Item function:deactivate
Remove-Item function:pydoc
}
}
function global:pydoc {
python -m pydoc $args
}
# unset irrelevant variables
deactivate -nondestructive
$VIRTUAL_ENV = $BASE_DIR
$env:VIRTUAL_ENV = $VIRTUAL_ENV
if ("" -ne "") {
$env:VIRTUAL_ENV_PROMPT = ""
}
else {
$env:VIRTUAL_ENV_PROMPT = $( Split-Path $env:VIRTUAL_ENV -Leaf )
}
New-Variable -Scope global -Name _OLD_VIRTUAL_PATH -Value $env:PATH
$env:PATH = "$env:VIRTUAL_ENV/bin:" + $env:PATH
if (!$env:VIRTUAL_ENV_DISABLE_PROMPT) {
function global:_old_virtual_prompt {
""
}
$function:_old_virtual_prompt = $function:prompt
function global:prompt {
# Add the custom prefix to the existing prompt
$previous_prompt_value = & $function:_old_virtual_prompt
("(" + $env:VIRTUAL_ENV_PROMPT + ") " + $previous_prompt_value)
}
}
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/dotenv | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from dotenv.__main__ import cli
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/pip3 | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/pip-3.11 | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/activate.fish | # This file must be used using `source bin/activate.fish` *within a running fish ( http://fishshell.com ) session*.
# Do not run it directly.
function _bashify_path -d "Converts a fish path to something bash can recognize"
set fishy_path $argv
set bashy_path $fishy_path[1]
for path_part in $fishy_path[2..-1]
set bashy_path "$bashy_path:$path_part"
end
echo $bashy_path
end
function _fishify_path -d "Converts a bash path to something fish can recognize"
echo $argv | tr ':' '\n'
end
function deactivate -d 'Exit virtualenv mode and return to the normal environment.'
# reset old environment variables
if test -n "$_OLD_VIRTUAL_PATH"
# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
if test (echo $FISH_VERSION | head -c 1) -lt 3
set -gx PATH (_fishify_path "$_OLD_VIRTUAL_PATH")
else
set -gx PATH $_OLD_VIRTUAL_PATH
end
set -e _OLD_VIRTUAL_PATH
end
if test -n "$_OLD_VIRTUAL_PYTHONHOME"
set -gx PYTHONHOME "$_OLD_VIRTUAL_PYTHONHOME"
set -e _OLD_VIRTUAL_PYTHONHOME
end
if test -n "$_OLD_FISH_PROMPT_OVERRIDE"
and functions -q _old_fish_prompt
# Set an empty local `$fish_function_path` to allow the removal of `fish_prompt` using `functions -e`.
set -l fish_function_path
# Erase virtualenv's `fish_prompt` and restore the original.
functions -e fish_prompt
functions -c _old_fish_prompt fish_prompt
functions -e _old_fish_prompt
set -e _OLD_FISH_PROMPT_OVERRIDE
end
set -e VIRTUAL_ENV
set -e VIRTUAL_ENV_PROMPT
if test "$argv[1]" != 'nondestructive'
# Self-destruct!
functions -e pydoc
functions -e deactivate
functions -e _bashify_path
functions -e _fishify_path
end
end
# Unset irrelevant variables.
deactivate nondestructive
set -gx VIRTUAL_ENV '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv'
# https://github.com/fish-shell/fish-shell/issues/436 altered PATH handling
if test (echo $FISH_VERSION | head -c 1) -lt 3
set -gx _OLD_VIRTUAL_PATH (_bashify_path $PATH)
else
set -gx _OLD_VIRTUAL_PATH $PATH
end
set -gx PATH "$VIRTUAL_ENV"'/bin' $PATH
# Prompt override provided?
# If not, just use the environment name.
if test -n ''
set -gx VIRTUAL_ENV_PROMPT ''
else
set -gx VIRTUAL_ENV_PROMPT (basename "$VIRTUAL_ENV")
end
# Unset `$PYTHONHOME` if set.
if set -q PYTHONHOME
set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME
set -e PYTHONHOME
end
function pydoc
python -m pydoc $argv
end
if test -z "$VIRTUAL_ENV_DISABLE_PROMPT"
# Copy the current `fish_prompt` function as `_old_fish_prompt`.
functions -c fish_prompt _old_fish_prompt
function fish_prompt
# Run the user's prompt first; it might depend on (pipe)status.
set -l prompt (_old_fish_prompt)
printf '(%s) ' $VIRTUAL_ENV_PROMPT
string join -- \n $prompt # handle multi-line prompts
end
set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
end
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/tap-okta | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from tap_okta.tap import Tapokta
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(Tapokta.cli())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/wheel | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/jsonpath_ng | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jsonpath_ng.bin.jsonpath import entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(entry_point())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/wheel-3.11 | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/activate_this.py | """
Activate virtualenv for current interpreter:
Use exec(open(this_file).read(), {'__file__': this_file}).
This can be used when you must use an existing Python interpreter, not the virtualenv bin/python.
""" # noqa: D415
from __future__ import annotations
import os
import site
import sys
# __file__ is only defined when this script is exec'd with the documented
# recipe; a bare exec of the code string would leave it unset.
try:
    abs_file = os.path.abspath(__file__)
except NameError as exc:
    msg = "You must use exec(open(this_file).read(), {'__file__': this_file})"
    raise AssertionError(msg) from exc
bin_dir = os.path.dirname(abs_file)
base = bin_dir[: -len("bin") - 1] # strip away the bin part from the __file__, plus the path separator
# prepend bin to PATH (this file is inside the bin directory)
os.environ["PATH"] = os.pathsep.join([bin_dir, *os.environ.get("PATH", "").split(os.pathsep)])
os.environ["VIRTUAL_ENV"] = base # virtual env is right above bin directory
os.environ["VIRTUAL_ENV_PROMPT"] = "" or os.path.basename(base) # noqa: SIM222
# add the virtual environments libraries to the host python import mechanism
prev_length = len(sys.path)
for lib in "../lib/python3.11/site-packages".split(os.pathsep):
    path = os.path.realpath(os.path.join(bin_dir, lib))
    site.addsitedir(path.decode("utf-8") if "" else path)
# Move the freshly added site-dir entries to the front of sys.path so the
# venv's packages shadow the host interpreter's.
sys.path[:] = sys.path[prev_length:] + sys.path[0:prev_length]
# Mimic the legacy virtualenv attribute so tools can detect activation.
sys.real_prefix = sys.prefix
sys.prefix = base
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/pip | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/jsonschema | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from jsonschema.cli import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/pip3.11 | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from pip._internal.cli.main import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/activate | # This file must be used with "source bin/activate" *from bash*
# you cannot run it directly
if [ "${BASH_SOURCE-}" = "$0" ]; then
echo "You must source this script: \$ source $0" >&2
exit 33
fi
deactivate () {
unset -f pydoc >/dev/null 2>&1 || true
# reset old environment variables
# ! [ -z ${VAR+_} ] returns true if VAR is declared at all
if ! [ -z "${_OLD_VIRTUAL_PATH:+_}" ] ; then
PATH="$_OLD_VIRTUAL_PATH"
export PATH
unset _OLD_VIRTUAL_PATH
fi
if ! [ -z "${_OLD_VIRTUAL_PYTHONHOME+_}" ] ; then
PYTHONHOME="$_OLD_VIRTUAL_PYTHONHOME"
export PYTHONHOME
unset _OLD_VIRTUAL_PYTHONHOME
fi
# The hash command must be called to get it to forget past
# commands. Without forgetting past commands the $PATH changes
# we made may not be respected
hash -r 2>/dev/null
if ! [ -z "${_OLD_VIRTUAL_PS1+_}" ] ; then
PS1="$_OLD_VIRTUAL_PS1"
export PS1
unset _OLD_VIRTUAL_PS1
fi
unset VIRTUAL_ENV
unset VIRTUAL_ENV_PROMPT
if [ ! "${1-}" = "nondestructive" ] ; then
# Self destruct!
unset -f deactivate
fi
}
# unset irrelevant variables
deactivate nondestructive
VIRTUAL_ENV='/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv'
if ([ "$OSTYPE" = "cygwin" ] || [ "$OSTYPE" = "msys" ]) && $(command -v cygpath &> /dev/null) ; then
VIRTUAL_ENV=$(cygpath -u "$VIRTUAL_ENV")
fi
export VIRTUAL_ENV
_OLD_VIRTUAL_PATH="$PATH"
PATH="$VIRTUAL_ENV/bin:$PATH"
export PATH
if [ "x" != x ] ; then
VIRTUAL_ENV_PROMPT=""
else
VIRTUAL_ENV_PROMPT=$(basename "$VIRTUAL_ENV")
fi
export VIRTUAL_ENV_PROMPT
# unset PYTHONHOME if set
if ! [ -z "${PYTHONHOME+_}" ] ; then
_OLD_VIRTUAL_PYTHONHOME="$PYTHONHOME"
unset PYTHONHOME
fi
if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT-}" ] ; then
_OLD_VIRTUAL_PS1="${PS1-}"
PS1="(${VIRTUAL_ENV_PROMPT}) ${PS1-}"
export PS1
fi
# Make sure to unalias pydoc if it's already there
alias pydoc 2>/dev/null >/dev/null && unalias pydoc || true
pydoc () {
python -m pydoc "$@"
}
# The hash command must be called to get it to forget past
# commands. Without forgetting past commands the $PATH changes
# we made may not be respected
hash -r 2>/dev/null
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/pytest11 | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from singer_sdk import testing
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(testing.pytest_plugin())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/activate.nu | # virtualenv activation module
# Activate with `overlay use activate.nu`
# Deactivate with `deactivate`, as usual
#
# To customize the overlay name, you can call `overlay use activate.nu as foo`,
# but then simply `deactivate` won't work because it is just an alias to hide
# the "activate" overlay. You'd need to call `overlay hide foo` manually.
export-env {
def is-string [x] {
($x | describe) == 'string'
}
def has-env [...names] {
$names | each {|n|
$n in $env
} | all {|i| $i == true}
}
    # Emulates a `test -z`, but is better because it also handles e.g. 'false'
def is-env-true [name: string] {
if (has-env $name) {
# Try to parse 'true', '0', '1', and fail if not convertible
let parsed = (do -i { $env | get $name | into bool })
if ($parsed | describe) == 'bool' {
$parsed
} else {
not ($env | get -i $name | is-empty)
}
} else {
false
}
}
let virtual_env = '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv'
let bin = 'bin'
let is_windows = ($nu.os-info.family) == 'windows'
let path_name = (if (has-env 'Path') {
'Path'
} else {
'PATH'
}
)
let venv_path = ([$virtual_env $bin] | path join)
let new_path = ($env | get $path_name | prepend $venv_path)
# If there is no default prompt, then use the env name instead
let virtual_env_prompt = (if ('' | is-empty) {
($virtual_env | path basename)
} else {
''
})
let new_env = {
$path_name : $new_path
VIRTUAL_ENV : $virtual_env
VIRTUAL_ENV_PROMPT : $virtual_env_prompt
}
let new_env = (if (is-env-true 'VIRTUAL_ENV_DISABLE_PROMPT') {
$new_env
} else {
# Creating the new prompt for the session
let virtual_prefix = $'(char lparen)($virtual_env_prompt)(char rparen) '
# Back up the old prompt builder
let old_prompt_command = (if (has-env 'PROMPT_COMMAND') {
$env.PROMPT_COMMAND
} else {
''
})
let new_prompt = (if (has-env 'PROMPT_COMMAND') {
if 'closure' in ($old_prompt_command | describe) {
{|| $'($virtual_prefix)(do $old_prompt_command)' }
} else {
{|| $'($virtual_prefix)($old_prompt_command)' }
}
} else {
{|| $'($virtual_prefix)' }
})
$new_env | merge {
PROMPT_COMMAND : $new_prompt
VIRTUAL_PREFIX : $virtual_prefix
}
})
# Environment variables that will be loaded as the virtual env
load-env $new_env
}
export alias pydoc = python -m pydoc
export alias deactivate = overlay hide activate
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/normalizer | #!/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from charset_normalizer.cli import cli_detect
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(cli_detect())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/bin/activate.csh | # This file must be used with "source bin/activate.csh" *from csh*.
# You cannot run it directly.
# Created by Davide Di Blasi <davidedb@gmail.com>.
set newline='\
'
alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH:q" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT:q" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; unsetenv VIRTUAL_ENV_PROMPT; test "\!:*" != "nondestructive" && unalias deactivate && unalias pydoc'
# Unset irrelevant variables.
deactivate nondestructive
setenv VIRTUAL_ENV '/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv'
set _OLD_VIRTUAL_PATH="$PATH:q"
setenv PATH "$VIRTUAL_ENV:q/bin:$PATH:q"
if ('' != "") then
setenv VIRTUAL_ENV_PROMPT ''
else
setenv VIRTUAL_ENV_PROMPT "$VIRTUAL_ENV:t:q"
endif
if ( $?VIRTUAL_ENV_DISABLE_PROMPT ) then
if ( $VIRTUAL_ENV_DISABLE_PROMPT == "" ) then
set do_prompt = "1"
else
set do_prompt = "0"
endif
else
set do_prompt = "1"
endif
if ( $do_prompt == "1" ) then
# Could be in a non-interactive environment,
# in which case, $prompt is undefined and we wouldn't
# care about the prompt anyway.
if ( $?prompt ) then
set _OLD_VIRTUAL_PROMPT="$prompt:q"
if ( "$prompt:q" =~ *"$newline:q"* ) then
:
else
set prompt = '('"$VIRTUAL_ENV_PROMPT:q"') '"$prompt:q"
endif
endif
endif
unset env_name
unset do_prompt
alias pydoc python -m pydoc
rehash
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11 | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/inflection.py | # -*- coding: utf-8 -*-
"""
inflection
~~~~~~~~~~~~
A port of Ruby on Rails' inflector to Python.
:copyright: (c) 2012-2020 by Janne Vanhala
:license: MIT, see LICENSE for more details.
"""
import re
import unicodedata
__version__ = '0.5.0'
# Ordered (pattern, replacement) regex rules for pluralization.  Rules are
# tried top-to-bottom and the FIRST matching pattern wins, so the
# most-specific rules come first and the generic catch-alls ("s$", "$")
# come last.  _irregular() below prepends additional rules at runtime.
PLURALS = [
    (r"(?i)(quiz)$", r'\1zes'),
    (r"(?i)^(oxen)$", r'\1'),
    (r"(?i)^(ox)$", r'\1en'),
    (r"(?i)(m|l)ice$", r'\1ice'),
    (r"(?i)(m|l)ouse$", r'\1ice'),
    (r"(?i)(passer)s?by$", r'\1sby'),
    (r"(?i)(matr|vert|ind)(?:ix|ex)$", r'\1ices'),
    (r"(?i)(x|ch|ss|sh)$", r'\1es'),
    (r"(?i)([^aeiouy]|qu)y$", r'\1ies'),
    (r"(?i)(hive)$", r'\1s'),
    (r"(?i)([lr])f$", r'\1ves'),
    (r"(?i)([^f])fe$", r'\1ves'),
    (r"(?i)sis$", 'ses'),
    (r"(?i)([ti])a$", r'\1a'),
    (r"(?i)([ti])um$", r'\1a'),
    (r"(?i)(buffal|potat|tomat)o$", r'\1oes'),
    (r"(?i)(bu)s$", r'\1ses'),
    (r"(?i)(alias|status)$", r'\1es'),
    (r"(?i)(octop|vir)i$", r'\1i'),
    (r"(?i)(octop|vir)us$", r'\1i'),
    (r"(?i)^(ax|test)is$", r'\1es'),
    (r"(?i)s$", 's'),
    (r"$", 's'),
]
# Ordered (pattern, replacement) regex rules for singularization — the
# inverse direction of PLURALS, with the same first-match-wins semantics.
SINGULARS = [
    (r"(?i)(database)s$", r'\1'),
    (r"(?i)(quiz)zes$", r'\1'),
    (r"(?i)(matr)ices$", r'\1ix'),
    (r"(?i)(vert|ind)ices$", r'\1ex'),
    (r"(?i)(passer)sby$", r'\1by'),
    (r"(?i)^(ox)en", r'\1'),
    (r"(?i)(alias|status)(es)?$", r'\1'),
    (r"(?i)(octop|vir)(us|i)$", r'\1us'),
    (r"(?i)^(a)x[ie]s$", r'\1xis'),
    (r"(?i)(cris|test)(is|es)$", r'\1is'),
    (r"(?i)(shoe)s$", r'\1'),
    (r"(?i)(o)es$", r'\1'),
    (r"(?i)(bus)(es)?$", r'\1'),
    (r"(?i)(m|l)ice$", r'\1ouse'),
    (r"(?i)(x|ch|ss|sh)es$", r'\1'),
    (r"(?i)(m)ovies$", r'\1ovie'),
    (r"(?i)(s)eries$", r'\1eries'),
    (r"(?i)([^aeiouy]|qu)ies$", r'\1y'),
    (r"(?i)([lr])ves$", r'\1f'),
    (r"(?i)(tive)s$", r'\1'),
    (r"(?i)(hive)s$", r'\1'),
    (r"(?i)([^f])ves$", r'\1fe'),
    (r"(?i)(t)he(sis|ses)$", r"\1hesis"),
    (r"(?i)(s)ynop(sis|ses)$", r"\1ynopsis"),
    (r"(?i)(p)rogno(sis|ses)$", r"\1rognosis"),
    (r"(?i)(p)arenthe(sis|ses)$", r"\1arenthesis"),
    (r"(?i)(d)iagno(sis|ses)$", r"\1iagnosis"),
    (r"(?i)(b)a(sis|ses)$", r"\1asis"),
    (r"(?i)(a)naly(sis|ses)$", r"\1nalysis"),
    (r"(?i)([ti])a$", r'\1um'),
    (r"(?i)(n)ews$", r'\1ews'),
    (r"(?i)(ss)$", r'\1'),
    (r"(?i)s$", ''),
]
# Nouns whose singular and plural forms are identical; pluralize() and
# singularize() return these unchanged (matched case-insensitively).
UNCOUNTABLES = {
    'equipment',
    'fish',
    'information',
    'jeans',
    'money',
    'rice',
    'series',
    'sheep',
    'species'}
def _irregular(singular: str, plural: str) -> None:
    """
    Add rules to PLURALS and SINGULARS for an irregular word pair.

    New rules are inserted at index 0 so that they take precedence over
    the generic suffix rules declared above.

    :param singular: irregular word in singular form
    :param plural: irregular word in plural form
    """
    # Build a character class per letter so the tail of the word matches
    # regardless of case, e.g. "an" -> "[aA][nN]".
    def caseinsensitive(string: str) -> str:
        return ''.join('[' + char + char.upper() + ']' for char in string)
    if singular[0].upper() == plural[0].upper():
        # Both forms share a first letter: a single (?i) rule per direction
        # suffices, and \1 preserves the original first letter's case.
        # The plural->plural rule makes pluralize() idempotent.
        PLURALS.insert(0, (
            r"(?i)({}){}$".format(singular[0], singular[1:]),
            r'\1' + plural[1:]
        ))
        PLURALS.insert(0, (
            r"(?i)({}){}$".format(plural[0], plural[1:]),
            r'\1' + plural[1:]
        ))
        SINGULARS.insert(0, (
            r"(?i)({}){}$".format(plural[0], plural[1:]),
            r'\1' + singular[1:]
        ))
    else:
        # First letters differ (e.g. cow/kine): emit explicit upper- and
        # lower-case variants so the replacement can set the correct case
        # of the new first letter.
        PLURALS.insert(0, (
            r"{}{}$".format(singular[0].upper(),
                            caseinsensitive(singular[1:])),
            plural[0].upper() + plural[1:]
        ))
        PLURALS.insert(0, (
            r"{}{}$".format(singular[0].lower(),
                            caseinsensitive(singular[1:])),
            plural[0].lower() + plural[1:]
        ))
        PLURALS.insert(0, (
            r"{}{}$".format(plural[0].upper(), caseinsensitive(plural[1:])),
            plural[0].upper() + plural[1:]
        ))
        PLURALS.insert(0, (
            r"{}{}$".format(plural[0].lower(), caseinsensitive(plural[1:])),
            plural[0].lower() + plural[1:]
        ))
        SINGULARS.insert(0, (
            r"{}{}$".format(plural[0].upper(), caseinsensitive(plural[1:])),
            singular[0].upper() + singular[1:]
        ))
        SINGULARS.insert(0, (
            r"{}{}$".format(plural[0].lower(), caseinsensitive(plural[1:])),
            singular[0].lower() + singular[1:]
        ))
def camelize(string: str, uppercase_first_letter: bool = True) -> str:
    """
    Convert an underscored string to CamelCase.

    Examples::

        >>> camelize("device_type")
        'DeviceType'
        >>> camelize("device_type", False)
        'deviceType'

    Roughly the inverse of :func:`underscore`, though not always::

        >>> camelize(underscore("IOError"))
        'IoError'

    :param uppercase_first_letter: when `True` (the default) produce
        UpperCamelCase; when `False` produce lowerCamelCase.
    """
    if not uppercase_first_letter:
        # Build the UpperCamelCase form, then restore the original
        # (lower-cased) first character.
        return string[0].lower() + camelize(string)[1:]

    def _upper_after_boundary(match):
        return match.group(1).upper()

    # Upper-case the first character and every character after an underscore.
    return re.sub(r"(?:^|_)(.)", _upper_after_boundary, string)
def dasherize(word: str) -> str:
    """
    Replace every underscore in *word* with a dash.

    Example::

        >>> dasherize("puni_puni")
        'puni-puni'
    """
    return '-'.join(word.split('_'))
def humanize(word: str) -> str:
    """
    Turn a snake_case identifier into readable text: strip a trailing
    ``"_id"``, replace underscores with spaces, lower-case the letters and
    capitalize the first one.  Like :func:`titleize`, this is meant for
    creating pretty output.

    Examples::

        >>> humanize("employee_salary")
        'Employee salary'
        >>> humanize("author_id")
        'Author'
    """
    stripped = re.sub(r"_id$", "", word)
    spaced = stripped.replace('_', ' ')
    lowered = re.sub(r"(?i)([a-z\d]*)", lambda m: m.group(1).lower(), spaced)
    # Capitalize only the first word character, leaving the rest untouched.
    return re.sub(r"^\w", lambda m: m.group(0).upper(), lowered)
def ordinal(number: int) -> str:
    """
    Return the suffix denoting a number's position in an ordered sequence:
    'st', 'nd', 'rd' or 'th'.

    Examples::

        >>> ordinal(1)
        'st'
        >>> ordinal(2)
        'nd'
        >>> ordinal(1002)
        'nd'
        >>> ordinal(1003)
        'rd'
        >>> ordinal(-11)
        'th'
        >>> ordinal(-1021)
        'st'
    """
    n = abs(int(number))
    # 11, 12 and 13 are irregular: 11th, 12th, 13th.
    if 11 <= n % 100 <= 13:
        return "th"
    last_digit = n % 10
    if last_digit == 1:
        return "st"
    if last_digit == 2:
        return "nd"
    if last_digit == 3:
        return "rd"
    return "th"
def ordinalize(number: int) -> str:
    """
    Append the ordinal suffix to *number*, producing strings such as
    '1st', '2nd', '3rd', '4th'.

    Examples::

        >>> ordinalize(1)
        '1st'
        >>> ordinalize(1002)
        '1002nd'
        >>> ordinalize(1003)
        '1003rd'
        >>> ordinalize(-11)
        '-11th'
        >>> ordinalize(-1021)
        '-1021st'
    """
    return str(number) + ordinal(number)
def parameterize(string: str, separator: str = '-') -> str:
    """
    Make *string* usable as part of a 'pretty' URL by replacing special
    characters with *separator* and lower-casing the result.

    Example::

        >>> parameterize(u"Donald E. Knuth")
        'donald-e-knuth'
    """
    ascii_text = transliterate(string)
    # Collapse every run of characters not allowed in a slug into the separator.
    slug = re.sub(r"(?i)[^a-z0-9\-_]+", separator, ascii_text)
    if separator:
        escaped = re.escape(separator)
        # Squash consecutive separators down to a single one.
        slug = re.sub(escaped + '{2,}', separator, slug)
        # Drop a leading and/or trailing separator.
        slug = re.sub("(?i)^{0}|{0}$".format(escaped), '', slug)
    return slug.lower()
def pluralize(word: str) -> str:
    """
    Return the plural form of *word*.

    Examples::

        >>> pluralize("posts")
        'posts'
        >>> pluralize("octopus")
        'octopi'
        >>> pluralize("sheep")
        'sheep'
        >>> pluralize("CamelOctopus")
        'CamelOctopi'
    """
    # Empty strings and uncountable nouns come back untouched.
    if not word or word.lower() in UNCOUNTABLES:
        return word
    # PLURALS is ordered most-specific first; the first matching rule wins.
    for pattern, replacement in PLURALS:
        if re.search(pattern, word) is None:
            continue
        return re.sub(pattern, replacement, word)
    return word
def singularize(word: str) -> str:
    """
    Return the singular form of *word* — the reverse of :func:`pluralize`.

    Examples::

        >>> singularize("posts")
        'post'
        >>> singularize("octopi")
        'octopus'
        >>> singularize("sheep")
        'sheep'
        >>> singularize("word")
        'word'
        >>> singularize("CamelOctopi")
        'CamelOctopus'
    """
    # Uncountable nouns (matched as a whole final word) are left untouched.
    if any(re.search(r'(?i)\b(%s)\Z' % noun, word) for noun in UNCOUNTABLES):
        return word
    # SINGULARS is ordered most-specific first; the first matching rule wins.
    for pattern, replacement in SINGULARS:
        if re.search(pattern, word):
            return re.sub(pattern, replacement, word)
    return word
def tableize(word: str) -> str:
    """
    Create the name of a table like Rails does for models: underscore the
    word, then pluralize it.

    Examples::

        >>> tableize('RawScaledScorer')
        'raw_scaled_scorers'
        >>> tableize('egg_and_ham')
        'egg_and_hams'
        >>> tableize('fancyCategory')
        'fancy_categories'
    """
    snake = underscore(word)
    return pluralize(snake)
def titleize(word: str) -> str:
    """
    Capitalize all the words and replace some characters in the string to
    create a nicer looking title, for pretty output.

    Examples::

        >>> titleize("man from the boondocks")
        'Man From The Boondocks'
        >>> titleize("x-men: the last stand")
        'X Men: The Last Stand'
        >>> titleize("TheManWithoutAPast")
        'The Man Without A Past'
        >>> titleize("raiders_of_the_lost_ark")
        'Raiders Of The Lost Ark'
    """
    pretty = humanize(underscore(word)).title()

    def _capitalize(match):
        return match.group(1).capitalize()

    # .title() mishandles apostrophes ("Don'T"); re-capitalize per boundary.
    return re.sub(r"\b('?\w)", _capitalize, pretty)
def transliterate(string: str) -> str:
    """
    Replace non-ASCII characters with an ASCII approximation; characters
    with no approximation are dropped.  The input must be ``unicode``.

    Examples::

        >>> transliterate('älämölö')
        'alamolo'
        >>> transliterate('Ærøskøbing')
        'rskbing'
    """
    # NFKD decomposition splits accented characters into base + combining
    # marks; the marks are then discarded by the 'ignore' ASCII encode.
    decomposed = unicodedata.normalize('NFKD', string)
    ascii_bytes = decomposed.encode('ascii', 'ignore')
    return ascii_bytes.decode('ascii')
def underscore(word: str) -> str:
    """
    Make an underscored, lowercase form from the expression in the string.

    Example::

        >>> underscore("DeviceType")
        'device_type'

    Roughly the inverse of :func:`camelize`, though not always::

        >>> camelize(underscore("IOError"))
        'IoError'
    """
    # Split an upper-case run from a following capitalized word: "IOError" -> "IO_Error".
    separated = re.sub(r"([A-Z]+)([A-Z][a-z])", r'\1_\2', word)
    # Split a lower-case letter or digit from a following capital: "deviceType" -> "device_Type".
    separated = re.sub(r"([a-z\d])([A-Z])", r'\1_\2', separated)
    return separated.replace("-", "_").lower()
# Register irregular noun pairs that the generic suffix rules above cannot
# derive; each call prepends high-priority rules to PLURALS and SINGULARS.
_irregular('person', 'people')
_irregular('man', 'men')
_irregular('human', 'humans')
_irregular('child', 'children')
_irregular('sex', 'sexes')
_irregular('move', 'moves')
_irregular('cow', 'kine')
_irregular('zombie', 'zombies')
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11 | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/appdirs.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2005-2010 ActiveState Software Inc.
# Copyright (c) 2013 Eddy Petrișor
"""Utilities for determining application-specific dirs.
See <http://github.com/ActiveState/appdirs> for details and usage.
"""
# Dev Notes:
# - MSDN on where to store app data files:
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
__version__ = "1.4.4"
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
import sys
import os
# Python 2/3 compatibility shim: make the name `unicode` available on Py3.
PY3 = sys.version_info[0] == 3
if PY3:
    unicode = str
# Normalize the platform name into `system`.  Jython reports 'java...' in
# sys.platform, so derive the underlying OS from platform.java_ver() there.
if sys.platform.startswith('java'):
    import platform
    os_name = platform.java_ver()[3][0]
    if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
        system = 'win32'
    elif os_name.startswith('Mac'): # "Mac OS X", etc.
        system = 'darwin'
    else: # "Linux", "SunOS", "FreeBSD", etc.
        # Setting this to "linux2" is not ideal, but only Windows or Mac
        # are actually checked for and the rest of the module expects
        # *sys.platform* style strings.
        system = 'linux2'
else:
    system = sys.platform
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific data dir for this application.

    "appname" is the name of application.
        If None, just the system directory is returned.
    "appauthor" (only used on Windows) is the name of the
        appauthor or distributing body for this application. Typically
        it is the owning company name. This falls back to appname. You may
        pass False to disable it.
    "version" is an optional version path element to append to the
        path. You might want to use this if you want multiple versions
        of your app to be able to run independently. If used, this
        would typically be "<major>.<minor>".
        Only applied when appname is present.
    "roaming" (boolean, default False) can be set True to use the Windows
        roaming appdata directory, so that user data is sync'd on login
        for users on a roaming-profile network. See
        <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
        for a discussion of issues.

    Typical user data directories are:
        Mac OS X:               ~/Library/Application Support/<AppName>
        Unix:                   ~/.local/share/<AppName>    # or $XDG_DATA_HOME
        Win XP (not roaming):   C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
        Win 7  (not roaming):   C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
        Win 7  (roaming):       C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>

    For Unix, we follow the XDG spec and support $XDG_DATA_HOME;
    by default that means "~/.local/share/<AppName>".
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        # Roaming profiles live under CSIDL_APPDATA, local data under
        # CSIDL_LOCAL_APPDATA.
        folder_id = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
        path = os.path.normpath(_get_win_folder(folder_id))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Application Support/')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG spec: fall back to ~/.local/share when $XDG_DATA_HOME is unset.
        path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
        if appname:
            path = os.path.join(path, appname)
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the user-shared data dir for this application.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" (Windows only) author/company name; defaults to appname;
                pass False to omit the author path component entirely.
    "version"   optional version path element appended when appname is given.
    "multipath" (*nix only) if True, return the entire os.pathsep-joined list
                of data dirs instead of just the first entry.

    Typical site data directories are:
        Mac OS X:  /Library/Application Support/<AppName>
        Unix:      /usr/local/share/<AppName> or /usr/share/<AppName>
        Win XP:    C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
        Vista:     (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
        Win 7:     C:\ProgramData\<AppAuthor>\<AppName>   # hidden, but writeable

    On Unix this follows $XDG_DATA_DIRS (first entry unless multipath).
    WARNING: avoid on Windows; see the Vista note above.
    """
    if system == "win32":
        # Shared data lives under CSIDL_COMMON_APPDATA, optionally nested
        # as <AppAuthor>\<AppName>.
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
    elif system == 'darwin':
        path = os.path.expanduser('/Library/Application Support')
        if appname:
            path = os.path.join(path, appname)
    else:
        # XDG: honour every entry of $XDG_DATA_DIRS (or the spec default).
        raw = os.getenv('XDG_DATA_DIRS',
                        os.pathsep.join(['/usr/local/share', '/usr/share']))
        bases = [os.path.expanduser(entry.rstrip(os.sep))
                 for entry in raw.split(os.pathsep)]
        if appname:
            leaf = os.path.join(appname, version) if version else appname
            bases = [os.sep.join([base, leaf]) for base in bases]
        # Version (if any) was already folded into `leaf`, so return here.
        return os.pathsep.join(bases) if multipath else bases[0]
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific config dir for this application.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" (Windows only) author/company name; defaults to appname;
                pass False to omit the author path component entirely.
    "version"   optional version path element appended when appname is given.
    "roaming"   (Windows only) use the roaming AppData directory so the data
                follows the user across machines on a roaming-profile network.

    Typical user config directories are:
        Mac OS X:  same as user_data_dir
        Unix:      ~/.config/<AppName>     # or $XDG_CONFIG_HOME, if defined
        Win *:     same as user_data_dir

    On Unix this follows the XDG spec's $XDG_CONFIG_HOME,
    i.e. "~/.config/<AppName>" by default.
    """
    if system in ("win32", "darwin"):
        # Windows and macOS keep configuration alongside user data.
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        base = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
        path = os.path.join(base, appname) if appname else base
    if appname and version:
        path = os.path.join(path, version)
    return path
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
    r"""Return full path to the shared config dir for this application.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" (Windows only) author/company name; defaults to appname;
                pass False to omit the author path component entirely.
    "version"   optional version path element appended when appname is given.
    "multipath" (*nix only) if True, return the entire os.pathsep-joined list
                of config dirs instead of just the first entry.

    Typical site config directories are:
        Mac OS X:  same as site_data_dir
        Unix:      /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName>
        Win *:     same as site_data_dir
        Vista:     (Fail! "C:\ProgramData" is a hidden *system* directory.)

    On Unix this follows $XDG_CONFIG_DIRS (first entry unless multipath).
    WARNING: avoid on Windows; see the Vista note above.
    """
    if system in ("win32", "darwin"):
        path = site_data_dir(appname, appauthor)
        if appname and version:
            path = os.path.join(path, version)
    else:
        # XDG: honour every entry of $XDG_CONFIG_DIRS (or the spec default).
        raw = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
        bases = [os.path.expanduser(entry.rstrip(os.sep))
                 for entry in raw.split(os.pathsep)]
        if appname:
            leaf = os.path.join(appname, version) if version else appname
            bases = [os.sep.join([base, leaf]) for base in bases]
        path = os.pathsep.join(bases) if multipath else bases[0]
    return path
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific cache dir for this application.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" (Windows only) author/company name; defaults to appname;
                pass False to omit the author path component entirely.
    "version"   optional version path element appended when appname is given.
    "opinion"   (boolean) set False to suppress appending "Cache" to the base
                app-data dir on Windows.

    Typical user cache directories are:
        Mac OS X:  ~/Library/Caches/<AppName>
        Unix:      ~/.cache/<AppName>        # XDG default
        Win XP:    C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
        Vista:     C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache

    OPINION: on Windows, "Cache" is appended to the CSIDL_LOCAL_APPDATA value
    (apps conventionally put cache *under* their app-data dir, e.g.
    ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache). Disable with
    opinion=False.
    """
    if system == "win32":
        if appauthor is None:
            appauthor = appname
        path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
        if appname:
            if appauthor is False:
                path = os.path.join(path, appname)
            else:
                path = os.path.join(path, appauthor, appname)
            if opinion:
                path = os.path.join(path, "Cache")
    elif system == 'darwin':
        path = os.path.expanduser('~/Library/Caches')
        if appname:
            path = os.path.join(path, appname)
    else:
        base = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
        path = os.path.join(base, appname) if appname else base
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
    r"""Return full path to the user-specific state dir for this application.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" (Windows only) author/company name; defaults to appname;
                pass False to omit the author path component entirely.
    "version"   optional version path element appended when appname is given.
    "roaming"   (Windows only) use the roaming AppData directory so the data
                follows the user across machines on a roaming-profile network.

    Typical user state directories are:
        Mac OS X:  same as user_data_dir
        Unix:      ~/.local/state/<AppName>   # or $XDG_STATE_HOME, if defined
        Win *:     same as user_data_dir

    On Unix this follows the Debian proposal
    <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
    extending the XDG spec with $XDG_STATE_HOME,
    i.e. "~/.local/state/<AppName>" by default.
    """
    if system in ("win32", "darwin"):
        # Windows and macOS have no separate "state" location; reuse user data.
        path = user_data_dir(appname, appauthor, None, roaming)
    else:
        base = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
        path = os.path.join(base, appname) if appname else base
    if appname and version:
        path = os.path.join(path, version)
    return path
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
    r"""Return full path to the user-specific log dir for this application.

    "appname"   application name; if None the bare system directory is returned.
    "appauthor" (Windows only) author/company name; defaults to appname;
                pass False to omit the author path component entirely.
    "version"   optional version path element appended when appname is given.
    "opinion"   (boolean) set False to suppress appending "Logs" to the base
                app-data dir on Windows and "log" to the base cache dir on Unix.

    Typical user log directories are:
        Mac OS X:  ~/Library/Logs/<AppName>
        Unix:      ~/.cache/<AppName>/log    # or under $XDG_CACHE_HOME
        Win XP:    C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
        Vista:     C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs

    OPINION: "Logs" is appended to the CSIDL_LOCAL_APPDATA value on Windows
    and "log" to the user cache dir on Unix. Disable with opinion=False.
    """
    if system == "darwin":
        # FIX: previously os.path.join(..., appname) was applied
        # unconditionally, raising TypeError when appname is None even though
        # the contract promises the bare system directory in that case.
        path = os.path.expanduser('~/Library/Logs')
        if appname:
            path = os.path.join(path, appname)
    elif system == "win32":
        path = user_data_dir(appname, appauthor, version)
        # version (if any) was already consumed by user_data_dir; prevent the
        # trailing append below from adding it a second time.
        version = False
        if opinion:
            path = os.path.join(path, "Logs")
    else:
        path = user_cache_dir(appname, appauthor, version)
        # Same reasoning as the win32 branch: version is already in `path`.
        version = False
        if opinion:
            path = os.path.join(path, "log")
    if appname and version:
        path = os.path.join(path, version)
    return path
class AppDirs(object):
    """Convenience wrapper for getting application dirs.

    Stores appname/appauthor/version/roaming/multipath once and exposes each
    of the module-level ``*_dir`` helpers as a read-only property.
    """

    def __init__(self, appname=None, appauthor=None, version=None,
                 roaming=False, multipath=False):
        self.appname = appname
        self.appauthor = appauthor
        self.version = version
        self.roaming = roaming
        self.multipath = multipath

    @property
    def user_data_dir(self):
        """Per-user data directory."""
        return user_data_dir(self.appname, self.appauthor,
                             version=self.version, roaming=self.roaming)

    @property
    def site_data_dir(self):
        """Shared (site-wide) data directory."""
        return site_data_dir(self.appname, self.appauthor,
                             version=self.version, multipath=self.multipath)

    @property
    def user_config_dir(self):
        """Per-user configuration directory."""
        return user_config_dir(self.appname, self.appauthor,
                               version=self.version, roaming=self.roaming)

    @property
    def site_config_dir(self):
        """Shared (site-wide) configuration directory."""
        return site_config_dir(self.appname, self.appauthor,
                               version=self.version, multipath=self.multipath)

    @property
    def user_cache_dir(self):
        """Per-user cache directory."""
        return user_cache_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_state_dir(self):
        """Per-user state directory."""
        return user_state_dir(self.appname, self.appauthor,
                              version=self.version)

    @property
    def user_log_dir(self):
        """Per-user log directory."""
        return user_log_dir(self.appname, self.appauthor,
                            version=self.version)
#---- internal support stuff
def _get_win_folder_from_registry(csidl_name):
    """Look up a shell folder path in the per-user registry.

    This is a fallback technique at best: the registry is not guaranteed to
    agree with SHGetFolderPath for every CSIDL_* name.
    """
    if PY3:
        import winreg as _winreg
    else:
        import _winreg
    # Translate the CSIDL constant name to its registry value name first so an
    # unknown csidl_name raises KeyError before any registry handle is opened.
    value_name = {
        "CSIDL_APPDATA": "AppData",
        "CSIDL_COMMON_APPDATA": "Common AppData",
        "CSIDL_LOCAL_APPDATA": "Local AppData",
    }[csidl_name]
    key = _winreg.OpenKey(
        _winreg.HKEY_CURRENT_USER,
        r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
    )
    folder, _value_type = _winreg.QueryValueEx(key, value_name)
    return folder
def _get_win_folder_with_pywin32(csidl_name):
    """Resolve a CSIDL_* folder path via pywin32's SHGetFolderPath.

    Returns the folder path, downgraded to its 8.3 short form when it
    contains characters outside latin-1 (workaround for
    <http://bugs.activestate.com/show_bug.cgi?id=85099>).
    """
    from win32com.shell import shellcon, shell
    dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0)
    # Try to make this a unicode path because SHGetFolderPath does
    # not return unicode strings when there is unicode data in the
    # path.
    try:
        # FIX: `unicode` only exists on Python 2; unguarded it raised
        # NameError on Python 3 (which the `except UnicodeError` below never
        # caught). On Python 3 the value is already str, so no conversion is
        # needed and the short-path downgrade still runs.
        if not PY3:
            dir = unicode(dir)
        # Downgrade to short path name if have highbit chars. See
        # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
        has_high_char = False
        for c in dir:
            if ord(c) > 255:
                has_high_char = True
                break
        if has_high_char:
            try:
                import win32api
                dir = win32api.GetShortPathName(dir)
            except ImportError:
                pass
    except UnicodeError:
        pass
    return dir
def _get_win_folder_with_ctypes(csidl_name):
    """Resolve a CSIDL_* folder path by calling SHGetFolderPathW via ctypes."""
    import ctypes
    # Numeric CSIDL values, hard-coded so no shell constants module is needed.
    folder_id = {
        "CSIDL_APPDATA": 26,
        "CSIDL_COMMON_APPDATA": 35,
        "CSIDL_LOCAL_APPDATA": 28,
    }[csidl_name]
    path_buf = ctypes.create_unicode_buffer(1024)
    ctypes.windll.shell32.SHGetFolderPathW(None, folder_id, None, 0, path_buf)
    # Downgrade to the 8.3 short path name when the path contains characters
    # outside latin-1. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in path_buf):
        short_buf = ctypes.create_unicode_buffer(1024)
        if ctypes.windll.kernel32.GetShortPathNameW(path_buf.value, short_buf, 1024):
            path_buf = short_buf
    return path_buf.value
def _get_win_folder_with_jna(csidl_name):
    """Resolve a CSIDL_* folder path via JNA (for Jython running on Windows)."""
    import array
    from com.sun import jna
    from com.sun.jna.platform import win32
    buf_size = win32.WinDef.MAX_PATH * 2
    buf = array.zeros('c', buf_size)
    shell = win32.Shell32.INSTANCE
    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None,
                          win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
    folder = jna.Native.toString(buf.tostring()).rstrip("\0")
    # Downgrade to the 8.3 short path name when the path contains characters
    # outside latin-1. See
    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
    if any(ord(ch) > 255 for ch in folder):
        buf = array.zeros('c', buf_size)
        kernel = win32.Kernel32.INSTANCE
        if kernel.GetShortPathName(folder, buf, buf_size):
            folder = jna.Native.toString(buf.tostring()).rstrip("\0")
    return folder
if system == "win32":
try:
import win32com.shell
_get_win_folder = _get_win_folder_with_pywin32
except ImportError:
try:
from ctypes import windll
_get_win_folder = _get_win_folder_with_ctypes
except ImportError:
try:
import com.sun.jna
_get_win_folder = _get_win_folder_with_jna
except ImportError:
_get_win_folder = _get_win_folder_from_registry
#---- self test code
if __name__ == "__main__":
appname = "MyApp"
appauthor = "MyCompany"
props = ("user_data_dir",
"user_config_dir",
"user_cache_dir",
"user_state_dir",
"user_log_dir",
"site_data_dir",
"site_config_dir")
print("-- app dirs %s --" % __version__)
print("-- app dirs (with optional 'version')")
dirs = AppDirs(appname, appauthor, version="1.0")
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'version')")
dirs = AppDirs(appname, appauthor)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (without optional 'appauthor')")
dirs = AppDirs(appname)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
print("\n-- app dirs (with disabled 'appauthor')")
dirs = AppDirs(appname, appauthor=False)
for prop in props:
print("%s: %s" % (prop, getattr(dirs, prop)))
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11 | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/_virtualenv.py | """Patches that are applied at runtime to the virtual environment."""
from __future__ import annotations
import os
import sys
from contextlib import suppress
# Path of this patch module, recorded as the "origin" half of patched
# distutils option values. (os.path.join with a single argument simply
# returns that argument unchanged.)
VIRTUALENV_PATCH_FILE = os.path.join(__file__)
def patch_dist(dist):
    """
    Distutils allows user to configure some arguments via a configuration file:
    https://docs.python.org/3/install/index.html#distutils-configuration-files.
    Some of this arguments though don't make sense in context of the virtual environment files, let's fix them up.

    ``dist`` is the (distutils or setuptools) ``dist`` module object; its
    ``Distribution.parse_config_files`` is wrapped in place.
    """  # noqa: D205
    # we cannot allow some install config as that would get packages installed outside of the virtual environment
    old_parse_config_files = dist.Distribution.parse_config_files
    def parse_config_files(self, *args, **kwargs):
        # Run the stock parser first, then sanitise the "install" section.
        result = old_parse_config_files(self, *args, **kwargs)
        install = self.get_option_dict("install")
        if "prefix" in install:  # the prefix governs where to install the libraries
            # Force the prefix back inside this virtualenv; distutils option
            # values are (origin, value) pairs, hence the tuple.
            install["prefix"] = VIRTUALENV_PATCH_FILE, os.path.abspath(sys.prefix)
        for base in ("purelib", "platlib", "headers", "scripts", "data"):
            key = f"install_{base}"
            if key in install:  # do not allow global configs to hijack venv paths
                install.pop(key, None)
        return result
    dist.Distribution.parse_config_files = parse_config_files
# Import hook that patches some modules to ignore configuration values that break package installation in case
# of virtual environments.
# Both spellings must be covered because setuptools ships its own copy of
# Distribution alongside the stdlib distutils one.
_DISTUTILS_PATCH = "distutils.dist", "setuptools.dist"
# https://docs.python.org/3/library/importlib.html#setting-up-an-importer
class _Finder:
    """A meta path finder that allows patching the imported distutils modules."""
    # Name of the module currently being resolved; doubles as a re-entrancy
    # guard so our own find_spec call below does not recurse into us.
    fullname = None
    # lock[0] is threading.Lock(), but initialized lazily to avoid importing threading very early at startup,
    # because there are gevent-based applications that need to be first to import threading by themselves.
    # See https://github.com/pypa/virtualenv/issues/1895 for details.
    lock = []  # noqa: RUF012
    def find_spec(self, fullname, path, target=None):  # noqa: ARG002
        if fullname in _DISTUTILS_PATCH and self.fullname is None:
            # initialize lock[0] lazily
            if len(self.lock) == 0:
                import threading
                lock = threading.Lock()
                # there is possibility that two threads T1 and T2 are simultaneously running into find_spec,
                # observing .lock as empty, and further going into hereby initialization. However due to the GIL,
                # list.append() operation is atomic and this way only one of the threads will "win" to put the lock
                # - that every thread will use - into .lock[0].
                # https://docs.python.org/3/faq/library.html#what-kinds-of-global-value-mutation-are-thread-safe
                self.lock.append(lock)
            from functools import partial
            from importlib.util import find_spec
            with self.lock[0]:
                # Mark ourselves busy so the find_spec call below skips us,
                # then delegate to the normal import machinery for the spec.
                self.fullname = fullname
                try:
                    spec = find_spec(fullname, path)
                    if spec is not None:
                        # https://www.python.org/dev/peps/pep-0451/#how-loading-will-work
                        is_new_api = hasattr(spec.loader, "exec_module")
                        func_name = "exec_module" if is_new_api else "load_module"
                        old = getattr(spec.loader, func_name)
                        func = self.exec_module if is_new_api else self.load_module
                        if old is not func:
                            with suppress(AttributeError):  # C-Extension loaders are r/o such as zipimporter with <3.7
                                setattr(spec.loader, func_name, partial(func, old))
                        return spec
                finally:
                    self.fullname = None
        return None
    @staticmethod
    def exec_module(old, module):
        # PEP 451 path: run the original exec_module, then apply the patch.
        old(module)
        if module.__name__ in _DISTUTILS_PATCH:
            patch_dist(module)
    @staticmethod
    def load_module(old, name):
        # Legacy (pre-PEP 451) loader path: load, then apply the patch.
        module = old(name)
        if module.__name__ in _DISTUTILS_PATCH:
            patch_dist(module)
        return module
# Install the finder at the highest priority so distutils.dist /
# setuptools.dist get patched the moment they are first imported.
sys.meta_path.insert(0, _Finder())
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11 | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/distutils-precedence.pth | import os; var = 'SETUPTOOLS_USE_DISTUTILS'; enabled = os.environ.get(var, 'stdlib') == 'local'; enabled and __import__('_distutils_hack').add_shim();
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11 | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/_virtualenv.pth | import _virtualenv | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11 | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/six.py | # Copyright (c) 2010-2020 Benjamin Peterson
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Utilities for writing code that runs on Python 2 and 3"""
from __future__ import absolute_import
import functools
import itertools
import operator
import sys
import types
__author__ = "Benjamin Peterson <benjamin@python.org>"
__version__ = "1.16.0"
# Useful for very coarse version differentiation.
PY2 = sys.version_info[0] == 2
PY3 = sys.version_info[0] == 3
PY34 = sys.version_info[0:2] >= (3, 4)
if PY3:
string_types = str,
integer_types = int,
class_types = type,
text_type = str
binary_type = bytes
MAXSIZE = sys.maxsize
else:
string_types = basestring,
integer_types = (int, long)
class_types = (type, types.ClassType)
text_type = unicode
binary_type = str
if sys.platform.startswith("java"):
# Jython always uses 32 bits.
MAXSIZE = int((1 << 31) - 1)
else:
# It's possible to have sizeof(long) != sizeof(Py_ssize_t).
class X(object):
def __len__(self):
return 1 << 31
try:
len(X())
except OverflowError:
# 32-bit
MAXSIZE = int((1 << 31) - 1)
else:
# 64-bit
MAXSIZE = int((1 << 63) - 1)
del X
if PY34:
from importlib.util import spec_from_loader
else:
spec_from_loader = None
def _add_doc(func, doc):
    """Add documentation to a function.

    Sets ``func.__doc__`` to ``doc`` in place; returns nothing.
    """
    func.__doc__ = doc
def _import_module(name):
    """Import module, returning the module after the last dot.

    Unlike bare ``__import__`` (which returns the top-level package for a
    dotted name), this returns the submodule itself via ``sys.modules``.
    """
    __import__(name)
    return sys.modules[name]
class _LazyDescr(object):
    """Descriptor that resolves its value on first access.

    Subclasses provide ``_resolve()``; the resolved object is stored on the
    instance under the same name and the descriptor then removes itself from
    the class, so later lookups are plain attribute hits.
    """
    def __init__(self, name):
        # Attribute name this descriptor is installed under.
        self.name = name
    def __get__(self, obj, tp):
        result = self._resolve()
        setattr(obj, self.name, result)  # Invokes __set__.
        try:
            # This is a bit ugly, but it avoids running this again by
            # removing this descriptor.
            delattr(obj.__class__, self.name)
        except AttributeError:
            pass
        return result
class MovedModule(_LazyDescr):
    """Lazy descriptor for a module renamed between Python 2 and 3.

    ``old`` is the Python 2 module name, ``new`` the Python 3 name
    (defaulting to ``name`` itself); only the one matching the running
    interpreter is recorded in ``self.mod``.
    """
    def __init__(self, name, old, new=None):
        super(MovedModule, self).__init__(name)
        if PY3:
            if new is None:
                new = name
            self.mod = new
        else:
            self.mod = old
    def _resolve(self):
        return _import_module(self.mod)
    def __getattr__(self, attr):
        # Resolve the real module lazily and cache the fetched attribute on
        # this object so subsequent lookups bypass __getattr__.
        _module = self._resolve()
        value = getattr(_module, attr)
        setattr(self, attr, value)
        return value
# NOTE: deliberately no class docstring here — __init__ copies the class
# docstring onto the module instance, so subclasses supply their own.
class _LazyModule(types.ModuleType):
    def __init__(self, name):
        super(_LazyModule, self).__init__(name)
        # Expose the subclass docstring as the synthetic module's __doc__.
        self.__doc__ = self.__class__.__doc__
    def __dir__(self):
        # Advertise the lazily-provided names alongside the basics.
        attrs = ["__doc__", "__name__"]
        attrs += [attr.name for attr in self._moved_attributes]
        return attrs
    # Subclasses should override this
    _moved_attributes = []
class MovedAttribute(_LazyDescr):
    """Lazy descriptor for an attribute that moved between Python 2 and 3.

    Records the (module, attribute) pair appropriate for the running
    interpreter; ``new_mod``/``new_attr`` default to ``name``/``old_attr``
    respectively when not given.
    """
    def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None):
        super(MovedAttribute, self).__init__(name)
        if PY3:
            if new_mod is None:
                new_mod = name
            self.mod = new_mod
            if new_attr is None:
                if old_attr is None:
                    new_attr = name
                else:
                    new_attr = old_attr
            self.attr = new_attr
        else:
            self.mod = old_mod
            if old_attr is None:
                old_attr = name
            self.attr = old_attr
    def _resolve(self):
        # Import the owning module and pull the attribute out of it.
        module = _import_module(self.mod)
        return getattr(module, self.attr)
class _SixMetaPathImporter(object):
    """
    A meta path importer to import six.moves and its submodules.
    This class implements a PEP302 finder and loader. It should be compatible
    with Python 2.5 and all existing versions of Python3
    """
    def __init__(self, six_module_name):
        # Base package name (normally "six"); known_modules maps
        # "<base>.<alias>" to the module object or MovedModule descriptor.
        self.name = six_module_name
        self.known_modules = {}
    def _add_module(self, mod, *fullnames):
        # Register `mod` under one or more dotted aliases below self.name.
        for fullname in fullnames:
            self.known_modules[self.name + "." + fullname] = mod
    def _get_module(self, fullname):
        return self.known_modules[self.name + "." + fullname]
    def find_module(self, fullname, path=None):
        # Legacy PEP 302 finder protocol.
        if fullname in self.known_modules:
            return self
        return None
    def find_spec(self, fullname, path, target=None):
        # PEP 451 finder protocol (Python 3.4+; spec_from_loader is None
        # on older interpreters, where find_module is used instead).
        if fullname in self.known_modules:
            return spec_from_loader(fullname, self)
        return None
    def __get_module(self, fullname):
        try:
            return self.known_modules[fullname]
        except KeyError:
            raise ImportError("This loader does not know module " + fullname)
    def load_module(self, fullname):
        try:
            # in case of a reload
            return sys.modules[fullname]
        except KeyError:
            pass
        mod = self.__get_module(fullname)
        if isinstance(mod, MovedModule):
            # Descriptor: resolve to the real module it points at.
            mod = mod._resolve()
        else:
            mod.__loader__ = self
        sys.modules[fullname] = mod
        return mod
    def is_package(self, fullname):
        """
        Return true, if the named module is a package.
        We need this method to get correct spec objects with
        Python 3.4 (see PEP451)
        """
        return hasattr(self.__get_module(fullname), "__path__")
    def get_code(self, fullname):
        """Return None
        Required, if is_package is implemented"""
        self.__get_module(fullname)  # eventually raises ImportError
        return None
    get_source = get_code  # same as get_code
    def create_module(self, spec):
        # PEP 451 loader protocol: delegate to the legacy load_module path.
        return self.load_module(spec.name)
    def exec_module(self, module):
        # Nothing to execute: create_module already returned a fully
        # initialized module object.
        pass
# Singleton importer instance through which all six.moves aliases are served.
_importer = _SixMetaPathImporter(__name__)
# Synthetic "six.moves" module; its docstring intentionally stays as-is since
# _LazyModule.__init__ copies it onto the module object at runtime.
class _MovedItems(_LazyModule):
    """Lazy loading of moved objects"""
    __path__ = []  # mark as package
# Master table of Python-2 -> Python-3 renames exposed under six.moves.
# MovedAttribute entries alias a single object; MovedModule entries alias a
# whole module.
_moved_attributes = [
    MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"),
    MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"),
    MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"),
    MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"),
    MovedAttribute("intern", "__builtin__", "sys"),
    MovedAttribute("map", "itertools", "builtins", "imap", "map"),
    MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"),
    MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"),
    MovedAttribute("getoutput", "commands", "subprocess"),
    MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"),
    MovedAttribute("reduce", "__builtin__", "functools"),
    MovedAttribute("shlex_quote", "pipes", "shlex", "quote"),
    MovedAttribute("StringIO", "StringIO", "io"),
    MovedAttribute("UserDict", "UserDict", "collections"),
    MovedAttribute("UserList", "UserList", "collections"),
    MovedAttribute("UserString", "UserString", "collections"),
    MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"),
    MovedAttribute("zip", "itertools", "builtins", "izip", "zip"),
    MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"),
    MovedModule("builtins", "__builtin__"),
    MovedModule("configparser", "ConfigParser"),
    MovedModule("collections_abc", "collections", "collections.abc" if sys.version_info >= (3, 3) else "collections"),
    MovedModule("copyreg", "copy_reg"),
    MovedModule("dbm_gnu", "gdbm", "dbm.gnu"),
    MovedModule("dbm_ndbm", "dbm", "dbm.ndbm"),
    MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread" if sys.version_info < (3, 9) else "_thread"),
    MovedModule("http_cookiejar", "cookielib", "http.cookiejar"),
    MovedModule("http_cookies", "Cookie", "http.cookies"),
    MovedModule("html_entities", "htmlentitydefs", "html.entities"),
    MovedModule("html_parser", "HTMLParser", "html.parser"),
    MovedModule("http_client", "httplib", "http.client"),
    MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"),
    MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"),
    MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"),
    MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"),
    MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"),
    MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"),
    MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"),
    MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"),
    MovedModule("cPickle", "cPickle", "pickle"),
    MovedModule("queue", "Queue"),
    MovedModule("reprlib", "repr"),
    MovedModule("socketserver", "SocketServer"),
    MovedModule("_thread", "thread", "_thread"),
    MovedModule("tkinter", "Tkinter"),
    MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"),
    MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"),
    MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"),
    MovedModule("tkinter_tix", "Tix", "tkinter.tix"),
    MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"),
    MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"),
    MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"),
    MovedModule("tkinter_colorchooser", "tkColorChooser",
                "tkinter.colorchooser"),
    MovedModule("tkinter_commondialog", "tkCommonDialog",
                "tkinter.commondialog"),
    MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"),
    MovedModule("tkinter_font", "tkFont", "tkinter.font"),
    MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"),
    MovedModule("tkinter_tksimpledialog", "tkSimpleDialog",
                "tkinter.simpledialog"),
    MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"),
    MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"),
    MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"),
    MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"),
    MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"),
    MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"),
]
# Add windows specific modules.
if sys.platform == "win32":
    _moved_attributes += [
        MovedModule("winreg", "_winreg"),
    ]
# Install each descriptor on _MovedItems and register module-type entries
# with the meta-path importer under "six.moves.<name>".
for attr in _moved_attributes:
    setattr(_MovedItems, attr.name, attr)
    if isinstance(attr, MovedModule):
        _importer._add_module(attr, "moves." + attr.name)
del attr
_MovedItems._moved_attributes = _moved_attributes
# Create the synthetic six.moves module and register it with the importer.
moves = _MovedItems(__name__ + ".moves")
_importer._add_module(moves, "moves")
class Module_six_moves_urllib_parse(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_parse"""
# Python 2 split URL handling across the urlparse and urllib modules;
# Python 3 unifies it in urllib.parse.  Each entry maps one name onto
# its new location.
_urllib_parse_moved_attributes = [
    MovedAttribute("ParseResult", "urlparse", "urllib.parse"),
    MovedAttribute("SplitResult", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qs", "urlparse", "urllib.parse"),
    MovedAttribute("parse_qsl", "urlparse", "urllib.parse"),
    MovedAttribute("urldefrag", "urlparse", "urllib.parse"),
    MovedAttribute("urljoin", "urlparse", "urllib.parse"),
    MovedAttribute("urlparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlsplit", "urlparse", "urllib.parse"),
    MovedAttribute("urlunparse", "urlparse", "urllib.parse"),
    MovedAttribute("urlunsplit", "urlparse", "urllib.parse"),
    MovedAttribute("quote", "urllib", "urllib.parse"),
    MovedAttribute("quote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote", "urllib", "urllib.parse"),
    MovedAttribute("unquote_plus", "urllib", "urllib.parse"),
    MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"),
    MovedAttribute("urlencode", "urllib", "urllib.parse"),
    MovedAttribute("splitquery", "urllib", "urllib.parse"),
    MovedAttribute("splittag", "urllib", "urllib.parse"),
    MovedAttribute("splituser", "urllib", "urllib.parse"),
    MovedAttribute("splitvalue", "urllib", "urllib.parse"),
    MovedAttribute("uses_fragment", "urlparse", "urllib.parse"),
    MovedAttribute("uses_netloc", "urlparse", "urllib.parse"),
    MovedAttribute("uses_params", "urlparse", "urllib.parse"),
    MovedAttribute("uses_query", "urlparse", "urllib.parse"),
    MovedAttribute("uses_relative", "urlparse", "urllib.parse"),
]
# Attach each moved attribute to the lazy module class.
for attr in _urllib_parse_moved_attributes:
    setattr(Module_six_moves_urllib_parse, attr.name, attr)
del attr  # avoid leaking the loop variable
Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes
# Register under both the legacy underscore name and the dotted name.
_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"),
                      "moves.urllib_parse", "moves.urllib.parse")
class Module_six_moves_urllib_error(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_error"""
# Exception classes relocated from urllib/urllib2 to urllib.error.
_urllib_error_moved_attributes = [
    MovedAttribute("URLError", "urllib2", "urllib.error"),
    MovedAttribute("HTTPError", "urllib2", "urllib.error"),
    MovedAttribute("ContentTooShortError", "urllib", "urllib.error"),
]
# Attach each moved attribute to the lazy module class.
for attr in _urllib_error_moved_attributes:
    setattr(Module_six_moves_urllib_error, attr.name, attr)
del attr  # avoid leaking the loop variable
Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes
# Register under both the legacy underscore name and the dotted name.
_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"),
                      "moves.urllib_error", "moves.urllib.error")
class Module_six_moves_urllib_request(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_request"""
# Names relocated from urllib/urllib2 to urllib.request.
_urllib_request_moved_attributes = [
    MovedAttribute("urlopen", "urllib2", "urllib.request"),
    MovedAttribute("install_opener", "urllib2", "urllib.request"),
    MovedAttribute("build_opener", "urllib2", "urllib.request"),
    MovedAttribute("pathname2url", "urllib", "urllib.request"),
    MovedAttribute("url2pathname", "urllib", "urllib.request"),
    MovedAttribute("getproxies", "urllib", "urllib.request"),
    MovedAttribute("Request", "urllib2", "urllib.request"),
    MovedAttribute("OpenerDirector", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"),
    MovedAttribute("ProxyHandler", "urllib2", "urllib.request"),
    MovedAttribute("BaseHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"),
    MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"),
    MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"),
    MovedAttribute("FileHandler", "urllib2", "urllib.request"),
    MovedAttribute("FTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"),
    MovedAttribute("UnknownHandler", "urllib2", "urllib.request"),
    MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"),
    MovedAttribute("urlretrieve", "urllib", "urllib.request"),
    MovedAttribute("urlcleanup", "urllib", "urllib.request"),
    MovedAttribute("URLopener", "urllib", "urllib.request"),
    MovedAttribute("FancyURLopener", "urllib", "urllib.request"),
    MovedAttribute("proxy_bypass", "urllib", "urllib.request"),
    MovedAttribute("parse_http_list", "urllib2", "urllib.request"),
    MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"),
]
# Attach each moved attribute to the lazy module class.
for attr in _urllib_request_moved_attributes:
    setattr(Module_six_moves_urllib_request, attr.name, attr)
del attr  # avoid leaking the loop variable
Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes
# Register under both the legacy underscore name and the dotted name.
_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"),
                      "moves.urllib_request", "moves.urllib.request")
class Module_six_moves_urllib_response(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_response"""
# Response wrapper classes relocated from urllib to urllib.response.
_urllib_response_moved_attributes = [
    MovedAttribute("addbase", "urllib", "urllib.response"),
    MovedAttribute("addclosehook", "urllib", "urllib.response"),
    MovedAttribute("addinfo", "urllib", "urllib.response"),
    MovedAttribute("addinfourl", "urllib", "urllib.response"),
]
# Attach each moved attribute to the lazy module class.
for attr in _urllib_response_moved_attributes:
    setattr(Module_six_moves_urllib_response, attr.name, attr)
del attr  # avoid leaking the loop variable
Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes
# Register under both the legacy underscore name and the dotted name.
_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"),
                      "moves.urllib_response", "moves.urllib.response")
class Module_six_moves_urllib_robotparser(_LazyModule):
    """Lazy loading of moved objects in six.moves.urllib_robotparser"""
# The robotparser module became urllib.robotparser in Python 3.
_urllib_robotparser_moved_attributes = [
    MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"),
]
# Attach each moved attribute to the lazy module class.
for attr in _urllib_robotparser_moved_attributes:
    setattr(Module_six_moves_urllib_robotparser, attr.name, attr)
del attr  # avoid leaking the loop variable
Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes
# Register under both the legacy underscore name and the dotted name.
_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"),
                      "moves.urllib_robotparser", "moves.urllib.robotparser")
class Module_six_moves_urllib(types.ModuleType):
    """Create a six.moves.urllib namespace that resembles the Python 3 namespace"""
    __path__ = []  # mark as package
    # Submodules come from the importer registry populated above.
    parse = _importer._get_module("moves.urllib_parse")
    error = _importer._get_module("moves.urllib_error")
    request = _importer._get_module("moves.urllib_request")
    response = _importer._get_module("moves.urllib_response")
    robotparser = _importer._get_module("moves.urllib_robotparser")
    def __dir__(self):
        # Only the five urllib submodules form the public namespace.
        return ['parse', 'error', 'request', 'response', 'robotparser']
_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"),
                      "moves.urllib")
def add_move(move):
    """Add an item to six.moves.

    *move* is a MovedAttribute or MovedModule; it becomes available as
    ``six.moves.<move.name>``.
    """
    setattr(_MovedItems, move.name, move)
def remove_move(name):
    """Remove item from six.moves.

    Raises AttributeError if no move of that name is registered.
    """
    try:
        delattr(_MovedItems, name)
    except AttributeError:
        try:
            # The item may have been set directly on the moves instance.
            del moves.__dict__[name]
        except KeyError:
            raise AttributeError("no such move, %r" % (name,))
# Names of function/method introspection attributes, which were renamed
# between Python 2 and 3 (e.g. im_func -> __func__).
if PY3:
    _meth_func = "__func__"
    _meth_self = "__self__"
    _func_closure = "__closure__"
    _func_code = "__code__"
    _func_defaults = "__defaults__"
    _func_globals = "__globals__"
else:
    _meth_func = "im_func"
    _meth_self = "im_self"
    _func_closure = "func_closure"
    _func_code = "func_code"
    _func_defaults = "func_defaults"
    _func_globals = "func_globals"
try:
    # The next() builtin only appeared in Python 2.6.
    advance_iterator = next
except NameError:
    def advance_iterator(it):
        return it.next()
next = advance_iterator
try:
    # callable() was removed in Python 3.0 and restored in 3.2.
    callable = callable
except NameError:
    def callable(obj):
        return any("__call__" in klass.__dict__ for klass in type(obj).__mro__)
# Helpers papering over the removal of unbound methods in Python 3.
if PY3:
    def get_unbound_function(unbound):
        # Python 3 has no unbound methods; plain functions pass through.
        return unbound
    create_bound_method = types.MethodType
    def create_unbound_method(func, cls):
        return func
    Iterator = object
else:
    def get_unbound_function(unbound):
        return unbound.im_func
    def create_bound_method(func, obj):
        return types.MethodType(func, obj, obj.__class__)
    def create_unbound_method(func, cls):
        return types.MethodType(func, None, cls)
    class Iterator(object):
        # Gives Python 2 classes a next() that delegates to the
        # Python-3-style __next__().
        def next(self):
            return type(self).__next__(self)
    callable = callable
_add_doc(get_unbound_function,
         """Get the function out of a possibly unbound function""")
# Version-neutral accessors for method/function internals, built from the
# attribute-name constants chosen above.
get_method_function = operator.attrgetter(_meth_func)
get_method_self = operator.attrgetter(_meth_self)
get_function_closure = operator.attrgetter(_func_closure)
get_function_code = operator.attrgetter(_func_code)
get_function_defaults = operator.attrgetter(_func_defaults)
get_function_globals = operator.attrgetter(_func_globals)
# Version-neutral dict iteration: on Python 3 the plain methods return
# views, on Python 2 the dedicated iter*/view* methods are used.
if PY3:
    def iterkeys(d, **kw):
        return iter(d.keys(**kw))
    def itervalues(d, **kw):
        return iter(d.values(**kw))
    def iteritems(d, **kw):
        return iter(d.items(**kw))
    def iterlists(d, **kw):
        # For multidict-style objects exposing a lists() method.
        return iter(d.lists(**kw))
    viewkeys = operator.methodcaller("keys")
    viewvalues = operator.methodcaller("values")
    viewitems = operator.methodcaller("items")
else:
    def iterkeys(d, **kw):
        return d.iterkeys(**kw)
    def itervalues(d, **kw):
        return d.itervalues(**kw)
    def iteritems(d, **kw):
        return d.iteritems(**kw)
    def iterlists(d, **kw):
        return d.iterlists(**kw)
    viewkeys = operator.methodcaller("viewkeys")
    viewvalues = operator.methodcaller("viewvalues")
    viewitems = operator.methodcaller("viewitems")
_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.")
_add_doc(itervalues, "Return an iterator over the values of a dictionary.")
_add_doc(iteritems,
         "Return an iterator over the (key, value) pairs of a dictionary.")
_add_doc(iterlists,
         "Return an iterator over the (key, [values]) pairs of a dictionary.")
# Byte/text literal helpers and the per-version unittest assertion names.
if PY3:
    def b(s):
        # latin-1 maps code points 0-255 directly onto single bytes.
        return s.encode("latin-1")
    def u(s):
        return s
    unichr = chr
    import struct
    int2byte = struct.Struct(">B").pack
    del struct
    byte2int = operator.itemgetter(0)
    indexbytes = operator.getitem
    iterbytes = iter
    import io
    StringIO = io.StringIO
    BytesIO = io.BytesIO
    del io
    _assertCountEqual = "assertCountEqual"
    if sys.version_info[1] <= 1:
        # Python 3.0/3.1 still used the Python 2 assertion names.
        _assertRaisesRegex = "assertRaisesRegexp"
        _assertRegex = "assertRegexpMatches"
        _assertNotRegex = "assertNotRegexpMatches"
    else:
        _assertRaisesRegex = "assertRaisesRegex"
        _assertRegex = "assertRegex"
        _assertNotRegex = "assertNotRegex"
else:
    def b(s):
        return s
    # Workaround for standalone backslash
    def u(s):
        return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape")
    unichr = unichr
    int2byte = chr
    def byte2int(bs):
        return ord(bs[0])
    def indexbytes(buf, i):
        return ord(buf[i])
    iterbytes = functools.partial(itertools.imap, ord)
    import StringIO
    StringIO = BytesIO = StringIO.StringIO
    _assertCountEqual = "assertItemsEqual"
    _assertRaisesRegex = "assertRaisesRegexp"
    _assertRegex = "assertRegexpMatches"
    _assertNotRegex = "assertNotRegexpMatches"
_add_doc(b, """Byte literal""")
_add_doc(u, """Text literal""")
def assertCountEqual(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest count-equality assertion."""
    bound = getattr(self, _assertCountEqual)
    return bound(*args, **kwargs)


def assertRaisesRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest raises-regex assertion."""
    bound = getattr(self, _assertRaisesRegex)
    return bound(*args, **kwargs)


def assertRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest regex-match assertion."""
    bound = getattr(self, _assertRegex)
    return bound(*args, **kwargs)


def assertNotRegex(self, *args, **kwargs):
    """Dispatch to the version-appropriate unittest regex-non-match assertion."""
    bound = getattr(self, _assertNotRegex)
    return bound(*args, **kwargs)
# Exception re-raising helpers.  Each interpreter line only ever sees
# syntax it can parse: Python-2-only raise forms are hidden in strings.
if PY3:
    exec_ = getattr(moves.builtins, "exec")
    def reraise(tp, value, tb=None):
        try:
            if value is None:
                value = tp()
            if value.__traceback__ is not tb:
                raise value.with_traceback(tb)
            raise value
        finally:
            # Break reference cycles through the traceback.
            value = None
            tb = None
else:
    def exec_(_code_, _globs_=None, _locs_=None):
        """Execute code in a namespace."""
        if _globs_ is None:
            # Default to the caller's globals/locals.
            frame = sys._getframe(1)
            _globs_ = frame.f_globals
            if _locs_ is None:
                _locs_ = frame.f_locals
            del frame
        elif _locs_ is None:
            _locs_ = _globs_
        exec("""exec _code_ in _globs_, _locs_""")
    # The three-argument raise is Python 2 only, so define via exec_.
    exec_("""def reraise(tp, value, tb=None):
    try:
        raise tp, value, tb
    finally:
        tb = None
""")
if sys.version_info[:2] > (3,):
    # "raise ... from ..." is Python 3 only syntax, so define via exec_.
    exec_("""def raise_from(value, from_value):
    try:
        raise value from from_value
    finally:
        value = None
""")
else:
    def raise_from(value, from_value):
        raise value
# Use the builtin print function when available (Python 2.6+ via
# __builtin__, always on Python 3); otherwise emulate it.
print_ = getattr(moves.builtins, "print", None)
if print_ is None:
    def print_(*args, **kwargs):
        """The new-style print function for Python 2.4 and 2.5."""
        fp = kwargs.pop("file", sys.stdout)
        if fp is None:
            return
        def write(data):
            if not isinstance(data, basestring):
                data = str(data)
            # If the file has an encoding, encode unicode with it.
            if (isinstance(fp, file) and
                    isinstance(data, unicode) and
                    fp.encoding is not None):
                errors = getattr(fp, "errors", None)
                if errors is None:
                    errors = "strict"
                data = data.encode(fp.encoding, errors)
            fp.write(data)
        want_unicode = False
        sep = kwargs.pop("sep", None)
        if sep is not None:
            if isinstance(sep, unicode):
                want_unicode = True
            elif not isinstance(sep, str):
                raise TypeError("sep must be None or a string")
        end = kwargs.pop("end", None)
        if end is not None:
            if isinstance(end, unicode):
                want_unicode = True
            elif not isinstance(end, str):
                raise TypeError("end must be None or a string")
        if kwargs:
            raise TypeError("invalid keyword arguments to print()")
        # If any argument (or sep/end) is unicode, the whole line must be
        # written as unicode to avoid implicit-coercion errors.
        if not want_unicode:
            for arg in args:
                if isinstance(arg, unicode):
                    want_unicode = True
                    break
        if want_unicode:
            newline = unicode("\n")
            space = unicode(" ")
        else:
            newline = "\n"
            space = " "
        if sep is None:
            sep = space
        if end is None:
            end = newline
        for i, arg in enumerate(args):
            if i:
                write(sep)
            write(arg)
        write(end)
if sys.version_info[:2] < (3, 3):
    # The flush keyword was added to print() in Python 3.3; wrap older
    # implementations to accept and honour it.
    _print = print_
    def print_(*args, **kwargs):
        fp = kwargs.get("file", sys.stdout)
        flush = kwargs.pop("flush", False)
        _print(*args, **kwargs)
        if flush and fp is not None:
            fp.flush()
_add_doc(reraise, """Reraise an exception.""")
if sys.version_info[0:2] < (3, 4):
    # This does exactly the same what the :func:`py3:functools.update_wrapper`
    # function does on Python versions after 3.2. It sets the ``__wrapped__``
    # attribute on ``wrapper`` object and it doesn't raise an error if any of
    # the attributes mentioned in ``assigned`` and ``updated`` are missing on
    # ``wrapped`` object.
    def _update_wrapper(wrapper, wrapped,
                        assigned=functools.WRAPPER_ASSIGNMENTS,
                        updated=functools.WRAPPER_UPDATES):
        # Copy metadata attributes, tolerating ones the wrapped object lacks.
        for attr in assigned:
            try:
                value = getattr(wrapped, attr)
            except AttributeError:
                continue
            else:
                setattr(wrapper, attr, value)
        # Merge dict-valued attributes (e.g. __dict__) instead of replacing.
        for attr in updated:
            getattr(wrapper, attr).update(getattr(wrapped, attr, {}))
        wrapper.__wrapped__ = wrapped
        return wrapper
    _update_wrapper.__doc__ = functools.update_wrapper.__doc__
    def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS,
              updated=functools.WRAPPER_UPDATES):
        return functools.partial(_update_wrapper, wrapped=wrapped,
                                 assigned=assigned, updated=updated)
    wraps.__doc__ = functools.wraps.__doc__
else:
    # functools.wraps already sets __wrapped__ on 3.4+.
    wraps = functools.wraps
def with_metaclass(meta, *bases):
    """Create a base class with a metaclass."""
    # This requires a bit of explanation: the basic idea is to make a dummy
    # metaclass for one level of class instantiation that replaces itself with
    # the actual metaclass.
    class metaclass(type):
        def __new__(cls, name, this_bases, d):
            if sys.version_info[:2] >= (3, 7):
                # This version introduced PEP 560 that requires a bit
                # of extra care (we mimic what is done by __build_class__).
                resolved_bases = types.resolve_bases(bases)
                if resolved_bases is not bases:
                    d['__orig_bases__'] = bases
            else:
                resolved_bases = bases
            # Build the real class with the real metaclass and bases; the
            # temporary class below is never actually a base of the result.
            return meta(name, resolved_bases, d)
        @classmethod
        def __prepare__(cls, name, this_bases):
            return meta.__prepare__(name, bases)
    return type.__new__(metaclass, 'temporary_class', (), {})
def add_metaclass(metaclass):
    """Class decorator for creating a class with a metaclass.

    The decorated class is rebuilt with *metaclass* as its type, keeping
    its name, bases and namespace (minus slot descriptors and the
    per-instance ``__dict__``/``__weakref__`` descriptors, which the new
    class will recreate itself).
    """
    def wrapper(cls):
        body = dict(cls.__dict__)
        slots = body.get('__slots__')
        if slots is not None:
            # __slots__ may be a single string or an iterable of names;
            # drop the slot descriptors so the new class can recreate them.
            slot_names = [slots] if isinstance(slots, str) else slots
            for slot_name in slot_names:
                body.pop(slot_name)
        body.pop('__dict__', None)
        body.pop('__weakref__', None)
        if hasattr(cls, '__qualname__'):
            body['__qualname__'] = cls.__qualname__
        return metaclass(cls.__name__, cls.__bases__, body)
    return wrapper
def ensure_binary(s, encoding='utf-8', errors='strict'):
    """Coerce **s** to six.binary_type.

    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`

    For Python 3:
      - `str` -> encoded to `bytes`
      - `bytes` -> `bytes`
    """
    if isinstance(s, text_type):
        return s.encode(encoding, errors)
    if isinstance(s, binary_type):
        return s
    raise TypeError("not expecting type '%s'" % type(s))
def ensure_str(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to `str`.

    For Python 2:
      - `unicode` -> encoded to `str`
      - `str` -> `str`

    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`
    """
    # Fast path: exact str instances pass straight through.
    if type(s) is str:
        return s
    if PY2 and isinstance(s, text_type):
        return s.encode(encoding, errors)
    if PY3 and isinstance(s, binary_type):
        return s.decode(encoding, errors)
    if not isinstance(s, (text_type, binary_type)):
        raise TypeError("not expecting type '%s'" % type(s))
    return s
def ensure_text(s, encoding='utf-8', errors='strict'):
    """Coerce *s* to six.text_type.

    For Python 2:
      - `unicode` -> `unicode`
      - `str` -> `unicode`

    For Python 3:
      - `str` -> `str`
      - `bytes` -> decoded to `str`
    """
    if isinstance(s, text_type):
        return s
    if isinstance(s, binary_type):
        return s.decode(encoding, errors)
    raise TypeError("not expecting type '%s'" % type(s))
def python_2_unicode_compatible(klass):
    """
    A class decorator that defines __unicode__ and __str__ methods under Python 2.
    Under Python 3 it does nothing.

    To support Python 2 and 3 with a single code base, define a __str__ method
    returning text and apply this decorator to the class.
    """
    if not PY2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # The author's text-returning __str__ becomes __unicode__, and __str__
    # is replaced by a UTF-8-encoding shim.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
# Complete the moves implementation.
# This code is at the end of this module to speed up module loading.
# Turn this module into a package.
__path__ = []  # required for PEP 302 and PEP 451
__package__ = __name__  # see PEP 366 @ReservedAssignment
if globals().get("__spec__") is not None:
    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
# Remove other six meta path importers, since they cause problems. This can
# happen if six is removed from sys.modules and then reloaded. (Setuptools does
# this for some reason.)
if sys.meta_path:
    for i, importer in enumerate(sys.meta_path):
        # Here's some real nastiness: Another "instance" of the six module might
        # be floating around. Therefore, we can't use isinstance() to check for
        # the six meta path importer, since the other six instance will have
        # inserted an importer with different class.
        if (type(importer).__name__ == "_SixMetaPathImporter" and
                importer.name == __name__):
            del sys.meta_path[i]
            break
    del i, importer
# Finally, add the importer to the meta path import hook.
sys.meta_path.append(_importer)
import abc
import collections
import collections.abc
import functools
import inspect
import operator
import sys
import types as _types
import typing
import warnings
__all__ = [
# Super-special typing primitives.
'Any',
'ClassVar',
'Concatenate',
'Final',
'LiteralString',
'ParamSpec',
'ParamSpecArgs',
'ParamSpecKwargs',
'Self',
'Type',
'TypeVar',
'TypeVarTuple',
'Unpack',
# ABCs (from collections.abc).
'Awaitable',
'AsyncIterator',
'AsyncIterable',
'Coroutine',
'AsyncGenerator',
'AsyncContextManager',
'Buffer',
'ChainMap',
# Concrete collection types.
'ContextManager',
'Counter',
'Deque',
'DefaultDict',
'NamedTuple',
'OrderedDict',
'TypedDict',
# Structural checks, a.k.a. protocols.
'SupportsAbs',
'SupportsBytes',
'SupportsComplex',
'SupportsFloat',
'SupportsIndex',
'SupportsInt',
'SupportsRound',
# One-off things.
'Annotated',
'assert_never',
'assert_type',
'clear_overloads',
'dataclass_transform',
'deprecated',
'Doc',
'get_overloads',
'final',
'get_args',
'get_origin',
'get_original_bases',
'get_protocol_members',
'get_type_hints',
'IntVar',
'is_protocol',
'is_typeddict',
'Literal',
'NewType',
'overload',
'override',
'Protocol',
'reveal_type',
'runtime',
'runtime_checkable',
'Text',
'TypeAlias',
'TypeAliasType',
'TypeGuard',
'TYPE_CHECKING',
'Never',
'NoReturn',
'ReadOnly',
'Required',
'NotRequired',
# Pure aliases, have always been in typing
'AbstractSet',
'AnyStr',
'BinaryIO',
'Callable',
'Collection',
'Container',
'Dict',
'ForwardRef',
'FrozenSet',
'Generator',
'Generic',
'Hashable',
'IO',
'ItemsView',
'Iterable',
'Iterator',
'KeysView',
'List',
'Mapping',
'MappingView',
'Match',
'MutableMapping',
'MutableSequence',
'MutableSet',
'Optional',
'Pattern',
'Reversible',
'Sequence',
'Set',
'Sized',
'TextIO',
'Tuple',
'Union',
'ValuesView',
'cast',
'no_type_check',
'no_type_check_decorator',
]
# for backward compatibility
# PEP 560 support is unconditionally present on all Pythons this file runs on.
PEP_560 = True
# typing no longer uses a custom generic metaclass; plain ``type`` stands in.
GenericMeta = type
# The functions below are modified copies of typing internal helpers.
# They are needed by _ProtocolMeta and they provide support for PEP 646.
class _Sentinel:
    """Unique placeholder used to distinguish "not passed" from any real value."""
    def __repr__(self):
        return "<sentinel>"
_marker = _Sentinel()
def _check_generic(cls, parameters, elen=_marker):
    """Check correct count for parameters of a generic cls (internal helper).
    This gives a nice error message in case of count mismatch.
    """
    if not elen:
        raise TypeError(f"{cls} is not a generic class")
    if elen is _marker:
        # Expected length not supplied: derive it from cls.__parameters__.
        if not hasattr(cls, "__parameters__") or not cls.__parameters__:
            raise TypeError(f"{cls} is not a generic class")
        elen = len(cls.__parameters__)
    alen = len(parameters)
    if alen != elen:
        if hasattr(cls, "__parameters__"):
            parameters = [p for p in cls.__parameters__ if not _is_unpack(p)]
            num_tv_tuples = sum(isinstance(p, TypeVarTuple) for p in parameters)
            # A TypeVarTuple can absorb any number of arguments (PEP 646),
            # so tolerate a shortfall of up to one argument per TypeVarTuple.
            if (num_tv_tuples > 0) and (alen >= elen - num_tv_tuples):
                return
        raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};"
                        f" actual {alen}, expected {elen}")
# Decide whether an annotation object carries __parameters__ worth scanning;
# the set of relevant alias types grew across Python versions.
if sys.version_info >= (3, 10):
    def _should_collect_from_parameters(t):
        # 3.10+: "X | Y" unions are types.UnionType instances.
        return isinstance(
            t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType)
        )
elif sys.version_info >= (3, 9):
    def _should_collect_from_parameters(t):
        # 3.9: builtin generics like list[int] are types.GenericAlias.
        return isinstance(t, (typing._GenericAlias, _types.GenericAlias))
else:
    def _should_collect_from_parameters(t):
        # <=3.8: special forms are also _GenericAlias; skip them via _special.
        return isinstance(t, typing._GenericAlias) and not t._special
def _collect_type_vars(types, typevar_types=None):
    """Collect all type variables contained in *types*, in order of first
    appearance (lexicographic order). For example::

        _collect_type_vars((T, List[S, T])) == (T, S)
    """
    if typevar_types is None:
        typevar_types = typing.TypeVar
    found = []
    for candidate in types:
        is_new_typevar = (
            isinstance(candidate, typevar_types)
            and candidate not in found
            and not _is_unpack(candidate)
        )
        if is_new_typevar:
            found.append(candidate)
        if _should_collect_from_parameters(candidate):
            # Recurse one level via the alias's own collected parameters.
            for param in candidate.__parameters__:
                if param not in found:
                    found.append(param)
    return tuple(found)
NoReturn = typing.NoReturn
# Some unconstrained type variables.  These are used by the container types.
# (These are not for export.)
T = typing.TypeVar('T')  # Any type.
KT = typing.TypeVar('KT')  # Key type.
VT = typing.TypeVar('VT')  # Value type.
T_co = typing.TypeVar('T_co', covariant=True)  # Any type covariant containers.
T_contra = typing.TypeVar('T_contra', contravariant=True)  # Ditto contravariant.
# 3.11 made typing.Any subclassable; backport that behaviour for older
# versions with a runtime class guarded by a metaclass.
if sys.version_info >= (3, 11):
    from typing import Any
else:
    class _AnyMeta(type):
        def __instancecheck__(self, obj):
            if self is Any:
                # Mirrors typing.Any: isinstance() against Any is an error.
                raise TypeError("typing_extensions.Any cannot be used with isinstance()")
            return super().__instancecheck__(obj)
        def __repr__(self):
            if self is Any:
                return "typing_extensions.Any"
            return super().__repr__()  # respect to subclasses
    class Any(metaclass=_AnyMeta):
        """Special type indicating an unconstrained type.
        - Any is compatible with every type.
        - Any assumed to have all methods.
        - All values assumed to be instances of Any.
        Note that all the above statements are true from the point of view of
        static type checkers. At runtime, Any should not be used with instance
        checks.
        """
        def __new__(cls, *args, **kwargs):
            if cls is Any:
                # Only subclasses of Any may be instantiated.
                raise TypeError("Any cannot be instantiated")
            return super().__new__(cls, *args, **kwargs)
ClassVar = typing.ClassVar
class _ExtensionsSpecialForm(typing._SpecialForm, _root=True):
    # Special form whose repr points at typing_extensions rather than typing.
    def __repr__(self):
        return 'typing_extensions.' + self._name
Final = typing.Final
if sys.version_info >= (3, 11):
    final = typing.final
else:
    # @final exists in 3.8+, but we backport it for all versions
    # before 3.11 to keep support for the __final__ attribute.
    # See https://bugs.python.org/issue46342
    def final(f):
        """This decorator can be used to indicate to type checkers that
        the decorated method cannot be overridden, and decorated class
        cannot be subclassed. For example:

            class Base:
                @final
                def done(self) -> None:
                    ...
            class Sub(Base):
                def done(self) -> None:  # Error reported by type checker
                    ...
            @final
            class Leaf:
                ...
            class Other(Leaf):  # Error reported by type checker
                ...

        There is no runtime checking of these properties. The decorator
        sets the ``__final__`` attribute to ``True`` on the decorated object
        to allow runtime introspection.
        """
        try:
            f.__final__ = True
        except (AttributeError, TypeError):
            # Skip the attribute silently if it is not writable.
            # AttributeError happens if the object has __slots__ or a
            # read-only property, TypeError if it's a builtin class.
            pass
        return f
def IntVar(name):
    """Backward-compatibility shim: an IntVar is just a plain TypeVar."""
    type_var = typing.TypeVar(name)
    return type_var
# A Literal bug was fixed in 3.11.0, 3.10.1 and 3.9.8
if sys.version_info >= (3, 10, 1):
    Literal = typing.Literal
else:
    def _flatten_literal_params(parameters):
        """An internal helper for Literal creation: flatten Literals among parameters"""
        params = []
        for p in parameters:
            if isinstance(p, _LiteralGenericAlias):
                # Literal[Literal[1, 2], 3] == Literal[1, 2, 3]
                params.extend(p.__args__)
            else:
                params.append(p)
        return tuple(params)
    def _value_and_type_iter(params):
        # (value, type) pairs so that e.g. 0 and False stay distinct.
        for p in params:
            yield p, type(p)
    class _LiteralGenericAlias(typing._GenericAlias, _root=True):
        def __eq__(self, other):
            if not isinstance(other, _LiteralGenericAlias):
                return NotImplemented
            these_args_deduped = set(_value_and_type_iter(self.__args__))
            other_args_deduped = set(_value_and_type_iter(other.__args__))
            return these_args_deduped == other_args_deduped
        def __hash__(self):
            return hash(frozenset(_value_and_type_iter(self.__args__)))
    class _LiteralForm(_ExtensionsSpecialForm, _root=True):
        def __init__(self, doc: str):
            self._name = 'Literal'
            self._doc = self.__doc__ = doc
        def __getitem__(self, parameters):
            if not isinstance(parameters, tuple):
                parameters = (parameters,)
            parameters = _flatten_literal_params(parameters)
            val_type_pairs = list(_value_and_type_iter(parameters))
            try:
                deduped_pairs = set(val_type_pairs)
            except TypeError:
                # unhashable parameters
                pass
            else:
                # similar logic to typing._deduplicate on Python 3.9+
                # (keep first occurrence, preserve original ordering)
                if len(deduped_pairs) < len(val_type_pairs):
                    new_parameters = []
                    for pair in val_type_pairs:
                        if pair in deduped_pairs:
                            new_parameters.append(pair[0])
                            deduped_pairs.remove(pair)
                    assert not deduped_pairs, deduped_pairs
                    parameters = tuple(new_parameters)
            return _LiteralGenericAlias(self, parameters)
    Literal = _LiteralForm(doc="""\
                           A type that can be used to indicate to type checkers
                           that the corresponding value has a value literally equivalent
                           to the provided parameter. For example:

                               var: Literal[4] = 4

                           The type checker understands that 'var' is literally equal to
                           the value 4 and no other value.

                           Literal[...] cannot be subclassed. There is no runtime
                           checking verifying that the parameter is actually a value
                           instead of a type.""")
_overload_dummy = typing._overload_dummy
if hasattr(typing, "get_overloads"):  # 3.11+
    overload = typing.overload
    get_overloads = typing.get_overloads
    clear_overloads = typing.clear_overloads
else:
    # {module: {qualname: {firstlineno: func}}}
    _overload_registry = collections.defaultdict(
        functools.partial(collections.defaultdict, dict)
    )
    def overload(func):
        """Decorator for overloaded functions/methods.

        In a stub file, place two or more stub definitions for the same
        function in a row, each decorated with @overload.  For example:

        @overload
        def utf8(value: None) -> None: ...
        @overload
        def utf8(value: bytes) -> bytes: ...
        @overload
        def utf8(value: str) -> bytes: ...

        In a non-stub file (i.e. a regular .py file), do the same but
        follow it with an implementation.  The implementation should *not*
        be decorated with @overload.  For example:

        @overload
        def utf8(value: None) -> None: ...
        @overload
        def utf8(value: bytes) -> bytes: ...
        @overload
        def utf8(value: str) -> bytes: ...
        def utf8(value):
            # implementation goes here

        The overloads for a function can be retrieved at runtime using the
        get_overloads() function.
        """
        # classmethod and staticmethod
        f = getattr(func, "__func__", func)
        try:
            # Keyed by first line number so redefinitions replace themselves.
            _overload_registry[f.__module__][f.__qualname__][
                f.__code__.co_firstlineno
            ] = func
        except AttributeError:
            # Not a normal function; ignore.
            pass
        return _overload_dummy
    def get_overloads(func):
        """Return all defined overloads for *func* as a sequence."""
        # classmethod and staticmethod
        f = getattr(func, "__func__", func)
        if f.__module__ not in _overload_registry:
            return []
        mod_dict = _overload_registry[f.__module__]
        if f.__qualname__ not in mod_dict:
            return []
        return list(mod_dict[f.__qualname__].values())
    def clear_overloads():
        """Clear all overloads in the registry."""
        _overload_registry.clear()
# This is not a real generic class.  Don't use outside annotations.
Type = typing.Type
# Various ABCs mimicking those in collections.abc.
# A few are simply re-exported for completeness.
Awaitable = typing.Awaitable
Coroutine = typing.Coroutine
AsyncIterable = typing.AsyncIterable
AsyncIterator = typing.AsyncIterator
Deque = typing.Deque
ContextManager = typing.ContextManager
AsyncContextManager = typing.AsyncContextManager
DefaultDict = typing.DefaultDict
OrderedDict = typing.OrderedDict
Counter = typing.Counter
ChainMap = typing.ChainMap
AsyncGenerator = typing.AsyncGenerator
Text = typing.Text
TYPE_CHECKING = typing.TYPE_CHECKING
# Classes from these modules may be used as protocol bases even though they
# are not Protocol subclasses themselves.
_PROTO_ALLOWLIST = {
    'collections.abc': [
        'Callable', 'Awaitable', 'Iterable', 'Iterator', 'AsyncIterable',
        'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', 'Buffer',
    ],
    'contextlib': ['AbstractContextManager', 'AbstractAsyncContextManager'],
    'typing_extensions': ['Buffer'],
}
# Attribute names that never count as protocol members.
_EXCLUDED_ATTRS = {
    "__abstractmethods__", "__annotations__", "__weakref__", "_is_protocol",
    "_is_runtime_protocol", "__dict__", "__slots__", "__parameters__",
    "__orig_bases__", "__module__", "_MutableMapping__marker", "__doc__",
    "__subclasshook__", "__orig_class__", "__init__", "__new__",
    "__protocol_attrs__", "__callable_proto_members_only__",
    "__match_args__",
}
if sys.version_info >= (3, 9):
    _EXCLUDED_ATTRS.add("__class_getitem__")
if sys.version_info >= (3, 12):
    _EXCLUDED_ATTRS.add("__type_params__")
# Freeze once fully built; membership tests dominate usage.
_EXCLUDED_ATTRS = frozenset(_EXCLUDED_ATTRS)
def _get_protocol_attrs(cls):
    """Return the set of names that constitute *cls*'s protocol members.

    Scans both class dicts and annotations across the MRO, skipping
    ``object``, the ``Protocol``/``Generic`` bases themselves, abc
    machinery and the bookkeeping names in ``_EXCLUDED_ATTRS``.
    """
    members = set()
    for base in cls.__mro__[:-1]:  # leave out object
        if base.__name__ in {'Protocol', 'Generic'}:
            continue
        base_annotations = getattr(base, '__annotations__', {})
        for name in (*base.__dict__, *base_annotations):
            if not name.startswith('_abc_') and name not in _EXCLUDED_ATTRS:
                members.add(name)
    return members
def _caller(depth=2):
try:
return sys._getframe(depth).f_globals.get('__name__', '__main__')
except (AttributeError, ValueError): # For platforms without _getframe()
return None
# `__match_args__` attribute was removed from protocol members in 3.13,
# we want to backport this change to older Python versions.
if sys.version_info >= (3, 13):
    Protocol = typing.Protocol
else:
    def _allow_reckless_class_checks(depth=3):
        """Allow instance and class checks for special stdlib modules.
        The abc and functools modules indiscriminately call isinstance() and
        issubclass() on the whole MRO of a user class, which may contain protocols.
        """
        return _caller(depth) in {'abc', 'functools', None}
    # Replacement __init__ installed on protocol classes: instantiating a
    # protocol directly is forbidden, only concrete subclasses may be built.
    def _no_init(self, *args, **kwargs):
        if type(self)._is_protocol:
            raise TypeError('Protocols cannot be instantiated')
    # Inheriting from typing._ProtocolMeta isn't actually desirable,
    # but is necessary to allow typing.Protocol and typing_extensions.Protocol
    # to mix without getting TypeErrors about "metaclass conflict"
    class _ProtocolMeta(type(typing.Protocol)):
        # This metaclass is somewhat unfortunate,
        # but is necessary for several reasons...
        #
        # NOTE: DO NOT call super() in any methods in this class
        # That would call the methods on typing._ProtocolMeta on Python 3.8-3.11
        # and those are slow
        def __new__(mcls, name, bases, namespace, **kwargs):
            # Validate bases: a protocol may only inherit from other protocols
            # (plus a small allowlist of structural stdlib ABCs).
            if name == "Protocol" and len(bases) < 2:
                pass
            elif {Protocol, typing.Protocol} & set(bases):
                for base in bases:
                    if not (
                        base in {object, typing.Generic, Protocol, typing.Protocol}
                        or base.__name__ in _PROTO_ALLOWLIST.get(base.__module__, [])
                        or is_protocol(base)
                    ):
                        raise TypeError(
                            f"Protocols can only inherit from other protocols, "
                            f"got {base!r}"
                        )
            return abc.ABCMeta.__new__(mcls, name, bases, namespace, **kwargs)
        def __init__(cls, *args, **kwargs):
            abc.ABCMeta.__init__(cls, *args, **kwargs)
            # Precompute the member set once, at class creation time.
            if getattr(cls, "_is_protocol", False):
                cls.__protocol_attrs__ = _get_protocol_attrs(cls)
                # PEP 544 prohibits using issubclass()
                # with protocols that have non-method members.
                cls.__callable_proto_members_only__ = all(
                    callable(getattr(cls, attr, None)) for attr in cls.__protocol_attrs__
                )
        def __subclasscheck__(cls, other):
            if cls is Protocol:
                return type.__subclasscheck__(cls, other)
            if (
                getattr(cls, '_is_protocol', False)
                and not _allow_reckless_class_checks()
            ):
                if not isinstance(other, type):
                    # Same error message as for issubclass(1, int).
                    raise TypeError('issubclass() arg 1 must be a class')
                if (
                    not cls.__callable_proto_members_only__
                    and cls.__dict__.get("__subclasshook__") is _proto_hook
                ):
                    non_method_attrs = sorted(
                        attr for attr in cls.__protocol_attrs__
                        if not callable(getattr(cls, attr, None))
                    )
                    raise TypeError(
                        "Protocols with non-method members don't support issubclass()."
                        f" Non-method members: {str(non_method_attrs)[1:-1]}."
                    )
                if not getattr(cls, '_is_runtime_protocol', False):
                    raise TypeError(
                        "Instance and class checks can only be used with "
                        "@runtime_checkable protocols"
                    )
            return abc.ABCMeta.__subclasscheck__(cls, other)
        def __instancecheck__(cls, instance):
            # We need this method for situations where attributes are
            # assigned in __init__.
            if cls is Protocol:
                return type.__instancecheck__(cls, instance)
            if not getattr(cls, "_is_protocol", False):
                # i.e., it's a concrete subclass of a protocol
                return abc.ABCMeta.__instancecheck__(cls, instance)
            if (
                not getattr(cls, '_is_runtime_protocol', False) and
                not _allow_reckless_class_checks()
            ):
                raise TypeError("Instance and class checks can only be used with"
                                " @runtime_checkable protocols")
            if abc.ABCMeta.__instancecheck__(cls, instance):
                return True
            # Structural check: every protocol member must be present on the
            # instance; getattr_static avoids triggering descriptors/__getattr__.
            for attr in cls.__protocol_attrs__:
                try:
                    val = inspect.getattr_static(instance, attr)
                except AttributeError:
                    break
                if val is None and callable(getattr(cls, attr, None)):
                    break
            else:
                return True
            return False
        def __eq__(cls, other):
            # Hack so that typing.Generic.__class_getitem__
            # treats typing_extensions.Protocol
            # as equivalent to typing.Protocol
            if abc.ABCMeta.__eq__(cls, other) is True:
                return True
            return cls is Protocol and other is typing.Protocol
        # This has to be defined, or the abc-module cache
        # complains about classes with this metaclass being unhashable,
        # if we define only __eq__!
        def __hash__(cls) -> int:
            return type.__hash__(cls)
    # Installed as __subclasshook__ on protocol classes: implements the
    # structural issubclass() check mandated by PEP 544.
    @classmethod
    def _proto_hook(cls, other):
        if not cls.__dict__.get('_is_protocol', False):
            return NotImplemented
        for attr in cls.__protocol_attrs__:
            for base in other.__mro__:
                # Check if the members appears in the class dictionary...
                if attr in base.__dict__:
                    # An attribute explicitly set to None means "not implemented".
                    if base.__dict__[attr] is None:
                        return NotImplemented
                    break
                # ...or in annotations, if it is a sub-protocol.
                annotations = getattr(base, '__annotations__', {})
                if (
                    isinstance(annotations, collections.abc.Mapping)
                    and attr in annotations
                    and is_protocol(other)
                ):
                    break
            else:
                # Member not found anywhere in other's MRO.
                return NotImplemented
        return True
    # Backported Protocol base class; behaves like typing.Protocol but uses
    # the faster _ProtocolMeta defined above.
    class Protocol(typing.Generic, metaclass=_ProtocolMeta):
        __doc__ = typing.Protocol.__doc__
        __slots__ = ()
        _is_protocol = True
        _is_runtime_protocol = False
        def __init_subclass__(cls, *args, **kwargs):
            super().__init_subclass__(*args, **kwargs)
            # Determine if this is a protocol or a concrete subclass.
            if not cls.__dict__.get('_is_protocol', False):
                cls._is_protocol = any(b is Protocol for b in cls.__bases__)
            # Set (or override) the protocol subclass hook.
            if '__subclasshook__' not in cls.__dict__:
                cls.__subclasshook__ = _proto_hook
            # Prohibit instantiation for protocol classes
            if cls._is_protocol and cls.__init__ is Protocol.__init__:
                cls.__init__ = _no_init
# The "runtime" alias exists for backwards compatibility.
runtime = runtime_checkable = typing.runtime_checkable
# Our version of runtime-checkable protocols is faster on Python 3.8-3.11
if sys.version_info >= (3, 12):
    SupportsInt = typing.SupportsInt
    SupportsFloat = typing.SupportsFloat
    SupportsComplex = typing.SupportsComplex
    SupportsBytes = typing.SupportsBytes
    SupportsIndex = typing.SupportsIndex
    SupportsAbs = typing.SupportsAbs
    SupportsRound = typing.SupportsRound
else:
    # Re-defined here (rather than aliased) so they use the faster local
    # Protocol implementation.
    @runtime_checkable
    class SupportsInt(Protocol):
        """An ABC with one abstract method __int__."""
        __slots__ = ()
        @abc.abstractmethod
        def __int__(self) -> int:
            pass
    @runtime_checkable
    class SupportsFloat(Protocol):
        """An ABC with one abstract method __float__."""
        __slots__ = ()
        @abc.abstractmethod
        def __float__(self) -> float:
            pass
    @runtime_checkable
    class SupportsComplex(Protocol):
        """An ABC with one abstract method __complex__."""
        __slots__ = ()
        @abc.abstractmethod
        def __complex__(self) -> complex:
            pass
    @runtime_checkable
    class SupportsBytes(Protocol):
        """An ABC with one abstract method __bytes__."""
        __slots__ = ()
        @abc.abstractmethod
        def __bytes__(self) -> bytes:
            pass
    @runtime_checkable
    class SupportsIndex(Protocol):
        """An ABC with one abstract method __index__."""
        __slots__ = ()
        @abc.abstractmethod
        def __index__(self) -> int:
            pass
    @runtime_checkable
    class SupportsAbs(Protocol[T_co]):
        """
        An ABC with one abstract method __abs__ that is covariant in its return type.
        """
        __slots__ = ()
        @abc.abstractmethod
        def __abs__(self) -> T_co:
            pass
    @runtime_checkable
    class SupportsRound(Protocol[T_co]):
        """
        An ABC with one abstract method __round__ that is covariant in its return type.
        """
        __slots__ = ()
        @abc.abstractmethod
        def __round__(self, ndigits: int = 0) -> T_co:
            pass
def _ensure_subclassable(mro_entries):
def inner(func):
if sys.implementation.name == "pypy" and sys.version_info < (3, 9):
cls_dict = {
"__call__": staticmethod(func),
"__mro_entries__": staticmethod(mro_entries)
}
t = type(func.__name__, (), cls_dict)
return functools.update_wrapper(t(), func)
else:
func.__mro_entries__ = mro_entries
return func
return inner
if hasattr(typing, "ReadOnly"):
    # The standard library TypedDict in Python 3.8 does not store runtime information
    # about which (if any) keys are optional. See https://bugs.python.org/issue38834
    # The standard library TypedDict in Python 3.9.0/1 does not honour the "total"
    # keyword with old-style TypedDict(). See https://bugs.python.org/issue42059
    # The standard library TypedDict below Python 3.11 does not store runtime
    # information about optional and required keys when using Required or NotRequired.
    # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11.
    # Aaaand on 3.12 we add __orig_bases__ to TypedDict
    # to enable better runtime introspection.
    # On 3.13 we deprecate some odd ways of creating TypedDicts.
    # PEP 705 proposes adding the ReadOnly[] qualifier.
    TypedDict = typing.TypedDict
    _TypedDictMeta = typing._TypedDictMeta
    is_typeddict = typing.is_typeddict
else:
    # typing.ReadOnly is unavailable (pre-PEP 705 Python): provide our own
    # full TypedDict implementation below.
    _TAKES_MODULE = "module" in inspect.signature(typing._type_check).parameters
def _get_typeddict_qualifiers(annotation_type):
while True:
annotation_origin = get_origin(annotation_type)
if annotation_origin is Annotated:
annotation_args = get_args(annotation_type)
if annotation_args:
annotation_type = annotation_args[0]
else:
break
elif annotation_origin is Required:
yield Required
annotation_type, = get_args(annotation_type)
elif annotation_origin is NotRequired:
yield NotRequired
annotation_type, = get_args(annotation_type)
elif annotation_origin is ReadOnly:
yield ReadOnly
annotation_type, = get_args(annotation_type)
else:
break
    class _TypedDictMeta(type):
        def __new__(cls, name, bases, ns, *, total=True):
            """Create new typed dict class object.
            This method is called when TypedDict is subclassed,
            or when TypedDict is instantiated. This way
            TypedDict supports all three syntax forms described in its docstring.
            Subclasses and instances of TypedDict return actual dictionaries.
            """
            for base in bases:
                if type(base) is not _TypedDictMeta and base is not typing.Generic:
                    raise TypeError('cannot inherit from both a TypedDict type '
                                    'and a non-TypedDict base class')
            if any(issubclass(b, typing.Generic) for b in bases):
                generic_base = (typing.Generic,)
            else:
                generic_base = ()
            # typing.py generally doesn't let you inherit from plain Generic, unless
            # the name of the class happens to be "Protocol"
            tp_dict = type.__new__(_TypedDictMeta, "Protocol", (*generic_base, dict), ns)
            # Now restore the real name/qualname after the "Protocol" trick above.
            tp_dict.__name__ = name
            if tp_dict.__qualname__ == "Protocol":
                tp_dict.__qualname__ = name
            if not hasattr(tp_dict, '__orig_bases__'):
                tp_dict.__orig_bases__ = bases
            annotations = {}
            own_annotations = ns.get('__annotations__', {})
            msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type"
            if _TAKES_MODULE:
                own_annotations = {
                    n: typing._type_check(tp, msg, module=tp_dict.__module__)
                    for n, tp in own_annotations.items()
                }
            else:
                own_annotations = {
                    n: typing._type_check(tp, msg)
                    for n, tp in own_annotations.items()
                }
            required_keys = set()
            optional_keys = set()
            readonly_keys = set()
            mutable_keys = set()
            # Inherit key classifications from TypedDict bases first...
            for base in bases:
                base_dict = base.__dict__
                annotations.update(base_dict.get('__annotations__', {}))
                required_keys.update(base_dict.get('__required_keys__', ()))
                optional_keys.update(base_dict.get('__optional_keys__', ()))
                readonly_keys.update(base_dict.get('__readonly_keys__', ()))
                mutable_keys.update(base_dict.get('__mutable_keys__', ()))
            # ...then classify this class's own keys from their qualifiers.
            annotations.update(own_annotations)
            for annotation_key, annotation_type in own_annotations.items():
                qualifiers = set(_get_typeddict_qualifiers(annotation_type))
                if Required in qualifiers:
                    required_keys.add(annotation_key)
                elif NotRequired in qualifiers:
                    optional_keys.add(annotation_key)
                elif total:
                    required_keys.add(annotation_key)
                else:
                    optional_keys.add(annotation_key)
                if ReadOnly in qualifiers:
                    if annotation_key in mutable_keys:
                        raise TypeError(
                            f"Cannot override mutable key {annotation_key!r}"
                            " with read-only key"
                        )
                    readonly_keys.add(annotation_key)
                else:
                    mutable_keys.add(annotation_key)
                    readonly_keys.discard(annotation_key)
            tp_dict.__annotations__ = annotations
            tp_dict.__required_keys__ = frozenset(required_keys)
            tp_dict.__optional_keys__ = frozenset(optional_keys)
            tp_dict.__readonly_keys__ = frozenset(readonly_keys)
            tp_dict.__mutable_keys__ = frozenset(mutable_keys)
            if not hasattr(tp_dict, '__total__'):
                tp_dict.__total__ = total
            return tp_dict
        __call__ = dict  # static method
        def __subclasscheck__(cls, other):
            # Typed dicts are only for static structural subtyping.
            raise TypeError('TypedDict does not support instance and class checks')
        __instancecheck__ = __subclasscheck__
    # Bare marker class used as the __mro_entries__ target for the
    # functional TypedDict form below.
    _TypedDict = type.__new__(_TypedDictMeta, 'TypedDict', (), {})
    @_ensure_subclassable(lambda bases: (_TypedDict,))
    def TypedDict(typename, fields=_marker, /, *, total=True, **kwargs):
        """A simple typed namespace. At runtime it is equivalent to a plain dict.
        TypedDict creates a dictionary type such that a type checker will expect all
        instances to have a certain set of keys, where each key is
        associated with a value of a consistent type. This expectation
        is not checked at runtime.
        Usage::
            class Point2D(TypedDict):
                x: int
                y: int
                label: str
            a: Point2D = {'x': 1, 'y': 2, 'label': 'good'}  # OK
            b: Point2D = {'z': 3, 'label': 'bad'}           # Fails type check
            assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first')
        The type info can be accessed via the Point2D.__annotations__ dict, and
        the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets.
        TypedDict supports an additional equivalent form::
            Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str})
        By default, all keys must be present in a TypedDict. It is possible
        to override this by specifying totality::
            class Point2D(TypedDict, total=False):
                x: int
                y: int
        This means that a Point2D TypedDict can have any of the keys omitted. A type
        checker is only expected to support a literal False or True as the value of
        the total argument. True is the default, and makes all items defined in the
        class body be required.
        The Required and NotRequired special forms can also be used to mark
        individual keys as being required or not required::
            class Point2D(TypedDict):
                x: int  # the "x" key must always be present (Required is the default)
                y: NotRequired[int]  # the "y" key can be omitted
        See PEP 655 for more details on Required and NotRequired.
        """
        # Deprecated zero-field forms: TypedDict('Name') and TypedDict('Name', None).
        if fields is _marker or fields is None:
            if fields is _marker:
                deprecated_thing = "Failing to pass a value for the 'fields' parameter"
            else:
                deprecated_thing = "Passing `None` as the 'fields' parameter"
            example = f"`{typename} = TypedDict({typename!r}, {{}})`"
            deprecation_msg = (
                f"{deprecated_thing} is deprecated and will be disallowed in "
                "Python 3.15. To create a TypedDict class with 0 fields "
                "using the functional syntax, pass an empty dictionary, e.g. "
            ) + example + "."
            warnings.warn(deprecation_msg, DeprecationWarning, stacklevel=2)
            fields = kwargs
        elif kwargs:
            raise TypeError("TypedDict takes either a dict or keyword arguments,"
                            " but not both")
        # Deprecated keyword-argument form: TypedDict('Name', x=int, ...).
        if kwargs:
            if sys.version_info >= (3, 13):
                raise TypeError("TypedDict takes no keyword arguments")
            warnings.warn(
                "The kwargs-based syntax for TypedDict definitions is deprecated "
                "in Python 3.11, will be removed in Python 3.13, and may not be "
                "understood by third-party type checkers.",
                DeprecationWarning,
                stacklevel=2,
            )
        ns = {'__annotations__': dict(fields)}
        module = _caller()
        if module is not None:
            # Setting correct module is necessary to make typed dict classes pickleable.
            ns['__module__'] = module
        td = _TypedDictMeta(typename, (), ns, total=total)
        td.__orig_bases__ = (TypedDict,)
        return td
    # Metaclasses that identify a TypedDict at runtime; include the stdlib's
    # private metaclass when it exists so is_typeddict() accepts both kinds.
    if hasattr(typing, "_TypedDictMeta"):
        _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta)
    else:
        _TYPEDDICT_TYPES = (_TypedDictMeta,)
def is_typeddict(tp):
"""Check if an annotation is a TypedDict class
For example::
class Film(TypedDict):
title: str
year: int
is_typeddict(Film) # => True
is_typeddict(Union[list, str]) # => False
"""
# On 3.8, this would otherwise return True
if hasattr(typing, "TypedDict") and tp is typing.TypedDict:
return False
return isinstance(tp, _TYPEDDICT_TYPES)
# typing.assert_type was added in 3.11; fall back to a no-op shim otherwise.
try:
    assert_type = typing.assert_type
except AttributeError:
    def assert_type(val, typ, /):
        """Assert (to the type checker) that the value is of the given type.

        When the type checker encounters a call to assert_type(), it
        emits an error if the value is not of the specified type::
            def greet(name: str) -> None:
                assert_type(name, str)  # ok
                assert_type(name, int)  # type checker error

        At runtime this returns the first argument unchanged and otherwise
        does nothing.
        """
        return val
if hasattr(typing, "Required"):  # 3.11+
    get_type_hints = typing.get_type_hints
else:  # <=3.10
    # replaces _strip_annotations()
    def _strip_extras(t):
        """Strips Annotated, Required and NotRequired from a given type."""
        if isinstance(t, _AnnotatedAlias):
            return _strip_extras(t.__origin__)
        if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired):
            return _strip_extras(t.__args__[0])
        # For container aliases, strip recursively; rebuild only if something
        # actually changed so unmodified aliases are returned as-is.
        if isinstance(t, typing._GenericAlias):
            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
            if stripped_args == t.__args__:
                return t
            return t.copy_with(stripped_args)
        if hasattr(_types, "GenericAlias") and isinstance(t, _types.GenericAlias):
            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
            if stripped_args == t.__args__:
                return t
            return _types.GenericAlias(t.__origin__, stripped_args)
        if hasattr(_types, "UnionType") and isinstance(t, _types.UnionType):
            stripped_args = tuple(_strip_extras(a) for a in t.__args__)
            if stripped_args == t.__args__:
                return t
            return functools.reduce(operator.or_, stripped_args)
        return t
    def get_type_hints(obj, globalns=None, localns=None, include_extras=False):
        """Return type hints for an object.
        This is often the same as obj.__annotations__, but it handles
        forward references encoded as string literals, adds Optional[t] if a
        default value equal to None is set and recursively replaces all
        'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T'
        (unless 'include_extras=True').
        The argument may be a module, class, method, or function. The annotations
        are returned as a dictionary. For classes, annotations include also
        inherited members.
        TypeError is raised if the argument is not of a type that can contain
        annotations, and an empty dictionary is returned if no annotations are
        present.
        BEWARE -- the behavior of globalns and localns is counterintuitive
        (unless you are familiar with how eval() and exec() work). The
        search order is locals first, then globals.
        - If no dict arguments are passed, an attempt is made to use the
          globals from obj (or the respective module's globals for classes),
          and these are also used as the locals. If the object does not appear
          to have globals, an empty dictionary is used.
        - If one dict argument is passed, it is used for both globals and
          locals.
        - If two dict arguments are passed, they specify globals and
          locals, respectively.
        """
        if hasattr(typing, "Annotated"):  # 3.9+
            hint = typing.get_type_hints(
                obj, globalns=globalns, localns=localns, include_extras=True
            )
        else:  # 3.8
            hint = typing.get_type_hints(obj, globalns=globalns, localns=localns)
        if include_extras:
            return hint
        return {k: _strip_extras(t) for k, t in hint.items()}
# Python 3.9+ has PEP 593 (Annotated)
if hasattr(typing, 'Annotated'):
    Annotated = typing.Annotated
    # Not exported and not a public API, but needed for get_origin() and get_args()
    # to work.
    _AnnotatedAlias = typing._AnnotatedAlias
# 3.8
else:
    class _AnnotatedAlias(typing._GenericAlias, _root=True):
        """Runtime representation of an annotated type.
        At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't'
        with extra annotations. The alias behaves like a normal typing alias,
        instantiating is the same as instantiating the underlying type, binding
        it to types is also the same.
        """
        def __init__(self, origin, metadata):
            # Nested Annotated are flattened: metadata of an inner
            # _AnnotatedAlias is prepended and its bare origin kept.
            if isinstance(origin, _AnnotatedAlias):
                metadata = origin.__metadata__ + metadata
                origin = origin.__origin__
            super().__init__(origin, origin)
            self.__metadata__ = metadata
        def copy_with(self, params):
            assert len(params) == 1
            new_type = params[0]
            return _AnnotatedAlias(new_type, self.__metadata__)
        def __repr__(self):
            return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, "
                    f"{', '.join(repr(a) for a in self.__metadata__)}]")
        def __reduce__(self):
            return operator.getitem, (
                Annotated, (self.__origin__,) + self.__metadata__
            )
        def __eq__(self, other):
            if not isinstance(other, _AnnotatedAlias):
                return NotImplemented
            if self.__origin__ != other.__origin__:
                return False
            return self.__metadata__ == other.__metadata__
        def __hash__(self):
            return hash((self.__origin__, self.__metadata__))
    class Annotated:
        """Add context specific metadata to a type.
        Example: Annotated[int, runtime_check.Unsigned] indicates to the
        hypothetical runtime_check module that this type is an unsigned int.
        Every other consumer of this type can ignore this metadata and treat
        this type as int.
        The first argument to Annotated must be a valid type (and will be in
        the __origin__ field), the remaining arguments are kept as a tuple in
        the __extra__ field.
        Details:
        - It's an error to call `Annotated` with less than two arguments.
        - Nested Annotated are flattened::
            Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3]
        - Instantiating an annotated type is equivalent to instantiating the
          underlying type::
            Annotated[C, Ann1](5) == C(5)
        - Annotated can be used as a generic type alias::
            Optimized = Annotated[T, runtime.Optimize()]
            Optimized[int] == Annotated[int, runtime.Optimize()]
            OptimizedList = Annotated[List[T], runtime.Optimize()]
            OptimizedList[int] == Annotated[List[int], runtime.Optimize()]
        """
        __slots__ = ()
        def __new__(cls, *args, **kwargs):
            raise TypeError("Type Annotated cannot be instantiated.")
        @typing._tp_cache
        def __class_getitem__(cls, params):
            if not isinstance(params, tuple) or len(params) < 2:
                raise TypeError("Annotated[...] should be used "
                                "with at least two arguments (a type and an "
                                "annotation).")
            allowed_special_forms = (ClassVar, Final)
            if get_origin(params[0]) in allowed_special_forms:
                origin = params[0]
            else:
                msg = "Annotated[t, ...]: t must be a type."
                origin = typing._type_check(params[0], msg)
            metadata = tuple(params[1:])
            return _AnnotatedAlias(origin, metadata)
        def __init_subclass__(cls, *args, **kwargs):
            raise TypeError(
                f"Cannot subclass {cls.__module__}.Annotated"
            )
# Python 3.8 has get_origin() and get_args() but those implementations aren't
# Annotated-aware, so we can't use those. Python 3.9's versions don't support
# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do.
if sys.version_info[:2] >= (3, 10):
    get_origin = typing.get_origin
    get_args = typing.get_args
# 3.8-3.9
else:
    try:
        # 3.9+
        from typing import _BaseGenericAlias
    except ImportError:
        _BaseGenericAlias = typing._GenericAlias
    try:
        # 3.9+
        from typing import GenericAlias as _typing_GenericAlias
    except ImportError:
        _typing_GenericAlias = typing._GenericAlias
    def get_origin(tp):
        """Get the unsubscripted version of a type.
        This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar
        and Annotated. Return None for unsupported types. Examples::
            get_origin(Literal[42]) is Literal
            get_origin(int) is None
            get_origin(ClassVar[int]) is ClassVar
            get_origin(Generic) is Generic
            get_origin(Generic[T]) is Generic
            get_origin(Union[T, int]) is Union
            get_origin(List[Tuple[T, T]][int]) == list
            get_origin(P.args) is P
        """
        if isinstance(tp, _AnnotatedAlias):
            return Annotated
        if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias,
                           ParamSpecArgs, ParamSpecKwargs)):
            return tp.__origin__
        if tp is typing.Generic:
            return typing.Generic
        return None
    def get_args(tp):
        """Get type arguments with all substitutions performed.
        For unions, basic simplifications used by Union constructor are performed.
        Examples::
            get_args(Dict[str, int]) == (str, int)
            get_args(int) == ()
            get_args(Union[int, Union[T, int], str][int]) == (int, str)
            get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int])
            get_args(Callable[[], T][int]) == ([], int)
        """
        if isinstance(tp, _AnnotatedAlias):
            return (tp.__origin__,) + tp.__metadata__
        if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)):
            if getattr(tp, "_special", False):
                return ()
            res = tp.__args__
            # Callable args are normalized to ([params...], return_type).
            if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis:
                res = (list(res[:-1]), res[-1])
            return res
        return ()
# 3.10+
if hasattr(typing, 'TypeAlias'):
    TypeAlias = typing.TypeAlias
# 3.9
elif sys.version_info[:2] >= (3, 9):
    @_ExtensionsSpecialForm
    def TypeAlias(self, parameters):
        """Special marker indicating that an assignment should
        be recognized as a proper type alias definition by type
        checkers.
        For example::
            Predicate: TypeAlias = Callable[..., bool]
        It's invalid when used anywhere except as in the example above.
        """
        raise TypeError(f"{self} is not subscriptable")
# 3.8
else:
    TypeAlias = _ExtensionsSpecialForm(
        'TypeAlias',
        doc="""Special marker indicating that an assignment should
        be recognized as a proper type alias definition by type
        checkers.
        For example::
            Predicate: TypeAlias = Callable[..., bool]
        It's invalid when used anywhere except as in the example
        above."""
    )
# Attach a PEP 696 ``__default__`` to a TypeVar-like object.
# NOTE(review): the `default != _marker` comparison (rather than `is not`)
# mirrors upstream; keep as-is.
def _set_default(type_param, default):
    if isinstance(default, (tuple, list)):
        type_param.__default__ = tuple((typing._type_check(d, "Default must be a type")
                                        for d in default))
    elif default != _marker:
        if isinstance(type_param, ParamSpec) and default is ...:  # ... not valid <3.11
            type_param.__default__ = default
        else:
            type_param.__default__ = typing._type_check(default, "Default must be a type")
    else:
        # No default supplied.
        type_param.__default__ = None
def _set_module(typevarlike):
    """Point ``__module__`` at the defining module (needed for pickling)."""
    defining_module = _caller(depth=3)
    if defining_module != 'typing_extensions':
        typevarlike.__module__ = defining_module
class _DefaultMixin:
    """Mixin for TypeVarLike defaults."""
    __slots__ = ()
    # Reuses _set_default so __init__(self, default) stores __default__.
    __init__ = _set_default
# Classes using this metaclass must provide a _backported_typevarlike ClassVar
class _TypeVarLikeMeta(type):
    # Makes isinstance(x, TypeVar/ParamSpec) also accept the typing original.
    def __instancecheck__(cls, __instance: Any) -> bool:
        return isinstance(__instance, cls._backported_typevarlike)
# Add default and infer_variance parameters from PEP 696 and 695
class TypeVar(metaclass=_TypeVarLikeMeta):
    """Type variable."""
    _backported_typevarlike = typing.TypeVar
    def __new__(cls, name, *constraints, bound=None,
                covariant=False, contravariant=False,
                default=_marker, infer_variance=False):
        if hasattr(typing, "TypeAliasType"):
            # PEP 695 implemented (3.12+), can pass infer_variance to typing.TypeVar
            typevar = typing.TypeVar(name, *constraints, bound=bound,
                                     covariant=covariant, contravariant=contravariant,
                                     infer_variance=infer_variance)
        else:
            typevar = typing.TypeVar(name, *constraints, bound=bound,
                                     covariant=covariant, contravariant=contravariant)
            if infer_variance and (covariant or contravariant):
                raise ValueError("Variance cannot be specified with infer_variance.")
            typevar.__infer_variance__ = infer_variance
        # PEP 696 default + module fixup for pickling.
        _set_default(typevar, default)
        _set_module(typevar)
        return typevar
    def __init_subclass__(cls) -> None:
        raise TypeError(f"type '{__name__}.TypeVar' is not an acceptable base type")
# Python 3.10+ has PEP 612
if hasattr(typing, 'ParamSpecArgs'):
    ParamSpecArgs = typing.ParamSpecArgs
    ParamSpecKwargs = typing.ParamSpecKwargs
# 3.8-3.9
else:
    class _Immutable:
        """Mixin to indicate that object should not be copied."""
        __slots__ = ()
        def __copy__(self):
            return self
        def __deepcopy__(self, memo):
            return self
    class ParamSpecArgs(_Immutable):
        """The args for a ParamSpec object.
        Given a ParamSpec object P, P.args is an instance of ParamSpecArgs.
        ParamSpecArgs objects have a reference back to their ParamSpec:
        P.args.__origin__ is P
        This type is meant for runtime introspection and has no special meaning to
        static type checkers.
        """
        def __init__(self, origin):
            self.__origin__ = origin
        def __repr__(self):
            return f"{self.__origin__.__name__}.args"
        def __eq__(self, other):
            if not isinstance(other, ParamSpecArgs):
                return NotImplemented
            return self.__origin__ == other.__origin__
    class ParamSpecKwargs(_Immutable):
        """The kwargs for a ParamSpec object.
        Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs.
        ParamSpecKwargs objects have a reference back to their ParamSpec:
        P.kwargs.__origin__ is P
        This type is meant for runtime introspection and has no special meaning to
        static type checkers.
        """
        def __init__(self, origin):
            self.__origin__ = origin
        def __repr__(self):
            return f"{self.__origin__.__name__}.kwargs"
        def __eq__(self, other):
            if not isinstance(other, ParamSpecKwargs):
                return NotImplemented
            return self.__origin__ == other.__origin__
# 3.10+
if hasattr(typing, 'ParamSpec'):
    # Add default parameter - PEP 696
    class ParamSpec(metaclass=_TypeVarLikeMeta):
        """Parameter specification."""
        _backported_typevarlike = typing.ParamSpec
        def __new__(cls, name, *, bound=None,
                    covariant=False, contravariant=False,
                    infer_variance=False, default=_marker):
            if hasattr(typing, "TypeAliasType"):
                # PEP 695 implemented, can pass infer_variance to typing.TypeVar
                paramspec = typing.ParamSpec(name, bound=bound,
                                             covariant=covariant,
                                             contravariant=contravariant,
                                             infer_variance=infer_variance)
            else:
                paramspec = typing.ParamSpec(name, bound=bound,
                                             covariant=covariant,
                                             contravariant=contravariant)
                paramspec.__infer_variance__ = infer_variance
            # PEP 696 default + module fixup for pickling.
            _set_default(paramspec, default)
            _set_module(paramspec)
            return paramspec
        def __init_subclass__(cls) -> None:
            raise TypeError(f"type '{__name__}.ParamSpec' is not an acceptable base type")
# 3.8-3.9
else:
    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
    class ParamSpec(list, _DefaultMixin):
        """Parameter specification variable.
        Usage::
           P = ParamSpec('P')
        Parameter specification variables exist primarily for the benefit of static
        type checkers.  They are used to forward the parameter types of one
        callable to another callable, a pattern commonly found in higher order
        functions and decorators.  They are only valid when used in ``Concatenate``,
        or as the first argument to ``Callable``. In Python 3.10 and higher,
        they are also supported in user-defined Generics at runtime.
        See class Generic for more information on generic types.  An
        example for annotating a decorator::
           T = TypeVar('T')
           P = ParamSpec('P')
           def add_logging(f: Callable[P, T]) -> Callable[P, T]:
               '''A type-safe decorator to add logging to a function.'''
               def inner(*args: P.args, **kwargs: P.kwargs) -> T:
                   logging.info(f'{f.__name__} was called')
                   return f(*args, **kwargs)
               return inner
           @add_logging
           def add_two(x: float, y: float) -> float:
               '''Add two numbers together.'''
               return x + y
        Parameter specification variables defined with covariant=True or
        contravariant=True can be used to declare covariant or contravariant
        generic types.  These keyword arguments are valid, but their actual semantics
        are yet to be decided.  See PEP 612 for details.
        Parameter specification variables can be introspected. e.g.:
           P.__name__ == 'P'
           P.__bound__ == None
           P.__covariant__ == False
           P.__contravariant__ == False
        Note that only parameter specification variables defined in global scope can
        be pickled.
        """
        # Trick Generic __parameters__.
        __class__ = typing.TypeVar
        @property
        def args(self):
            return ParamSpecArgs(self)
        @property
        def kwargs(self):
            return ParamSpecKwargs(self)
        def __init__(self, name, *, bound=None, covariant=False, contravariant=False,
                     infer_variance=False, default=_marker):
            super().__init__([self])
            self.__name__ = name
            self.__covariant__ = bool(covariant)
            self.__contravariant__ = bool(contravariant)
            self.__infer_variance__ = bool(infer_variance)
            if bound:
                self.__bound__ = typing._type_check(bound, 'Bound must be a type.')
            else:
                self.__bound__ = None
            _DefaultMixin.__init__(self, default)
            # for pickling:
            def_mod = _caller()
            if def_mod != 'typing_extensions':
                self.__module__ = def_mod
        def __repr__(self):
            if self.__infer_variance__:
                prefix = ''
            elif self.__covariant__:
                prefix = '+'
            elif self.__contravariant__:
                prefix = '-'
            else:
                prefix = '~'
            return prefix + self.__name__
        def __hash__(self):
            return object.__hash__(self)
        def __eq__(self, other):
            return self is other
        def __reduce__(self):
            return self.__name__
        # Hack to get typing._type_check to pass.
        def __call__(self, *args, **kwargs):
            pass
# 3.8-3.9
if not hasattr(typing, 'Concatenate'):
    # Inherits from list as a workaround for Callable checks in Python < 3.9.2.
    class _ConcatenateGenericAlias(list):
        # Trick Generic into looking into this for __parameters__.
        __class__ = typing._GenericAlias
        # Flag in 3.8.
        _special = False
        def __init__(self, origin, args):
            super().__init__(args)
            self.__origin__ = origin
            self.__args__ = args
        def __repr__(self):
            _type_repr = typing._type_repr
            return (f'{_type_repr(self.__origin__)}'
                    f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]')
        def __hash__(self):
            return hash((self.__origin__, self.__args__))
        # Hack to get typing._type_check to pass in Generic.
        def __call__(self, *args, **kwargs):
            pass
        @property
        def __parameters__(self):
            # Only TypeVars/ParamSpecs among the args are free parameters.
            return tuple(
                tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec))
            )
# 3.8-3.9
@typing._tp_cache
def _concatenate_getitem(self, parameters):
if parameters == ():
raise TypeError("Cannot take a Concatenate of no types.")
if not isinstance(parameters, tuple):
parameters = (parameters,)
if not isinstance(parameters[-1], ParamSpec):
raise TypeError("The last parameter to Concatenate should be a "
"ParamSpec variable.")
msg = "Concatenate[arg, ...]: each arg must be a type."
parameters = tuple(typing._type_check(p, msg) for p in parameters)
return _ConcatenateGenericAlias(self, parameters)
# 3.10+
if hasattr(typing, 'Concatenate'):
    # Re-export the stdlib implementation unchanged.
    Concatenate = typing.Concatenate
    _ConcatenateGenericAlias = typing._ConcatenateGenericAlias  # noqa: F811
# 3.9
elif sys.version_info[:2] >= (3, 9):
    @_ExtensionsSpecialForm
    def Concatenate(self, parameters):
        """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
        """
        return _concatenate_getitem(self, parameters)
# 3.8
else:
    # 3.8 has no decorator-friendly special forms, so subscripting is
    # implemented on a dedicated _ExtensionsSpecialForm subclass.
    class _ConcatenateForm(_ExtensionsSpecialForm, _root=True):
        def __getitem__(self, parameters):
            return _concatenate_getitem(self, parameters)

    Concatenate = _ConcatenateForm(
        'Concatenate',
        doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a
higher order function which adds, removes or transforms parameters of a
callable.
For example::
Callable[Concatenate[int, P], int]
See PEP 612 for detailed information.
""")
# 3.10+
if hasattr(typing, 'TypeGuard'):
    TypeGuard = typing.TypeGuard
# 3.9
elif sys.version_info[:2] >= (3, 9):
    @_ExtensionsSpecialForm
    def TypeGuard(self, parameters):
        """Special typing form used to annotate the return type of a user-defined
type guard function.  ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow.  Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code.  The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard.  Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using  ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results.  The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant.  The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables.  For more information, see
PEP 647 (User-Defined Type Guards).
        """
        # Validate and wrap the single argument in a generic alias.
        item = typing._type_check(parameters, f'{self} accepts only a single type.')
        return typing._GenericAlias(self, (item,))
# 3.8
else:
    # 3.8 fallback: subscripting implemented on a special-form subclass.
    class _TypeGuardForm(_ExtensionsSpecialForm, _root=True):
        def __getitem__(self, parameters):
            item = typing._type_check(parameters,
                                      f'{self._name} accepts only a single type')
            return typing._GenericAlias(self, (item,))

    TypeGuard = _TypeGuardForm(
        'TypeGuard',
        doc="""Special typing form used to annotate the return type of a user-defined
type guard function.  ``TypeGuard`` only accepts a single type argument.
At runtime, functions marked this way should return a boolean.
``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static
type checkers to determine a more precise type of an expression within a
program's code flow.  Usually type narrowing is done by analyzing
conditional code flow and applying the narrowing to a block of code.  The
conditional expression here is sometimes referred to as a "type guard".
Sometimes it would be convenient to use a user-defined boolean function
as a type guard.  Such a function should use ``TypeGuard[...]`` as its
return type to alert static type checkers to this intention.
Using  ``-> TypeGuard`` tells the static type checker that for a given
function:
1. The return value is a boolean.
2. If the return value is ``True``, the type of its argument
is the type inside ``TypeGuard``.
For example::
def is_str(val: Union[str, float]):
# "isinstance" type guard
if isinstance(val, str):
# Type of ``val`` is narrowed to ``str``
...
else:
# Else, type of ``val`` is narrowed to ``float``.
...
Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower
form of ``TypeA`` (it can even be a wider form) and this may lead to
type-unsafe results.  The main reason is to allow for things like
narrowing ``List[object]`` to ``List[str]`` even though the latter is not
a subtype of the former, since ``List`` is invariant.  The responsibility of
writing type-safe type guards is left to the user.
``TypeGuard`` also works with type variables.  For more information, see
PEP 647 (User-Defined Type Guards).
""")
# Vendored from cpython typing._SpecialFrom
class _SpecialForm(typing._Final, _root=True):
    # A special form is: not instantiable, not subclassable, unusable with
    # isinstance/issubclass, and subscriptable via the wrapped getitem
    # function. It pickles by bare name.
    __slots__ = ('_name', '__doc__', '_getitem')

    def __init__(self, getitem):
        # Used as a decorator: the decorated function supplies name,
        # docstring and the __getitem__ implementation.
        self._getitem = getitem
        self._name = getitem.__name__
        self.__doc__ = getitem.__doc__

    def __getattr__(self, item):
        if item in {'__name__', '__qualname__'}:
            return self._name
        raise AttributeError(item)

    def __mro_entries__(self, bases):
        raise TypeError(f"Cannot subclass {self!r}")

    def __repr__(self):
        return f'typing_extensions.{self._name}'

    def __reduce__(self):
        # Pickle by module-level name.
        return self._name

    def __call__(self, *args, **kwds):
        raise TypeError(f"Cannot instantiate {self!r}")

    def __or__(self, other):
        # Support ``X | Y`` syntax in annotations.
        return typing.Union[self, other]

    def __ror__(self, other):
        return typing.Union[other, self]

    def __instancecheck__(self, obj):
        raise TypeError(f"{self} cannot be used with isinstance()")

    def __subclasscheck__(self, cls):
        raise TypeError(f"{self} cannot be used with issubclass()")

    @typing._tp_cache
    def __getitem__(self, parameters):
        # Cached so repeated subscriptions return the same alias object.
        return self._getitem(self, parameters)
if hasattr(typing, "LiteralString"):  # 3.11+
    LiteralString = typing.LiteralString
else:
    # Backport: a bare special form that rejects subscription.
    @_SpecialForm
    def LiteralString(self, params):
        """Represents an arbitrary literal string.
Example::
from typing_extensions import LiteralString
def query(sql: LiteralString) -> ...:
...
query("SELECT * FROM table") # ok
query(f"SELECT * FROM {input()}")  # not ok
See PEP 675 for details.
        """
        raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Self"):  # 3.11+
    Self = typing.Self
else:
    # Backport: a bare special form that rejects subscription.
    @_SpecialForm
    def Self(self, params):
        """Used to spell the type of "self" in classes.
Example::
from typing import Self
class ReturnsSelf:
def parse(self, data: bytes) -> Self:
...
return self
        """
        raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, "Never"):  # 3.11+
    Never = typing.Never
else:
    # Backport: a bare special form that rejects subscription.
    @_SpecialForm
    def Never(self, params):
        """The bottom type, a type that has no members.
This can be used to define a function that should never be
called, or a function that never returns::
from typing_extensions import Never
def never_call_me(arg: Never) -> None:
pass
def int_or_str(arg: int | str) -> None:
never_call_me(arg)  # type checker error
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
never_call_me(arg)  # ok, arg is of type Never
        """
        raise TypeError(f"{self} is not subscriptable")
if hasattr(typing, 'Required'):  # 3.11+
    Required = typing.Required
    NotRequired = typing.NotRequired
elif sys.version_info[:2] >= (3, 9):  # 3.9-3.10
    @_ExtensionsSpecialForm
    def Required(self, parameters):
        """A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix',  # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
        """
        # Validate and wrap the single argument in a generic alias.
        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
        return typing._GenericAlias(self, (item,))

    @_ExtensionsSpecialForm
    def NotRequired(self, parameters):
        """A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix',  # typechecker error if key is omitted
year=1999,
)
        """
        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
        return typing._GenericAlias(self, (item,))
else:  # 3.8
    # Both forms share one subscriptable special-form subclass.
    class _RequiredForm(_ExtensionsSpecialForm, _root=True):
        def __getitem__(self, parameters):
            item = typing._type_check(parameters,
                                      f'{self._name} accepts only a single type.')
            return typing._GenericAlias(self, (item,))

    Required = _RequiredForm(
        'Required',
        doc="""A special typing construct to mark a key of a total=False TypedDict
as required. For example:
class Movie(TypedDict, total=False):
title: Required[str]
year: int
m = Movie(
title='The Matrix',  # typechecker error if key is omitted
year=1999,
)
There is no runtime checking that a required key is actually provided
when instantiating a related TypedDict.
""")
    NotRequired = _RequiredForm(
        'NotRequired',
        doc="""A special typing construct to mark a key of a TypedDict as
potentially missing. For example:
class Movie(TypedDict):
title: str
year: NotRequired[int]
m = Movie(
title='The Matrix',  # typechecker error if key is omitted
year=1999,
)
""")
# PEP 705 `ReadOnly`: re-export on Pythons that ship it, otherwise backport.
if hasattr(typing, 'ReadOnly'):
    ReadOnly = typing.ReadOnly
elif sys.version_info[:2] >= (3, 9):  # 3.9-3.12
    @_ExtensionsSpecialForm
    def ReadOnly(self, parameters):
        """A special typing construct to mark an item of a TypedDict as read-only.
For example:
class Movie(TypedDict):
title: ReadOnly[str]
year: int
def mutate_movie(m: Movie) -> None:
m["year"] = 1992  # allowed
m["title"] = "The Matrix"  # typechecker error
There is no runtime checking for this property.
        """
        # Validate and wrap the single argument in a generic alias.
        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
        return typing._GenericAlias(self, (item,))
else:  # 3.8
    # 3.8 fallback: subscripting implemented on a special-form subclass.
    class _ReadOnlyForm(_ExtensionsSpecialForm, _root=True):
        def __getitem__(self, parameters):
            item = typing._type_check(parameters,
                                      f'{self._name} accepts only a single type.')
            return typing._GenericAlias(self, (item,))

    # Fixed typo in the doc text below: "propery" -> "property".
    ReadOnly = _ReadOnlyForm(
        'ReadOnly',
        doc="""A special typing construct to mark a key of a TypedDict as read-only.
For example:
class Movie(TypedDict):
title: ReadOnly[str]
year: int
def mutate_movie(m: Movie) -> None:
m["year"] = 1992  # allowed
m["title"] = "The Matrix"  # typechecker error
There is no runtime checking for this property.
""")
_UNPACK_DOC = """\
Type unpack operator.
The type unpack operator takes the child types from some container type,
such as `tuple[int, str]` or a `TypeVarTuple`, and 'pulls them out'. For
example:
# For some generic class `Foo`:
Foo[Unpack[tuple[int, str]]] # Equivalent to Foo[int, str]
Ts = TypeVarTuple('Ts')
# Specifies that `Bar` is generic in an arbitrary number of types.
# (Think of `Ts` as a tuple of an arbitrary number of individual
# `TypeVar`s, which the `Unpack` is 'pulling out' directly into the
# `Generic[]`.)
class Bar(Generic[Unpack[Ts]]): ...
Bar[int] # Valid
Bar[int, str] # Also valid
From Python 3.11, this can also be done using the `*` operator:
Foo[*tuple[int, str]]
class Bar(Generic[*Ts]): ...
The operator can also be used along with a `TypedDict` to annotate
`**kwargs` in a function signature. For instance:
class Movie(TypedDict):
name: str
year: int
# This function expects two keyword arguments - *name* of type `str` and
# *year* of type `int`.
def foo(**kwargs: Unpack[Movie]): ...
Note that there is only some runtime checking of this operator. Not
everything the runtime allows may be accepted by static type checkers.
For more information, see PEP 646 and PEP 692.
"""
if sys.version_info >= (3, 12):  # PEP 692 changed the repr of Unpack[]
    Unpack = typing.Unpack

    def _is_unpack(obj):
        return get_origin(obj) is Unpack
elif sys.version_info[:2] >= (3, 9):  # 3.9+
    class _UnpackSpecialForm(_ExtensionsSpecialForm, _root=True):
        def __init__(self, getitem):
            super().__init__(getitem)
            # Attach the shared module-level doc text.
            self.__doc__ = _UNPACK_DOC

    class _UnpackAlias(typing._GenericAlias, _root=True):
        # Pretend to be a TypeVar so Generic[Unpack[Ts]] is accepted;
        # see the monkey-patching notes further down in this module.
        __class__ = typing.TypeVar

    @_UnpackSpecialForm
    def Unpack(self, parameters):
        item = typing._type_check(parameters, f'{self._name} accepts only a single type.')
        return _UnpackAlias(self, (item,))

    def _is_unpack(obj):
        return isinstance(obj, _UnpackAlias)
else:  # 3.8
    class _UnpackAlias(typing._GenericAlias, _root=True):
        __class__ = typing.TypeVar

    # 3.8 fallback: subscripting implemented on a special-form subclass.
    class _UnpackForm(_ExtensionsSpecialForm, _root=True):
        def __getitem__(self, parameters):
            item = typing._type_check(parameters,
                                      f'{self._name} accepts only a single type.')
            return _UnpackAlias(self, (item,))

    Unpack = _UnpackForm('Unpack', doc=_UNPACK_DOC)

    def _is_unpack(obj):
        return isinstance(obj, _UnpackAlias)
if hasattr(typing, "TypeVarTuple"):  # 3.11+
    # Add default parameter - PEP 696
    class TypeVarTuple(metaclass=_TypeVarLikeMeta):
        """Type variable tuple."""

        # Tells the module-local metaclass which stdlib type this wraps.
        _backported_typevarlike = typing.TypeVarTuple

        def __new__(cls, name, *, default=_marker):
            # Delegate to the stdlib TypeVarTuple, then bolt on the
            # PEP 696 default and the caller's module for pickling.
            tvt = typing.TypeVarTuple(name)
            _set_default(tvt, default)
            _set_module(tvt)
            return tvt

        def __init_subclass__(self, *args, **kwds):
            raise TypeError("Cannot subclass special typing classes")
else:  # <=3.10
    class TypeVarTuple(_DefaultMixin):
        """Type variable tuple.
Usage::
Ts = TypeVarTuple('Ts')
In the same way that a normal type variable is a stand-in for a single
type such as ``int``, a type variable *tuple* is a stand-in for a *tuple*
type such as ``Tuple[int, str]``.
Type variable tuples can be used in ``Generic`` declarations.
Consider the following example::
class Array(Generic[*Ts]): ...
The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``,
where ``T1`` and ``T2`` are type variables. To use these type variables
as type parameters of ``Array``, we must *unpack* the type variable tuple using
the star operator: ``*Ts``. The signature of ``Array`` then behaves
as if we had simply written ``class Array(Generic[T1, T2]): ...``.
In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows
us to parameterise the class with an *arbitrary* number of type parameters.
Type variable tuples can be used anywhere a normal ``TypeVar`` can.
This includes class definitions, as shown above, as well as function
signatures and variable annotations::
class Array(Generic[*Ts]):
def __init__(self, shape: Tuple[*Ts]):
self._shape: Tuple[*Ts] = shape
def get_shape(self) -> Tuple[*Ts]:
return self._shape
shape = (Height(480), Width(640))
x: Array[Height, Width] = Array(shape)
y = abs(x)  # Inferred type is Array[Height, Width]
z = x + x  #  ...    is Array[Height, Width]
x.get_shape()  #  ...    is tuple[Height, Width]
        """

        # Trick Generic __parameters__.
        __class__ = typing.TypeVar

        def __iter__(self):
            # ``*Ts`` in a subscription iterates to the Unpack form.
            yield self.__unpacked__

        def __init__(self, name, *, default=_marker):
            self.__name__ = name
            _DefaultMixin.__init__(self, default)
            # for pickling: record the caller's module.
            def_mod = _caller()
            if def_mod != 'typing_extensions':
                self.__module__ = def_mod
            # Cache the Unpack[self] form used by __iter__.
            self.__unpacked__ = Unpack[self]

        def __repr__(self):
            return self.__name__

        def __hash__(self):
            # Identity-based hash, consistent with __eq__ below.
            return object.__hash__(self)

        def __eq__(self, other):
            return self is other

        def __reduce__(self):
            # Pickle by global name.
            return self.__name__

        def __init_subclass__(self, *args, **kwds):
            if '_root' not in kwds:
                raise TypeError("Cannot subclass special typing classes")
if hasattr(typing, "reveal_type"): # 3.11+
reveal_type = typing.reveal_type
else: # <=3.10
def reveal_type(obj: T, /) -> T:
"""Reveal the inferred type of a variable.
When a static type checker encounters a call to ``reveal_type()``,
it will emit the inferred type of the argument::
x: int = 1
reveal_type(x)
Running a static type checker (e.g., ``mypy``) on this example
will produce output similar to 'Revealed type is "builtins.int"'.
At runtime, the function prints the runtime type of the
argument and returns it unchanged.
"""
print(f"Runtime type is {type(obj).__name__!r}", file=sys.stderr)
return obj
if hasattr(typing, "assert_never"):  # 3.11+
    assert_never = typing.assert_never
else:  # <=3.10
    def assert_never(arg: Never, /) -> Never:
        """Assert to the type checker that a line of code is unreachable.
Example::
def int_or_str(arg: int | str) -> None:
match arg:
case int():
print("It's an int")
case str():
print("It's a str")
case _:
assert_never(arg)
If a type checker finds that a call to assert_never() is
reachable, it will emit an error.
At runtime, this throws an exception when called.
        """
        # Unconditional: reaching this call at runtime is always a bug.
        raise AssertionError("Expected code to be unreachable")
if sys.version_info >= (3, 12): # 3.12+
# dataclass_transform exists in 3.11 but lacks the frozen_default parameter
dataclass_transform = typing.dataclass_transform
else: # <=3.11
def dataclass_transform(
*,
eq_default: bool = True,
order_default: bool = False,
kw_only_default: bool = False,
frozen_default: bool = False,
field_specifiers: typing.Tuple[
typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]],
...
] = (),
**kwargs: typing.Any,
) -> typing.Callable[[T], T]:
"""Decorator that marks a function, class, or metaclass as providing
dataclass-like behavior.
Example:
from typing_extensions import dataclass_transform
_T = TypeVar("_T")
# Used on a decorator function
@dataclass_transform()
def create_model(cls: type[_T]) -> type[_T]:
...
return cls
@create_model
class CustomerModel:
id: int
name: str
# Used on a base class
@dataclass_transform()
class ModelBase: ...
class CustomerModel(ModelBase):
id: int
name: str
# Used on a metaclass
@dataclass_transform()
class ModelMeta(type): ...
class ModelBase(metaclass=ModelMeta): ...
class CustomerModel(ModelBase):
id: int
name: str
Each of the ``CustomerModel`` classes defined in this example will now
behave similarly to a dataclass created with the ``@dataclasses.dataclass``
decorator. For example, the type checker will synthesize an ``__init__``
method.
The arguments to this decorator can be used to customize this behavior:
- ``eq_default`` indicates whether the ``eq`` parameter is assumed to be
True or False if it is omitted by the caller.
- ``order_default`` indicates whether the ``order`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``kw_only_default`` indicates whether the ``kw_only`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``frozen_default`` indicates whether the ``frozen`` parameter is
assumed to be True or False if it is omitted by the caller.
- ``field_specifiers`` specifies a static list of supported classes
or functions that describe fields, similar to ``dataclasses.field()``.
At runtime, this decorator records its arguments in the
``__dataclass_transform__`` attribute on the decorated object.
See PEP 681 for details.
"""
def decorator(cls_or_fn):
cls_or_fn.__dataclass_transform__ = {
"eq_default": eq_default,
"order_default": order_default,
"kw_only_default": kw_only_default,
"frozen_default": frozen_default,
"field_specifiers": field_specifiers,
"kwargs": kwargs,
}
return cls_or_fn
return decorator
if hasattr(typing, "override"):  # 3.12+
    override = typing.override
else:  # <=3.11
    _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any])

    def override(arg: _F, /) -> _F:
        """Mark a method as overriding a method of a base class (PEP 698).

        Usage:
            class Base:
                def method(self) -> None:
                    pass
            class Child(Base):
                @override
                def method(self) -> None:
                    super().method()

        A type checker verifies that a method decorated this way really
        overrides a same-named base-class method, catching bugs where the
        base changes but the child does not. Checking is static only; at
        runtime the decorator merely sets ``__override__ = True`` on the
        object (when possible) so the marker can be introspected, and
        returns the object unchanged.
        """
        # Best effort: AttributeError (e.g. __slots__ / read-only attrs)
        # and TypeError (e.g. builtins) mean the marker is silently skipped.
        try:
            setattr(arg, "__override__", True)
        except (AttributeError, TypeError):
            pass
        return arg
if hasattr(warnings, "deprecated"):  # 3.13+
    deprecated = warnings.deprecated
else:
    _T = typing.TypeVar("_T")

    class deprecated:
        """Indicate that a class, function or overload is deprecated.
When this decorator is applied to an object, the type checker
will generate a diagnostic on usage of the deprecated object.
Usage:
@deprecated("Use B instead")
class A:
pass
@deprecated("Use g instead")
def f():
pass
@overload
@deprecated("int support is deprecated")
def g(x: int) -> int: ...
@overload
def g(x: str) -> int: ...
The warning specified by *category* will be emitted at runtime
on use of deprecated objects. For functions, that happens on calls;
for classes, on instantiation and on creation of subclasses.
If the *category* is ``None``, no warning is emitted at runtime.
The *stacklevel* determines where the
warning is emitted. If it is ``1`` (the default), the warning
is emitted at the direct caller of the deprecated object; if it
is higher, it is emitted further up the stack.
Static type checker behavior is not affected by the *category*
and *stacklevel* arguments.
The deprecation message passed to the decorator is saved in the
``__deprecated__`` attribute on the decorated object.
If applied to an overload, the decorator
must be after the ``@overload`` decorator for the attribute to
exist on the overload as returned by ``get_overloads()``.
See PEP 702 for details.
        """

        def __init__(
            self,
            message: str,
            /,
            *,
            category: typing.Optional[typing.Type[Warning]] = DeprecationWarning,
            stacklevel: int = 1,
        ) -> None:
            # Validate eagerly so a misuse fails at decoration time.
            if not isinstance(message, str):
                raise TypeError(
                    "Expected an object of type str for 'message', not "
                    f"{type(message).__name__!r}"
                )
            self.message = message
            self.category = category
            self.stacklevel = stacklevel

        def __call__(self, arg: _T, /) -> _T:
            # Make sure the inner functions created below don't
            # retain a reference to self.
            msg = self.message
            category = self.category
            stacklevel = self.stacklevel
            if category is None:
                # No runtime warning: only record the message for checkers.
                arg.__deprecated__ = msg
                return arg
            elif isinstance(arg, type):
                # Class case: warn on instantiation (via __new__) and on
                # subclassing (via __init_subclass__).
                import functools
                from types import MethodType

                original_new = arg.__new__

                @functools.wraps(original_new)
                def __new__(cls, *args, **kwargs):
                    # Warn only for the deprecated class itself, not for
                    # subclasses that inherit this __new__.
                    if cls is arg:
                        warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
                    if original_new is not object.__new__:
                        return original_new(cls, *args, **kwargs)
                    # Mirrors a similar check in object.__new__.
                    elif cls.__init__ is object.__init__ and (args or kwargs):
                        raise TypeError(f"{cls.__name__}() takes no arguments")
                    else:
                        return original_new(cls)

                arg.__new__ = staticmethod(__new__)

                original_init_subclass = arg.__init_subclass__
                # We need slightly different behavior if __init_subclass__
                # is a bound method (likely if it was implemented in Python)
                if isinstance(original_init_subclass, MethodType):
                    original_init_subclass = original_init_subclass.__func__

                    @functools.wraps(original_init_subclass)
                    def __init_subclass__(*args, **kwargs):
                        warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
                        return original_init_subclass(*args, **kwargs)

                    arg.__init_subclass__ = classmethod(__init_subclass__)
                # Or otherwise, which likely means it's a builtin such as
                # object's implementation of __init_subclass__.
                else:
                    @functools.wraps(original_init_subclass)
                    def __init_subclass__(*args, **kwargs):
                        warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
                        return original_init_subclass(*args, **kwargs)

                    arg.__init_subclass__ = __init_subclass__

                arg.__deprecated__ = __new__.__deprecated__ = msg
                __init_subclass__.__deprecated__ = msg
                return arg
            elif callable(arg):
                # Function/callable case: warn on each call via a wrapper.
                import functools

                @functools.wraps(arg)
                def wrapper(*args, **kwargs):
                    warnings.warn(msg, category=category, stacklevel=stacklevel + 1)
                    return arg(*args, **kwargs)

                arg.__deprecated__ = wrapper.__deprecated__ = msg
                return wrapper
            else:
                raise TypeError(
                    "@deprecated decorator with non-None category must be applied to "
                    f"a class or callable, not {arg!r}"
                )
# We have to do some monkey patching to deal with the dual nature of
# Unpack/TypeVarTuple:
# - We want Unpack to be a kind of TypeVar so it gets accepted in
#   Generic[Unpack[Ts]]
# - We want it to *not* be treated as a TypeVar for the purposes of
#   counting generic parameters, so that when we subscript a generic,
#   the runtime doesn't try to substitute the Unpack with the subscripted type.
if not hasattr(typing, "TypeVarTuple"):
    # Swap in the patched helpers defined elsewhere in this module.
    typing._collect_type_vars = _collect_type_vars
    typing._check_generic = _check_generic
# Backport typing.NamedTuple as it exists in Python 3.13.
# In 3.11, the ability to define generic `NamedTuple`s was supported.
# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8.
# On 3.12, we added __orig_bases__ to call-based NamedTuples
# On 3.13, we deprecated kwargs-based NamedTuples
if sys.version_info >= (3, 13):
    NamedTuple = typing.NamedTuple
else:
def _make_nmtuple(name, types, module, defaults=()):
    """Build the runtime namedtuple class behind a typed NamedTuple.

    *types* is an iterable of ``(field_name, annotation)`` pairs; each
    annotation is run through ``typing._type_check`` so invalid ones fail
    early. The annotations dict is attached both to the class and to its
    ``__new__``.
    """
    field_names = [field_name for field_name, _ in types]
    annotations = {
        field_name: typing._type_check(
            field_type, f"field {field_name} annotation must be a type"
        )
        for field_name, field_type in types
    }
    nm_tpl = collections.namedtuple(name, field_names,
                                    defaults=defaults, module=module)
    nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations
    # The `_field_types` attribute was removed in 3.9;
    # in earlier versions, it is the same as the `__annotations__` attribute
    if sys.version_info < (3, 9):
        nm_tpl._field_types = annotations
    return nm_tpl
# Names users may never define on a NamedTuple (reuse typing's list).
_prohibited_namedtuple_fields = typing._prohibited
# Namespace keys that are handled specially rather than copied onto the class.
_special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'})
class _NamedTupleMeta(type):
    # Metaclass implementing the class-based NamedTuple syntax: it turns
    # the annotated class body into a collections.namedtuple subclass.
    def __new__(cls, typename, bases, ns):
        assert _NamedTuple in bases
        for base in bases:
            if base is not _NamedTuple and base is not typing.Generic:
                raise TypeError(
                    'can only inherit from a NamedTuple type and Generic')
        # Swap the _NamedTuple sentinel for the real tuple base.
        bases = tuple(tuple if base is _NamedTuple else base for base in bases)
        types = ns.get('__annotations__', {})
        default_names = []
        # A field with a value in the class body supplies a default;
        # defaults must form a contiguous suffix of the field list.
        for field_name in types:
            if field_name in ns:
                default_names.append(field_name)
            elif default_names:
                raise TypeError(f"Non-default namedtuple field {field_name} "
                                f"cannot follow default field"
                                f"{'s' if len(default_names) > 1 else ''} "
                                f"{', '.join(default_names)}")
        nm_tpl = _make_nmtuple(
            typename, types.items(),
            defaults=[ns[n] for n in default_names],
            module=ns['__module__']
        )
        nm_tpl.__bases__ = bases
        if typing.Generic in bases:
            # Wire up subscription (MyNT[int]) for generic NamedTuples.
            if hasattr(typing, '_generic_class_getitem'):  # 3.12+
                nm_tpl.__class_getitem__ = classmethod(typing._generic_class_getitem)
            else:
                class_getitem = typing.Generic.__class_getitem__.__func__
                nm_tpl.__class_getitem__ = classmethod(class_getitem)
        # update from user namespace without overriding special namedtuple attributes
        for key, val in ns.items():
            if key in _prohibited_namedtuple_fields:
                raise AttributeError("Cannot overwrite NamedTuple attribute " + key)
            elif key not in _special_namedtuple_fields:
                if key not in nm_tpl._fields:
                    setattr(nm_tpl, key, ns[key])
                # Honor the descriptor protocol's __set_name__ hook, the
                # same way a normal class body would.
                try:
                    set_name = type(val).__set_name__
                except AttributeError:
                    pass
                else:
                    try:
                        set_name(val, nm_tpl, key)
                    except BaseException as e:
                        msg = (
                            f"Error calling __set_name__ on {type(val).__name__!r} "
                            f"instance {key!r} in {typename!r}"
                        )
                        # BaseException.add_note() existed on py311,
                        # but the __set_name__ machinery didn't start
                        # using add_note() until py312.
                        # Making sure exceptions are raised in the same way
                        # as in "normal" classes seems most important here.
                        if sys.version_info >= (3, 12):
                            e.add_note(msg)
                            raise
                        else:
                            raise RuntimeError(msg) from e
        if typing.Generic in bases:
            nm_tpl.__init_subclass__()
        return nm_tpl
# Sentinel base class; using type.__new__ directly avoids triggering the
# metaclass machinery for this bootstrap object.
_NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {})


def _namedtuple_mro_entries(bases):
    # __mro_entries__ hook: substitute the sentinel when the NamedTuple
    # function below is used as a base class.
    assert NamedTuple in bases
    return (_NamedTuple,)
@_ensure_subclassable(_namedtuple_mro_entries)
def NamedTuple(typename, fields=_marker, /, **kwargs):
    """Typed version of namedtuple.
Usage::
class Employee(NamedTuple):
name: str
id: int
This is equivalent to::
Employee = collections.namedtuple('Employee', ['name', 'id'])
The resulting class has an extra __annotations__ attribute, giving a
dict that maps field names to types.  (The field names are also in
the _fields attribute, which is part of the namedtuple API.)
An alternative equivalent functional syntax is also accepted::
Employee = NamedTuple('Employee', [('name', str), ('id', int)])
    """
    # Three call shapes reach this function: the functional
    # NamedTuple(name, [(field, type), ...]) form, and two deprecated
    # zero-/kwargs-based forms which emit a DeprecationWarning below.
    if fields is _marker:
        if kwargs:
            deprecated_thing = "Creating NamedTuple classes using keyword arguments"
            deprecation_msg = (
                "{name} is deprecated and will be disallowed in Python {remove}. "
                "Use the class-based or functional syntax instead."
            )
        else:
            deprecated_thing = "Failing to pass a value for the 'fields' parameter"
            example = f"`{typename} = NamedTuple({typename!r}, [])`"
            deprecation_msg = (
                "{name} is deprecated and will be disallowed in Python {remove}. "
                "To create a NamedTuple class with 0 fields "
                "using the functional syntax, "
                "pass an empty list, e.g. "
            ) + example + "."
    elif fields is None:
        if kwargs:
            raise TypeError(
                "Cannot pass `None` as the 'fields' parameter "
                "and also specify fields using keyword arguments"
            )
        else:
            deprecated_thing = "Passing `None` as the 'fields' parameter"
            example = f"`{typename} = NamedTuple({typename!r}, [])`"
            deprecation_msg = (
                "{name} is deprecated and will be disallowed in Python {remove}. "
                "To create a NamedTuple class with 0 fields "
                "using the functional syntax, "
                "pass an empty list, e.g. "
            ) + example + "."
    elif kwargs:
        raise TypeError("Either list of fields or keywords"
                        " can be provided to NamedTuple, not both")
    if fields is _marker or fields is None:
        warnings.warn(
            deprecation_msg.format(name=deprecated_thing, remove="3.15"),
            DeprecationWarning,
            stacklevel=2,
        )
        # Deprecated forms fall back to whatever the keyword arguments hold.
        fields = kwargs.items()
    nt = _make_nmtuple(typename, fields, module=_caller())
    # Mirror what 3.12+ does for call-based NamedTuples.
    nt.__orig_bases__ = (NamedTuple,)
    return nt
if hasattr(collections.abc, "Buffer"):  # 3.12+
    Buffer = collections.abc.Buffer
else:
    class Buffer(abc.ABC):
        """Base class for classes that implement the buffer protocol.
The buffer protocol allows Python objects to expose a low-level
memory buffer interface. Before Python 3.12, it is not possible
to implement the buffer protocol in pure Python code, or even
to check whether a class implements the buffer protocol. In
Python 3.12 and higher, the ``__buffer__`` method allows access
to the buffer protocol from Python code, and the
``collections.abc.Buffer`` ABC allows checking whether a class
implements the buffer protocol.
To indicate support for the buffer protocol in earlier versions,
inherit from this ABC, either in a stub file or at runtime,
or use ABC registration. This ABC provides no methods, because
there is no Python-accessible methods shared by pre-3.12 buffer
classes. It is useful primarily for static checks.
        """

    # As a courtesy, register the most common stdlib buffer classes.
    Buffer.register(memoryview)
    Buffer.register(bytearray)
    Buffer.register(bytes)
# Backport of types.get_original_bases, available on 3.12+ in CPython
if hasattr(_types, "get_original_bases"):
    get_original_bases = _types.get_original_bases
else:
    def get_original_bases(cls, /):
        """Return the class's "original" bases prior to modification by `__mro_entries__`.

        When a class is created from bases that define ``__mro_entries__``
        (e.g. ``NamedTuple`` or ``TypedDict``), the as-written bases are
        recorded on ``__orig_bases__``; this returns them, falling back to
        the resolved ``__bases__`` otherwise.

        Examples::

            assert get_original_bases(Bar) == (Foo[int], float)
            assert get_original_bases(Baz) == (list[str],)
            assert get_original_bases(Eggs) == (NamedTuple,)
            assert get_original_bases(Spam) == (TypedDict,)
            assert get_original_bases(int) == (object,)

        Raises TypeError when *cls* is not a type-like object.
        """
        try:
            # Only the class's own namespace is consulted so inherited
            # __orig_bases__ entries are not picked up.
            namespace = cls.__dict__
            resolved_bases = cls.__bases__
        except AttributeError:
            raise TypeError(
                f'Expected an instance of type, not {type(cls).__name__!r}'
            ) from None
        return namespace.get("__orig_bases__", resolved_bases)
# NewType is a class on Python 3.10+, making it pickleable
# The error message for subclassing instances of NewType was improved on 3.11+
if sys.version_info >= (3, 11):
    NewType = typing.NewType
else:
    class NewType:
        """NewType creates simple unique types with almost zero
        runtime overhead. NewType(name, tp) is considered a subtype of tp
        by static type checkers. At runtime, NewType(name, tp) returns
        a dummy callable that simply returns its argument. Usage::

            UserId = NewType('UserId', int)

            def name_by_id(user_id: UserId) -> str:
                ...

            UserId('user')          # Fails type check
            name_by_id(42)          # Fails type check
            name_by_id(UserId(42))  # OK
            num = UserId(5) + 1     # type: int
        """

        def __call__(self, obj, /):
            # At runtime a NewType instance is just an identity function.
            return obj

        def __init__(self, name, tp):
            self.__qualname__ = name
            if '.' in name:
                # __name__ keeps only the final component of a dotted name.
                name = name.rpartition('.')[-1]
            self.__name__ = name
            self.__supertype__ = tp
            # Attribute the instance to the calling module so that pickling
            # by qualified name (see __reduce__) can locate it there.
            def_mod = _caller()
            if def_mod != 'typing_extensions':
                self.__module__ = def_mod

        def __mro_entries__(self, bases):
            # We defined __mro_entries__ to get a better error message
            # if a user attempts to subclass a NewType instance. bpo-46170
            supercls_name = self.__name__

            class Dummy:
                def __init_subclass__(cls):
                    subcls_name = cls.__name__
                    raise TypeError(
                        f"Cannot subclass an instance of NewType. "
                        f"Perhaps you were looking for: "
                        f"`{subcls_name} = NewType({subcls_name!r}, {supercls_name})`"
                    )

            return (Dummy,)

        def __repr__(self):
            return f'{self.__module__}.{self.__qualname__}'

        def __reduce__(self):
            # Pickle by qualified name; requires the instance to be bound to
            # that name in its defining module.
            return self.__qualname__

        if sys.version_info >= (3, 10):
            # PEP 604 methods
            # It doesn't make sense to have these methods on Python <3.10
            def __or__(self, other):
                return typing.Union[self, other]

            def __ror__(self, other):
                return typing.Union[other, self]
if hasattr(typing, "TypeAliasType"):
    TypeAliasType = typing.TypeAliasType
else:
    def _is_unionable(obj):
        """Corresponds to is_unionable() in unionobject.c in CPython."""
        return obj is None or isinstance(obj, (
            type,
            _types.GenericAlias,
            _types.UnionType,
            TypeAliasType,
        ))

    class TypeAliasType:
        """Create named, parameterized type aliases.

        This provides a backport of the new `type` statement in Python 3.12:

            type ListOrSet[T] = list[T] | set[T]

        is equivalent to:

            T = TypeVar("T")
            ListOrSet = TypeAliasType("ListOrSet", list[T] | set[T], type_params=(T,))

        The name ListOrSet can then be used as an alias for the type it refers to.

        The type_params argument should contain all the type parameters used
        in the value of the type alias. If the alias is not generic, this
        argument is omitted.

        Static type checkers should only support type aliases declared using
        TypeAliasType that follow these rules:

        - The first argument (the name) must be a string literal.
        - The TypeAliasType instance must be immediately assigned to a variable
          of the same name. (For example, 'X = TypeAliasType("Y", int)' is invalid,
          as is 'X, Y = TypeAliasType("X", int), TypeAliasType("Y", int)').
        """

        def __init__(self, name: str, value, *, type_params=()):
            if not isinstance(name, str):
                raise TypeError("TypeAliasType name must be a string")
            self.__value__ = value
            self.__type_params__ = type_params

            # Flatten TypeVarTuples into __parameters__, mirroring how
            # generic aliases expose their parameters.
            parameters = []
            for type_param in type_params:
                if isinstance(type_param, TypeVarTuple):
                    parameters.extend(type_param)
                else:
                    parameters.append(type_param)
            self.__parameters__ = tuple(parameters)
            def_mod = _caller()
            if def_mod != 'typing_extensions':
                self.__module__ = def_mod
            # Setting this attribute closes the TypeAliasType from further modification
            # (see __setattr__ below, which rejects writes once __name__ exists).
            self.__name__ = name

        def __setattr__(self, name: str, value: object, /) -> None:
            # Once construction finished (__name__ set last in __init__),
            # every attribute write is rejected, making instances immutable.
            if hasattr(self, "__name__"):
                self._raise_attribute_error(name)
            super().__setattr__(name, value)

        def __delattr__(self, name: str, /) -> Never:
            self._raise_attribute_error(name)

        def _raise_attribute_error(self, name: str) -> Never:
            # Match the Python 3.12 error messages exactly
            if name == "__name__":
                raise AttributeError("readonly attribute")
            elif name in {"__value__", "__type_params__", "__parameters__", "__module__"}:
                raise AttributeError(
                    f"attribute '{name}' of 'typing.TypeAliasType' objects "
                    "is not writable"
                )
            else:
                raise AttributeError(
                    f"'typing.TypeAliasType' object has no attribute '{name}'"
                )

        def __repr__(self) -> str:
            return self.__name__

        def __getitem__(self, parameters):
            # Subscription (Alias[int]) produces a generic alias; each item
            # must pass typing's type-check hook.
            if not isinstance(parameters, tuple):
                parameters = (parameters,)
            parameters = [
                typing._type_check(
                    item, f'Subscripting {self.__name__} requires a type.'
                )
                for item in parameters
            ]
            return typing._GenericAlias(self, tuple(parameters))

        def __reduce__(self):
            # Pickle by name; requires the alias to be bound to that name
            # in its defining module.
            return self.__name__

        def __init_subclass__(cls, *args, **kwargs):
            raise TypeError(
                "type 'typing_extensions.TypeAliasType' is not an acceptable base type"
            )

        # The presence of this method convinces typing._type_check
        # that TypeAliasTypes are types.
        def __call__(self):
            raise TypeError("Type alias is not callable")

        if sys.version_info >= (3, 10):
            def __or__(self, right):
                # For forward compatibility with 3.12, reject Unions
                # that are not accepted by the built-in Union.
                if not _is_unionable(right):
                    return NotImplemented
                return typing.Union[self, right]

            def __ror__(self, left):
                if not _is_unionable(left):
                    return NotImplemented
                return typing.Union[left, self]
try:
    # Python 3.13+ ships these introspection helpers in typing itself.
    is_protocol = typing.is_protocol
    get_protocol_members = typing.get_protocol_members
except AttributeError:
    def is_protocol(tp: type, /) -> bool:
        """Return True if the given type is a Protocol.

        Example::

            >>> from typing_extensions import Protocol, is_protocol
            >>> class P(Protocol):
            ...     def a(self) -> str: ...
            ...     b: int
            >>> is_protocol(P)
            True
            >>> is_protocol(int)
            False
        """
        if not isinstance(tp, type):
            return False
        # The Protocol base classes themselves do not count as protocols.
        if tp is Protocol or tp is typing.Protocol:
            return False
        return bool(getattr(tp, '_is_protocol', False))

    def get_protocol_members(tp: type, /) -> typing.FrozenSet[str]:
        """Return the set of members defined in a Protocol.

        Example::

            >>> from typing_extensions import Protocol, get_protocol_members
            >>> class P(Protocol):
            ...     def a(self) -> str: ...
            ...     b: int
            >>> get_protocol_members(P)
            frozenset({'a', 'b'})

        Raise a TypeError for arguments that are not Protocols.
        """
        if not is_protocol(tp):
            raise TypeError(f'{tp!r} is not a Protocol')
        try:
            attrs = tp.__protocol_attrs__
        except AttributeError:
            # Older Protocol implementations lack the cached attribute set,
            # so compute it.
            attrs = _get_protocol_attrs(tp)
        return frozenset(attrs)
try:
    Doc = typing.Doc
except AttributeError:
    class Doc:
        """Wrapper that attaches documentation to a type inside ``Annotated``.

        Intended for class attributes, function and method parameters, return
        values, and variables.  The text should be passed as a positional-only
        string literal so that static tools (editors, documentation
        generators) can pick it up; it complements, rather than replaces,
        docstrings.  The string is exposed on the ``documentation`` attribute.

        Example::

            >>> from typing_extensions import Annotated, Doc
            >>> def hi(to: Annotated[str, Doc("Who to say hi to")]) -> None: ...
        """

        def __init__(self, documentation: str, /) -> None:
            self.documentation = documentation

        def __repr__(self) -> str:
            return f"Doc({self.documentation!r})"

        def __hash__(self) -> int:
            # Hash tracks equality: both delegate to the documentation text.
            return hash(self.documentation)

        def __eq__(self, other: object) -> bool:
            if isinstance(other, Doc):
                return self.documentation == other.documentation
            return NotImplemented
# Aliases for items that have always been in typing.
# Explicitly assign these (rather than using `from typing import *` at the top),
# so that we get a CI error if one of these is deleted from typing.py
# in a future version of Python.
# Each name below is re-exported unchanged; this module adds no behavior to them.
AbstractSet = typing.AbstractSet
AnyStr = typing.AnyStr
BinaryIO = typing.BinaryIO
Callable = typing.Callable
Collection = typing.Collection
Container = typing.Container
Dict = typing.Dict
ForwardRef = typing.ForwardRef
FrozenSet = typing.FrozenSet
Generator = typing.Generator
Generic = typing.Generic
Hashable = typing.Hashable
IO = typing.IO
ItemsView = typing.ItemsView
Iterable = typing.Iterable
Iterator = typing.Iterator
KeysView = typing.KeysView
List = typing.List
Mapping = typing.Mapping
MappingView = typing.MappingView
Match = typing.Match
MutableMapping = typing.MutableMapping
MutableSequence = typing.MutableSequence
MutableSet = typing.MutableSet
Optional = typing.Optional
Pattern = typing.Pattern
Reversible = typing.Reversible
Sequence = typing.Sequence
Set = typing.Set
Sized = typing.Sized
TextIO = typing.TextIO
Tuple = typing.Tuple
Union = typing.Union
ValuesView = typing.ValuesView
cast = typing.cast
no_type_check = typing.no_type_check
no_type_check_decorator = typing.no_type_check_decorator
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11 | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/__editable__.tap_okta-0.0.0.pth | /Users/nchebolu/work/raptor/taps/tap-okta/src
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/wheel-0.42.0.dist-info/RECORD | wheel/__init__.py,sha256=c5n4mea4NyUhMCk8GWbX4_O739E5ATPX23lTJRXf9ZI,59
wheel/__main__.py,sha256=NkMUnuTCGcOkgY0IBLgBCVC_BGGcWORx2K8jYGS12UE,455
wheel/_setuptools_logging.py,sha256=NoCnjJ4DFEZ45Eo-2BdXLsWJCwGkait1tp_17paleVw,746
wheel/bdist_wheel.py,sha256=Hrol9LUphvfapYo6Ro4RHhypq8iLew6jpp8NXd_CFw4,20943
wheel/macosx_libfile.py,sha256=mKH4GW3FILt0jLgm5LPgj7D5XyEvBU2Fgc-jCxMfSng,16143
wheel/metadata.py,sha256=jGDlp6IMblnujK4u1eni8VAdn2WYycSdQ-P6jaGBUMw,5882
wheel/util.py,sha256=e0jpnsbbM9QhaaMSyap-_ZgUxcxwpyLDk6RHcrduPLg,621
wheel/wheelfile.py,sha256=A5QzHd3cpDBqDEr8O6R6jqwLKiqkLlde6VjfgdQXo5Q,7701
wheel/cli/__init__.py,sha256=eBNhnPwWTtdKAJHy77lvz7gOQ5Eu3GavGugXxhSsn-U,4264
wheel/cli/convert.py,sha256=qJcpYGKqdfw1P6BelgN1Hn_suNgM6bvyEWFlZeuSWx0,9439
wheel/cli/pack.py,sha256=H6BZ8HyIYqP_2quRiczjHN08dykmdWTSLN0VMTYkzh8,3110
wheel/cli/tags.py,sha256=lHw-LaWrkS5Jy_qWcw-6pSjeNM6yAjDnqKI3E5JTTCU,4760
wheel/cli/unpack.py,sha256=Y_J7ynxPSoFFTT7H0fMgbBlVErwyDGcObgme5MBuz58,1021
wheel/vendored/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
wheel/vendored/vendor.txt,sha256=nMQ1MrIbjx7YcPQqZbwUPHLy08Q1lMPPL90HWSrazw0,16
wheel/vendored/packaging/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
wheel/vendored/packaging/_elffile.py,sha256=hbmK8OD6Z7fY6hwinHEUcD1by7czkGiNYu7ShnFEk2k,3266
wheel/vendored/packaging/_manylinux.py,sha256=Rq6ppXAxH8XFtNf6tC-B-1SKuvCODPBvcCoSulMtbtk,9526
wheel/vendored/packaging/_musllinux.py,sha256=kgmBGLFybpy8609-KTvzmt2zChCPWYvhp5BWP4JX7dE,2676
wheel/vendored/packaging/_parser.py,sha256=5DhK_zYJE4U4yzSkgEBT4F7tT2xZ6Pkx4gSRKyvXneQ,10382
wheel/vendored/packaging/_structures.py,sha256=q3eVNmbWJGG_S0Dit_S3Ao8qQqz_5PYTXFAKBZe5yr4,1431
wheel/vendored/packaging/_tokenizer.py,sha256=alCtbwXhOFAmFGZ6BQ-wCTSFoRAJ2z-ysIf7__MTJ_k,5292
wheel/vendored/packaging/markers.py,sha256=eH-txS2zq1HdNpTd9LcZUcVIwewAiNU0grmq5wjKnOk,8208
wheel/vendored/packaging/requirements.py,sha256=wswG4mXHSgE9w4NjNnlSvgLGo6yYvfHVEFnWhuEmXxg,2952
wheel/vendored/packaging/specifiers.py,sha256=ZOpqL_w_Kj6ZF_OWdliQUzhEyHlDbi6989kr-sF5GHs,39206
wheel/vendored/packaging/tags.py,sha256=pkG6gQ28RlhS09VzymVhVpGrWF5doHXfK1VxG9cdhoY,18355
wheel/vendored/packaging/utils.py,sha256=XgdmP3yx9-wQEFjO7OvMj9RjEf5JlR5HFFR69v7SQ9E,5268
wheel/vendored/packaging/version.py,sha256=XjRBLNK17UMDgLeP8UHnqwiY3TdSi03xFQURtec211A,16236
wheel-0.42.0.dist-info/entry_points.txt,sha256=rTY1BbkPHhkGMm4Q3F0pIzJBzW2kMxoG1oriffvGdA0,104
wheel-0.42.0.dist-info/LICENSE.txt,sha256=MMI2GGeRCPPo6h0qZYx8pBe9_IkcmO8aifpP8MmChlQ,1107
wheel-0.42.0.dist-info/WHEEL,sha256=EZbGkh7Ie4PoZfRQ8I0ZuP9VklN_TvcZ6DSE5Uar4z4,81
wheel-0.42.0.dist-info/METADATA,sha256=QMZYvPF88F2lBnZ9cf7-ugqmkGDUN8j3FUvNHikLhck,2203
wheel-0.42.0.dist-info/RECORD,,
wheel-0.42.0.virtualenv,,
wheel/vendored/__pycache__,,
wheel/vendored/packaging/specifiers.cpython-311.pyc,,
wheel/vendored/packaging/_elffile.cpython-311.pyc,,
../../../bin/wheel3,,
wheel/cli/__pycache__,,
wheel/vendored/packaging/__init__.cpython-311.pyc,,
wheel/vendored/packaging/_manylinux.cpython-311.pyc,,
wheel/cli/unpack.cpython-311.pyc,,
../../../bin/wheel,,
wheel-0.42.0.dist-info/INSTALLER,,
wheel-0.42.0.dist-info/__pycache__,,
wheel/vendored/packaging/version.cpython-311.pyc,,
wheel/vendored/packaging/utils.cpython-311.pyc,,
wheel/metadata.cpython-311.pyc,,
wheel/vendored/packaging/_musllinux.cpython-311.pyc,,
wheel/cli/pack.cpython-311.pyc,,
wheel/_setuptools_logging.cpython-311.pyc,,
wheel/vendored/packaging/tags.cpython-311.pyc,,
wheel/macosx_libfile.cpython-311.pyc,,
wheel/vendored/packaging/markers.cpython-311.pyc,,
wheel/cli/__init__.cpython-311.pyc,,
wheel/vendored/__init__.cpython-311.pyc,,
wheel/__pycache__,,
wheel/wheelfile.cpython-311.pyc,,
wheel/__main__.cpython-311.pyc,,
../../../bin/wheel3.11,,
wheel/vendored/packaging/_parser.cpython-311.pyc,,
wheel/bdist_wheel.cpython-311.pyc,,
wheel/vendored/packaging/__pycache__,,
wheel/vendored/packaging/requirements.cpython-311.pyc,,
../../../bin/wheel-3.11,,
wheel/vendored/packaging/_tokenizer.cpython-311.pyc,,
wheel/cli/tags.cpython-311.pyc,,
wheel/cli/convert.cpython-311.pyc,,
wheel/util.cpython-311.pyc,,
wheel/vendored/packaging/_structures.cpython-311.pyc,,
wheel/__init__.cpython-311.pyc,, | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/wheel-0.42.0.dist-info/WHEEL | Wheel-Version: 1.0
Generator: flit 3.9.0
Root-Is-Purelib: true
Tag: py3-none-any
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/wheel-0.42.0.dist-info/entry_points.txt | [console_scripts]
wheel=wheel.cli:main
[distutils.commands]
bdist_wheel=wheel.bdist_wheel:bdist_wheel
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/wheel-0.42.0.dist-info/LICENSE.txt | MIT License
Copyright (c) 2012 Daniel Holth <dholth@fastmail.fm> and contributors
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/wheel-0.42.0.dist-info/INSTALLER | pip
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/wheel-0.42.0.dist-info/METADATA | Metadata-Version: 2.1
Name: wheel
Version: 0.42.0
Summary: A built-package format for Python
Keywords: wheel,packaging
Author-email: Daniel Holth <dholth@fastmail.fm>
Maintainer-email: Alex Grönholm <alex.gronholm@nextday.fi>
Requires-Python: >=3.7
Description-Content-Type: text/x-rst
Classifier: Development Status :: 5 - Production/Stable
Classifier: Intended Audience :: Developers
Classifier: Topic :: System :: Archiving :: Packaging
Classifier: License :: OSI Approved :: MIT License
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3.12
Requires-Dist: pytest >= 6.0.0 ; extra == "test"
Requires-Dist: setuptools >= 65 ; extra == "test"
Project-URL: Changelog, https://wheel.readthedocs.io/en/stable/news.html
Project-URL: Documentation, https://wheel.readthedocs.io/
Project-URL: Issue Tracker, https://github.com/pypa/wheel/issues
Project-URL: Source, https://github.com/pypa/wheel
Provides-Extra: test
wheel
=====
This library is the reference implementation of the Python wheel packaging
standard, as defined in `PEP 427`_.
It has two different roles:
#. A setuptools_ extension for building wheels that provides the
``bdist_wheel`` setuptools command
#. A command line tool for working with wheel files
It should be noted that wheel is **not** intended to be used as a library, and
as such there is no stable, public API.
.. _PEP 427: https://www.python.org/dev/peps/pep-0427/
.. _setuptools: https://pypi.org/project/setuptools/
Documentation
-------------
The documentation_ can be found on Read The Docs.
.. _documentation: https://wheel.readthedocs.io/
Code of Conduct
---------------
Everyone interacting in the wheel project's codebases, issue trackers, chat
rooms, and mailing lists is expected to follow the `PSF Code of Conduct`_.
.. _PSF Code of Conduct: https://github.com/pypa/.github/blob/main/CODE_OF_CONDUCT.md
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/algorithms.py | from __future__ import annotations
import hashlib
import hmac
import json
import sys
from abc import ABC, abstractmethod
from typing import TYPE_CHECKING, Any, ClassVar, NoReturn, Union, cast, overload
from .exceptions import InvalidKeyError
from .types import HashlibHash, JWKDict
from .utils import (
base64url_decode,
base64url_encode,
der_to_raw_signature,
force_bytes,
from_base64url_uint,
is_pem_format,
is_ssh_key,
raw_to_der_signature,
to_base64url_uint,
)
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
try:
from cryptography.exceptions import InvalidSignature
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
from cryptography.hazmat.primitives.asymmetric.ec import (
ECDSA,
SECP256K1,
SECP256R1,
SECP384R1,
SECP521R1,
EllipticCurve,
EllipticCurvePrivateKey,
EllipticCurvePrivateNumbers,
EllipticCurvePublicKey,
EllipticCurvePublicNumbers,
)
from cryptography.hazmat.primitives.asymmetric.ed448 import (
Ed448PrivateKey,
Ed448PublicKey,
)
from cryptography.hazmat.primitives.asymmetric.ed25519 import (
Ed25519PrivateKey,
Ed25519PublicKey,
)
from cryptography.hazmat.primitives.asymmetric.rsa import (
RSAPrivateKey,
RSAPrivateNumbers,
RSAPublicKey,
RSAPublicNumbers,
rsa_crt_dmp1,
rsa_crt_dmq1,
rsa_crt_iqmp,
rsa_recover_prime_factors,
)
from cryptography.hazmat.primitives.serialization import (
Encoding,
NoEncryption,
PrivateFormat,
PublicFormat,
load_pem_private_key,
load_pem_public_key,
load_ssh_public_key,
)
has_crypto = True
except ModuleNotFoundError:
has_crypto = False
if TYPE_CHECKING:
# Type aliases for convenience in algorithms method signatures
AllowedRSAKeys = RSAPrivateKey | RSAPublicKey
AllowedECKeys = EllipticCurvePrivateKey | EllipticCurvePublicKey
AllowedOKPKeys = (
Ed25519PrivateKey | Ed25519PublicKey | Ed448PrivateKey | Ed448PublicKey
)
AllowedKeys = AllowedRSAKeys | AllowedECKeys | AllowedOKPKeys
AllowedPrivateKeys = (
RSAPrivateKey | EllipticCurvePrivateKey | Ed25519PrivateKey | Ed448PrivateKey
)
AllowedPublicKeys = (
RSAPublicKey | EllipticCurvePublicKey | Ed25519PublicKey | Ed448PublicKey
)
# JWS algorithm identifiers that are only available when the optional
# ``cryptography`` package imported successfully above (``has_crypto``);
# get_default_algorithms() registers them only in that case.
# NOTE(review): "ES521" appears alongside "ES512" and is registered with
# SHA-512 below -- presumably a backwards-compatible alias; confirm intent.
requires_cryptography = {
    "RS256",
    "RS384",
    "RS512",
    "ES256",
    "ES256K",
    "ES384",
    "ES521",
    "ES512",
    "PS256",
    "PS384",
    "PS512",
    "EdDSA",
}
def get_default_algorithms() -> dict[str, Algorithm]:
    """Return the mapping of algorithm name to implementation instance.

    Always includes "none" and the HMAC family; the asymmetric algorithms
    are added only when the optional ``cryptography`` package is installed.
    """
    algorithms: dict[str, Algorithm] = {
        "none": NoneAlgorithm(),
        "HS256": HMACAlgorithm(HMACAlgorithm.SHA256),
        "HS384": HMACAlgorithm(HMACAlgorithm.SHA384),
        "HS512": HMACAlgorithm(HMACAlgorithm.SHA512),
    }
    if not has_crypto:
        # Without 'cryptography' only the symmetric/"none" algorithms exist.
        return algorithms

    algorithms["RS256"] = RSAAlgorithm(RSAAlgorithm.SHA256)
    algorithms["RS384"] = RSAAlgorithm(RSAAlgorithm.SHA384)
    algorithms["RS512"] = RSAAlgorithm(RSAAlgorithm.SHA512)
    algorithms["ES256"] = ECAlgorithm(ECAlgorithm.SHA256)
    algorithms["ES256K"] = ECAlgorithm(ECAlgorithm.SHA256)
    algorithms["ES384"] = ECAlgorithm(ECAlgorithm.SHA384)
    algorithms["ES521"] = ECAlgorithm(ECAlgorithm.SHA512)
    # Backward compat for #219 fix
    algorithms["ES512"] = ECAlgorithm(ECAlgorithm.SHA512)
    algorithms["PS256"] = RSAPSSAlgorithm(RSAPSSAlgorithm.SHA256)
    algorithms["PS384"] = RSAPSSAlgorithm(RSAPSSAlgorithm.SHA384)
    algorithms["PS512"] = RSAPSSAlgorithm(RSAPSSAlgorithm.SHA512)
    algorithms["EdDSA"] = OKPAlgorithm()
    return algorithms
class Algorithm(ABC):
    """
    The interface for an algorithm used to sign and verify tokens.
    """

    def compute_hash_digest(self, bytestr: bytes) -> bytes:
        """
        Compute a hash digest using the specified algorithm's hash algorithm.

        If there is no hash algorithm, raises a NotImplementedError.
        """
        # lookup self.hash_alg if defined in a way that mypy can understand
        hash_alg = getattr(self, "hash_alg", None)
        if hash_alg is None:
            raise NotImplementedError
        if (
            has_crypto
            and isinstance(hash_alg, type)
            and issubclass(hash_alg, hashes.HashAlgorithm)
        ):
            # ``cryptography``-style hash class: build a Hash context, feed
            # it the data, and finalize.
            digest = hashes.Hash(hash_alg(), backend=default_backend())
            digest.update(bytestr)
            return bytes(digest.finalize())
        else:
            # hashlib-style callable: returns an object exposing .digest().
            return bytes(hash_alg(bytestr).digest())

    @abstractmethod
    def prepare_key(self, key: Any) -> Any:
        """
        Performs necessary validation and conversions on the key and returns
        the key value in the proper format for sign() and verify().
        """

    @abstractmethod
    def sign(self, msg: bytes, key: Any) -> bytes:
        """
        Returns a digital signature for the specified message
        using the specified key value.
        """

    @abstractmethod
    def verify(self, msg: bytes, key: Any, sig: bytes) -> bool:
        """
        Verifies that the specified digital signature is valid
        for the specified message and key values.
        """

    # The two @overload stubs below give type checkers the as_dict=True ->
    # dict / as_dict=False -> str mapping; only the final definition runs.
    @overload
    @staticmethod
    @abstractmethod
    def to_jwk(key_obj, as_dict: Literal[True]) -> JWKDict:
        ...  # pragma: no cover

    @overload
    @staticmethod
    @abstractmethod
    def to_jwk(key_obj, as_dict: Literal[False] = False) -> str:
        ...  # pragma: no cover

    @staticmethod
    @abstractmethod
    def to_jwk(key_obj, as_dict: bool = False) -> Union[JWKDict, str]:
        """
        Serializes a given key into a JWK
        """

    @staticmethod
    @abstractmethod
    def from_jwk(jwk: str | JWKDict) -> Any:
        """
        Deserializes a given key from JWK back into a key object
        """
class NoneAlgorithm(Algorithm):
    """
    Implementation of the "none" algorithm: tokens carry no signature, so
    signing yields an empty byte string and verification never succeeds.
    """

    def prepare_key(self, key: str | None) -> None:
        # An empty string is treated the same as an absent key.
        normalized = None if key == "" else key
        if normalized is not None:
            raise InvalidKeyError('When alg = "none", key value must be None.')
        return normalized

    def sign(self, msg: bytes, key: None) -> bytes:
        # Nothing to sign with; the signature is always empty.
        return b""

    def verify(self, msg: bytes, key: None, sig: bytes) -> bool:
        # Unsigned tokens can never be positively verified.
        return False

    @staticmethod
    def to_jwk(key_obj: Any, as_dict: bool = False) -> NoReturn:
        # "none" has no key material to serialize.
        raise NotImplementedError()

    @staticmethod
    def from_jwk(jwk: str | JWKDict) -> NoReturn:
        raise NotImplementedError()
class HMACAlgorithm(Algorithm):
    """
    Signs and verifies messages with HMAC over the configured hashlib
    hash function (SHA-256 / SHA-384 / SHA-512).
    """

    SHA256: ClassVar[HashlibHash] = hashlib.sha256
    SHA384: ClassVar[HashlibHash] = hashlib.sha384
    SHA512: ClassVar[HashlibHash] = hashlib.sha512

    def __init__(self, hash_alg: HashlibHash) -> None:
        self.hash_alg = hash_alg

    def prepare_key(self, key: str | bytes) -> bytes:
        key_bytes = force_bytes(key)
        # Guard against a common mistake: handing a PEM/SSH *asymmetric*
        # key to an algorithm that expects a shared secret.
        looks_asymmetric = is_pem_format(key_bytes) or is_ssh_key(key_bytes)
        if looks_asymmetric:
            raise InvalidKeyError(
                "The specified key is an asymmetric key or x509 certificate and"
                " should not be used as an HMAC secret."
            )
        return key_bytes

    @overload
    @staticmethod
    def to_jwk(key_obj: str | bytes, as_dict: Literal[True]) -> JWKDict:
        ...  # pragma: no cover

    @overload
    @staticmethod
    def to_jwk(key_obj: str | bytes, as_dict: Literal[False] = False) -> str:
        ...  # pragma: no cover

    @staticmethod
    def to_jwk(key_obj: str | bytes, as_dict: bool = False) -> Union[JWKDict, str]:
        # Symmetric ("oct") JWK: the secret goes base64url-encoded into "k".
        jwk = {
            "k": base64url_encode(force_bytes(key_obj)).decode(),
            "kty": "oct",
        }
        if as_dict:
            return jwk
        return json.dumps(jwk)

    @staticmethod
    def from_jwk(jwk: str | JWKDict) -> bytes:
        if isinstance(jwk, str):
            try:
                obj: JWKDict = json.loads(jwk)
            except ValueError:
                raise InvalidKeyError("Key is not valid JSON")
        elif isinstance(jwk, dict):
            obj = jwk
        else:
            raise InvalidKeyError("Key is not valid JSON")

        if obj.get("kty") != "oct":
            raise InvalidKeyError("Not an HMAC key")

        return base64url_decode(obj["k"])

    def sign(self, msg: bytes, key: bytes) -> bytes:
        return hmac.new(key, msg, digestmod=self.hash_alg).digest()

    def verify(self, msg: bytes, key: bytes, sig: bytes) -> bool:
        # compare_digest avoids leaking timing information.
        return hmac.compare_digest(sig, self.sign(msg, key))
if has_crypto:
    class RSAAlgorithm(Algorithm):
        """
        Performs signing and verification operations using
        RSASSA-PKCS-v1_5 and the specified hash function.
        """

        SHA256: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA256
        SHA384: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA384
        SHA512: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA512

        def __init__(self, hash_alg: type[hashes.HashAlgorithm]) -> None:
            self.hash_alg = hash_alg

        def prepare_key(self, key: AllowedRSAKeys | str | bytes) -> AllowedRSAKeys:
            # Accept already-constructed key objects unchanged; otherwise the
            # key must be PEM/SSH text or bytes.
            if isinstance(key, (RSAPrivateKey, RSAPublicKey)):
                return key
            if not isinstance(key, (bytes, str)):
                raise TypeError("Expecting a PEM-formatted key.")
            key_bytes = force_bytes(key)
            try:
                # "ssh-rsa ..." is an OpenSSH public key; anything else is
                # tried as a PEM private key first, then as a PEM public key.
                if key_bytes.startswith(b"ssh-rsa"):
                    return cast(RSAPublicKey, load_ssh_public_key(key_bytes))
                else:
                    return cast(
                        RSAPrivateKey, load_pem_private_key(key_bytes, password=None)
                    )
            except ValueError:
                return cast(RSAPublicKey, load_pem_public_key(key_bytes))

        # Overload stubs: as_dict=True -> dict, as_dict=False -> JSON string.
        @overload
        @staticmethod
        def to_jwk(key_obj: AllowedRSAKeys, as_dict: Literal[True]) -> JWKDict:
            ...  # pragma: no cover

        @overload
        @staticmethod
        def to_jwk(key_obj: AllowedRSAKeys, as_dict: Literal[False] = False) -> str:
            ...  # pragma: no cover

        @staticmethod
        def to_jwk(
            key_obj: AllowedRSAKeys, as_dict: bool = False
        ) -> Union[JWKDict, str]:
            obj: dict[str, Any] | None = None

            if hasattr(key_obj, "private_numbers"):
                # Private key: emit the full set of RSA JWK parameters
                # (n, e, d plus the CRT values p, q, dp, dq, qi).
                numbers = key_obj.private_numbers()
                obj = {
                    "kty": "RSA",
                    "key_ops": ["sign"],
                    "n": to_base64url_uint(numbers.public_numbers.n).decode(),
                    "e": to_base64url_uint(numbers.public_numbers.e).decode(),
                    "d": to_base64url_uint(numbers.d).decode(),
                    "p": to_base64url_uint(numbers.p).decode(),
                    "q": to_base64url_uint(numbers.q).decode(),
                    "dp": to_base64url_uint(numbers.dmp1).decode(),
                    "dq": to_base64url_uint(numbers.dmq1).decode(),
                    "qi": to_base64url_uint(numbers.iqmp).decode(),
                }
            elif hasattr(key_obj, "verify"):
                # Public key: only the modulus and exponent are needed.
                numbers = key_obj.public_numbers()
                obj = {
                    "kty": "RSA",
                    "key_ops": ["verify"],
                    "n": to_base64url_uint(numbers.n).decode(),
                    "e": to_base64url_uint(numbers.e).decode(),
                }
            else:
                raise InvalidKeyError("Not a public or private key")

            if as_dict:
                return obj
            else:
                return json.dumps(obj)

        @staticmethod
        def from_jwk(jwk: str | JWKDict) -> AllowedRSAKeys:
            try:
                if isinstance(jwk, str):
                    obj = json.loads(jwk)
                elif isinstance(jwk, dict):
                    obj = jwk
                else:
                    raise ValueError
            except ValueError:
                raise InvalidKeyError("Key is not valid JSON")

            if obj.get("kty") != "RSA":
                raise InvalidKeyError("Not an RSA key")

            if "d" in obj and "e" in obj and "n" in obj:
                # Private key ("d" present).
                if "oth" in obj:
                    # "oth" marks multi-prime RSA (more than 2 primes).
                    raise InvalidKeyError(
                        "Unsupported RSA private key: > 2 primes not supported"
                    )

                # The CRT parameters are all-or-nothing: a JWK may omit all
                # of them (then they are recomputed from d), but not some.
                other_props = ["p", "q", "dp", "dq", "qi"]
                props_found = [prop in obj for prop in other_props]
                any_props_found = any(props_found)

                if any_props_found and not all(props_found):
                    raise InvalidKeyError(
                        "RSA key must include all parameters if any are present besides d"
                    )

                public_numbers = RSAPublicNumbers(
                    from_base64url_uint(obj["e"]),
                    from_base64url_uint(obj["n"]),
                )

                if any_props_found:
                    numbers = RSAPrivateNumbers(
                        d=from_base64url_uint(obj["d"]),
                        p=from_base64url_uint(obj["p"]),
                        q=from_base64url_uint(obj["q"]),
                        dmp1=from_base64url_uint(obj["dp"]),
                        dmq1=from_base64url_uint(obj["dq"]),
                        iqmp=from_base64url_uint(obj["qi"]),
                        public_numbers=public_numbers,
                    )
                else:
                    # Recover p/q from (n, e, d) and derive the CRT values.
                    d = from_base64url_uint(obj["d"])
                    p, q = rsa_recover_prime_factors(
                        public_numbers.n, d, public_numbers.e
                    )
                    numbers = RSAPrivateNumbers(
                        d=d,
                        p=p,
                        q=q,
                        dmp1=rsa_crt_dmp1(d, p),
                        dmq1=rsa_crt_dmq1(d, q),
                        iqmp=rsa_crt_iqmp(p, q),
                        public_numbers=public_numbers,
                    )

                return numbers.private_key()
            elif "n" in obj and "e" in obj:
                # Public key (no "d").
                return RSAPublicNumbers(
                    from_base64url_uint(obj["e"]),
                    from_base64url_uint(obj["n"]),
                ).public_key()
            else:
                raise InvalidKeyError("Not a public or private key")

        def sign(self, msg: bytes, key: RSAPrivateKey) -> bytes:
            return key.sign(msg, padding.PKCS1v15(), self.hash_alg())

        def verify(self, msg: bytes, key: RSAPublicKey, sig: bytes) -> bool:
            try:
                key.verify(sig, msg, padding.PKCS1v15(), self.hash_alg())
                return True
            except InvalidSignature:
                return False
class ECAlgorithm(Algorithm):
    """
    Performs signing and verification operations using
    ECDSA and the specified hash function
    """

    SHA256: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA256
    SHA384: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA384
    SHA512: ClassVar[type[hashes.HashAlgorithm]] = hashes.SHA512

    def __init__(self, hash_alg: type[hashes.HashAlgorithm]) -> None:
        self.hash_alg = hash_alg

    def prepare_key(self, key: AllowedECKeys | str | bytes) -> AllowedECKeys:
        """Accept an EC key object, or PEM/SSH-encoded key material, and
        return the corresponding ``cryptography`` key object.

        :raises TypeError: if ``key`` is neither a key object nor str/bytes
        :raises InvalidKeyError: if the material is not an EC key
        """
        if isinstance(key, (EllipticCurvePrivateKey, EllipticCurvePublicKey)):
            return key
        if not isinstance(key, (bytes, str)):
            raise TypeError("Expecting a PEM-formatted key.")
        key_bytes = force_bytes(key)
        # Attempt to load key. We don't know if it's
        # a Signing Key or a Verifying Key, so we try
        # the Verifying Key first.
        try:
            if key_bytes.startswith(b"ecdsa-sha2-"):
                crypto_key = load_ssh_public_key(key_bytes)
            else:
                crypto_key = load_pem_public_key(key_bytes)  # type: ignore[assignment]
        except ValueError:
            crypto_key = load_pem_private_key(key_bytes, password=None)  # type: ignore[assignment]
        # Explicit check the key to prevent confusing errors from cryptography
        if not isinstance(
            crypto_key, (EllipticCurvePrivateKey, EllipticCurvePublicKey)
        ):
            raise InvalidKeyError(
                "Expecting a EllipticCurvePrivateKey/EllipticCurvePublicKey. Wrong key provided for ECDSA algorithms"
            )
        return crypto_key

    def sign(self, msg: bytes, key: EllipticCurvePrivateKey) -> bytes:
        """Sign ``msg`` and return the signature in raw (r || s) form."""
        der_sig = key.sign(msg, ECDSA(self.hash_alg()))
        return der_to_raw_signature(der_sig, key.curve)

    def verify(self, msg: bytes, key: "AllowedECKeys", sig: bytes) -> bool:
        """Return True iff ``sig`` (raw r || s form) is valid for ``msg``."""
        try:
            der_sig = raw_to_der_signature(sig, key.curve)
        except ValueError:
            # Malformed raw signature (wrong length) — treat as invalid.
            return False
        try:
            public_key = (
                key.public_key()
                if isinstance(key, EllipticCurvePrivateKey)
                else key
            )
            public_key.verify(der_sig, msg, ECDSA(self.hash_alg()))
            return True
        except InvalidSignature:
            return False

    @overload
    @staticmethod
    def to_jwk(key_obj: AllowedECKeys, as_dict: Literal[True]) -> JWKDict:
        ...  # pragma: no cover

    @overload
    @staticmethod
    def to_jwk(key_obj: AllowedECKeys, as_dict: Literal[False] = False) -> str:
        ...  # pragma: no cover

    @staticmethod
    def to_jwk(
        key_obj: AllowedECKeys, as_dict: bool = False
    ) -> Union[JWKDict, str]:
        """Serialize ``key_obj`` as a JWK (dict or JSON string).

        The private scalar ``d`` is included only for private keys.
        :raises InvalidKeyError: for non-EC keys or unsupported curves
        """
        if isinstance(key_obj, EllipticCurvePrivateKey):
            public_numbers = key_obj.public_key().public_numbers()
        elif isinstance(key_obj, EllipticCurvePublicKey):
            public_numbers = key_obj.public_numbers()
        else:
            raise InvalidKeyError("Not a public or private key")
        if isinstance(key_obj.curve, SECP256R1):
            crv = "P-256"
        elif isinstance(key_obj.curve, SECP384R1):
            crv = "P-384"
        elif isinstance(key_obj.curve, SECP521R1):
            crv = "P-521"
        elif isinstance(key_obj.curve, SECP256K1):
            crv = "secp256k1"
        else:
            raise InvalidKeyError(f"Invalid curve: {key_obj.curve}")
        obj: dict[str, Any] = {
            "kty": "EC",
            "crv": crv,
            "x": to_base64url_uint(public_numbers.x).decode(),
            "y": to_base64url_uint(public_numbers.y).decode(),
        }
        if isinstance(key_obj, EllipticCurvePrivateKey):
            obj["d"] = to_base64url_uint(
                key_obj.private_numbers().private_value
            ).decode()
        if as_dict:
            return obj
        else:
            return json.dumps(obj)

    @staticmethod
    def from_jwk(jwk: str | JWKDict) -> AllowedECKeys:
        """Deserialize a JWK (JSON string or dict) into an EC key object.

        :raises InvalidKeyError: for malformed JSON, non-EC keys, unsupported
            curves, or coordinate/scalar lengths that don't match the curve
        """
        try:
            if isinstance(jwk, str):
                obj = json.loads(jwk)
            elif isinstance(jwk, dict):
                obj = jwk
            else:
                raise ValueError
        except ValueError:
            raise InvalidKeyError("Key is not valid JSON")
        if obj.get("kty") != "EC":
            raise InvalidKeyError("Not an Elliptic curve key")
        if "x" not in obj or "y" not in obj:
            raise InvalidKeyError("Not an Elliptic curve key")
        x = base64url_decode(obj.get("x"))
        y = base64url_decode(obj.get("y"))
        curve = obj.get("crv")
        curve_obj: EllipticCurve
        if curve == "P-256":
            if len(x) == len(y) == 32:
                curve_obj = SECP256R1()
            else:
                raise InvalidKeyError("Coords should be 32 bytes for curve P-256")
        elif curve == "P-384":
            if len(x) == len(y) == 48:
                curve_obj = SECP384R1()
            else:
                raise InvalidKeyError("Coords should be 48 bytes for curve P-384")
        elif curve == "P-521":
            if len(x) == len(y) == 66:
                curve_obj = SECP521R1()
            else:
                raise InvalidKeyError("Coords should be 66 bytes for curve P-521")
        elif curve == "secp256k1":
            if len(x) == len(y) == 32:
                curve_obj = SECP256K1()
            else:
                raise InvalidKeyError(
                    "Coords should be 32 bytes for curve secp256k1"
                )
        else:
            raise InvalidKeyError(f"Invalid curve: {curve}")
        public_numbers = EllipticCurvePublicNumbers(
            x=int.from_bytes(x, byteorder="big"),
            y=int.from_bytes(y, byteorder="big"),
            curve=curve_obj,
        )
        if "d" not in obj:
            return public_numbers.public_key()
        d = base64url_decode(obj.get("d"))
        if len(d) != len(x):
            # BUG FIX: the message was never formatted — str.format args were
            # being passed as extra exception arguments.
            raise InvalidKeyError(
                f"D should be {len(x)} bytes for curve {curve}"
            )
        return EllipticCurvePrivateNumbers(
            int.from_bytes(d, byteorder="big"), public_numbers
        ).private_key()
class RSAPSSAlgorithm(RSAAlgorithm):
    """
    Performs a signature using RSASSA-PSS with MGF1
    """

    def _pss_padding(self):
        # Both sign and verify use identical PSS parameters: MGF1 over the
        # configured hash, with the salt length equal to its digest size.
        return padding.PSS(
            mgf=padding.MGF1(self.hash_alg()),
            salt_length=self.hash_alg().digest_size,
        )

    def sign(self, msg: bytes, key: RSAPrivateKey) -> bytes:
        """Sign ``msg`` with ``key`` using RSASSA-PSS."""
        return key.sign(msg, self._pss_padding(), self.hash_alg())

    def verify(self, msg: bytes, key: RSAPublicKey, sig: bytes) -> bool:
        """Return True iff ``sig`` is a valid PSS signature over ``msg``."""
        try:
            key.verify(sig, msg, self._pss_padding(), self.hash_alg())
        except InvalidSignature:
            return False
        return True
class OKPAlgorithm(Algorithm):
    """
    Performs signing and verification operations using EdDSA
    This class requires ``cryptography>=2.6`` to be installed.
    """

    def __init__(self, **kwargs: Any) -> None:
        pass

    def prepare_key(self, key: AllowedOKPKeys | str | bytes) -> AllowedOKPKeys:
        """Accept an Ed25519/Ed448 key object, or PEM/SSH text, and return
        the corresponding ``cryptography`` key object.

        :raises InvalidKeyError: if the material is not an Ed25519/Ed448 key
        """
        if isinstance(key, (bytes, str)):
            key_str = key.decode("utf-8") if isinstance(key, bytes) else key
            key_bytes = key.encode("utf-8") if isinstance(key, str) else key
            if "-----BEGIN PUBLIC" in key_str:
                key = load_pem_public_key(key_bytes)  # type: ignore[assignment]
            elif "-----BEGIN PRIVATE" in key_str:
                key = load_pem_private_key(key_bytes, password=None)  # type: ignore[assignment]
            elif key_str[0:4] == "ssh-":
                key = load_ssh_public_key(key_bytes)  # type: ignore[assignment]
        # Explicit check the key to prevent confusing errors from cryptography
        if not isinstance(
            key,
            (Ed25519PrivateKey, Ed25519PublicKey, Ed448PrivateKey, Ed448PublicKey),
        ):
            # BUG FIX: the message previously named EC key types, copy-pasted
            # from the ECDSA algorithm; EdDSA uses Ed25519/Ed448 keys.
            raise InvalidKeyError(
                "Expecting a Ed25519PrivateKey/Ed25519PublicKey/Ed448PrivateKey/Ed448PublicKey. Wrong key provided for EdDSA algorithms"
            )
        return key

    def sign(
        self, msg: str | bytes, key: Ed25519PrivateKey | Ed448PrivateKey
    ) -> bytes:
        """
        Sign a message ``msg`` using the EdDSA private key ``key``
        :param str|bytes msg: Message to sign
        :param Ed25519PrivateKey|Ed448PrivateKey key: A :class:`.Ed25519PrivateKey`
            or :class:`.Ed448PrivateKey` instance
        :return bytes signature: The signature, as bytes
        """
        msg_bytes = msg.encode("utf-8") if isinstance(msg, str) else msg
        return key.sign(msg_bytes)

    def verify(
        self, msg: str | bytes, key: AllowedOKPKeys, sig: str | bytes
    ) -> bool:
        """
        Verify a given ``msg`` against a signature ``sig`` using the EdDSA key ``key``
        :param str|bytes msg: Message that was signed
        :param Ed25519PrivateKey|Ed25519PublicKey|Ed448PrivateKey|Ed448PublicKey key:
            A private or public EdDSA key instance
        :param str|bytes sig: EdDSA signature to check ``msg`` against
        :return bool verified: True if signature is valid, False if not.
        """
        try:
            msg_bytes = msg.encode("utf-8") if isinstance(msg, str) else msg
            sig_bytes = sig.encode("utf-8") if isinstance(sig, str) else sig
            public_key = (
                key.public_key()
                if isinstance(key, (Ed25519PrivateKey, Ed448PrivateKey))
                else key
            )
            public_key.verify(sig_bytes, msg_bytes)
            return True  # If no exception was raised, the signature is valid.
        except InvalidSignature:
            return False

    @overload
    @staticmethod
    def to_jwk(key: AllowedOKPKeys, as_dict: Literal[True]) -> JWKDict:
        ...  # pragma: no cover

    @overload
    @staticmethod
    def to_jwk(key: AllowedOKPKeys, as_dict: Literal[False] = False) -> str:
        ...  # pragma: no cover

    @staticmethod
    def to_jwk(key: AllowedOKPKeys, as_dict: bool = False) -> Union[JWKDict, str]:
        """Serialize ``key`` as an OKP JWK (dict or JSON string); the private
        scalar ``d`` is included only for private keys.

        :raises InvalidKeyError: for non-EdDSA keys
        """
        if isinstance(key, (Ed25519PublicKey, Ed448PublicKey)):
            x = key.public_bytes(
                encoding=Encoding.Raw,
                format=PublicFormat.Raw,
            )
            crv = "Ed25519" if isinstance(key, Ed25519PublicKey) else "Ed448"
            obj = {
                "x": base64url_encode(force_bytes(x)).decode(),
                "kty": "OKP",
                "crv": crv,
            }
            if as_dict:
                return obj
            else:
                return json.dumps(obj)
        if isinstance(key, (Ed25519PrivateKey, Ed448PrivateKey)):
            d = key.private_bytes(
                encoding=Encoding.Raw,
                format=PrivateFormat.Raw,
                encryption_algorithm=NoEncryption(),
            )
            x = key.public_key().public_bytes(
                encoding=Encoding.Raw,
                format=PublicFormat.Raw,
            )
            crv = "Ed25519" if isinstance(key, Ed25519PrivateKey) else "Ed448"
            obj = {
                "x": base64url_encode(force_bytes(x)).decode(),
                "d": base64url_encode(force_bytes(d)).decode(),
                "kty": "OKP",
                "crv": crv,
            }
            if as_dict:
                return obj
            else:
                return json.dumps(obj)
        raise InvalidKeyError("Not a public or private key")

    @staticmethod
    def from_jwk(jwk: str | JWKDict) -> AllowedOKPKeys:
        """Deserialize an OKP JWK (JSON string or dict) into an EdDSA key.

        :raises InvalidKeyError: for malformed JSON, wrong ``kty``/``crv``,
            a missing ``x`` member, or byte strings of invalid length
        """
        try:
            if isinstance(jwk, str):
                obj = json.loads(jwk)
            elif isinstance(jwk, dict):
                obj = jwk
            else:
                raise ValueError
        except ValueError:
            raise InvalidKeyError("Key is not valid JSON")
        if obj.get("kty") != "OKP":
            raise InvalidKeyError("Not an Octet Key Pair")
        curve = obj.get("crv")
        if curve != "Ed25519" and curve != "Ed448":
            raise InvalidKeyError(f"Invalid curve: {curve}")
        if "x" not in obj:
            raise InvalidKeyError('OKP should have "x" parameter')
        x = base64url_decode(obj.get("x"))
        try:
            if "d" not in obj:
                if curve == "Ed25519":
                    return Ed25519PublicKey.from_public_bytes(x)
                return Ed448PublicKey.from_public_bytes(x)
            d = base64url_decode(obj.get("d"))
            if curve == "Ed25519":
                return Ed25519PrivateKey.from_private_bytes(d)
            return Ed448PrivateKey.from_private_bytes(d)
        except ValueError as err:
            raise InvalidKeyError("Invalid key parameter") from err
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/api_jwt.py | from __future__ import annotations
import json
import warnings
from calendar import timegm
from collections.abc import Iterable
from datetime import datetime, timedelta, timezone
from typing import TYPE_CHECKING, Any
from . import api_jws
from .exceptions import (
DecodeError,
ExpiredSignatureError,
ImmatureSignatureError,
InvalidAudienceError,
InvalidIssuedAtError,
InvalidIssuerError,
MissingRequiredClaimError,
)
from .warnings import RemovedInPyjwt3Warning
if TYPE_CHECKING:
from .algorithms import AllowedPrivateKeys, AllowedPublicKeys
class PyJWT:
    """High-level JWT API: claim-aware encode/decode built on ``api_jws``.

    Adds JSON (de)serialization of the payload and validation of the
    registered claims (exp, nbf, iat, aud, iss) on top of raw JWS handling.
    """

    def __init__(self, options: dict[str, Any] | None = None) -> None:
        if options is None:
            options = {}
        # Instance-level defaults; can be overridden per call via the
        # ``options`` argument of decode()/decode_complete().
        self.options: dict[str, Any] = {**self._get_default_options(), **options}

    @staticmethod
    def _get_default_options() -> dict[str, bool | list[str]]:
        """Return the built-in option set: verify everything, require nothing."""
        return {
            "verify_signature": True,
            "verify_exp": True,
            "verify_nbf": True,
            "verify_iat": True,
            "verify_aud": True,
            "verify_iss": True,
            "require": [],
        }

    def encode(
        self,
        payload: dict[str, Any],
        key: AllowedPrivateKeys | str | bytes,
        algorithm: str | None = "HS256",
        headers: dict[str, Any] | None = None,
        json_encoder: type[json.JSONEncoder] | None = None,
        sort_headers: bool = True,
    ) -> str:
        """Serialize *payload* to JSON, sign it, and return the compact JWT.

        ``datetime`` values in exp/iat/nbf are converted to integer POSIX
        timestamps. Raises ``TypeError`` when *payload* is not a dict.
        """
        # Check that we get a dict
        if not isinstance(payload, dict):
            raise TypeError(
                "Expecting a dict object, as JWT only supports "
                "JSON objects as payloads."
            )
        # Payload
        payload = payload.copy()
        for time_claim in ["exp", "iat", "nbf"]:
            # Convert datetime to an intDate value in known time-format claims
            if isinstance(payload.get(time_claim), datetime):
                payload[time_claim] = timegm(payload[time_claim].utctimetuple())
        json_payload = self._encode_payload(
            payload,
            headers=headers,
            json_encoder=json_encoder,
        )
        return api_jws.encode(
            json_payload,
            key,
            algorithm,
            headers,
            json_encoder,
            sort_headers=sort_headers,
        )

    def _encode_payload(
        self,
        payload: dict[str, Any],
        headers: dict[str, Any] | None = None,
        json_encoder: type[json.JSONEncoder] | None = None,
    ) -> bytes:
        """
        Encode a given payload to the bytes to be signed.
        This method is intended to be overridden by subclasses that need to
        encode the payload in a different way, e.g. compress the payload.
        """
        return json.dumps(
            payload,
            separators=(",", ":"),
            cls=json_encoder,
        ).encode("utf-8")

    def decode_complete(
        self,
        jwt: str | bytes,
        key: AllowedPublicKeys | str | bytes = "",
        algorithms: list[str] | None = None,
        options: dict[str, Any] | None = None,
        # deprecated arg, remove in pyjwt3
        verify: bool | None = None,
        # could be used as passthrough to api_jws, consider removal in pyjwt3
        detached_payload: bytes | None = None,
        # passthrough arguments to _validate_claims
        # consider putting in options
        audience: str | Iterable[str] | None = None,
        issuer: str | None = None,
        leeway: float | timedelta = 0,
        # kwargs
        **kwargs: Any,
    ) -> dict[str, Any]:
        """Verify ``jwt`` and return a dict with ``payload``, ``header`` and
        ``signature``. Claim validation honours ``options``/``audience``/
        ``issuer``/``leeway``.
        """
        if kwargs:
            warnings.warn(
                "passing additional kwargs to decode_complete() is deprecated "
                "and will be removed in pyjwt version 3. "
                f"Unsupported kwargs: {tuple(kwargs.keys())}",
                RemovedInPyjwt3Warning,
            )
        options = dict(options or {})  # shallow-copy or initialize an empty dict
        options.setdefault("verify_signature", True)
        # If the user has set the legacy `verify` argument, and it doesn't match
        # what the relevant `options` entry for the argument is, inform the user
        # that they're likely making a mistake.
        if verify is not None and verify != options["verify_signature"]:
            warnings.warn(
                "The `verify` argument to `decode` does nothing in PyJWT 2.0 and newer. "
                "The equivalent is setting `verify_signature` to False in the `options` dictionary. "
                "This invocation has a mismatch between the kwarg and the option entry.",
                category=DeprecationWarning,
            )
        if not options["verify_signature"]:
            # With signature checks off, claim checks default to off as well
            # (each can still be re-enabled explicitly).
            options.setdefault("verify_exp", False)
            options.setdefault("verify_nbf", False)
            options.setdefault("verify_iat", False)
            options.setdefault("verify_aud", False)
            options.setdefault("verify_iss", False)
        if options["verify_signature"] and not algorithms:
            raise DecodeError(
                'It is required that you pass in a value for the "algorithms" argument when calling decode().'
            )
        decoded = api_jws.decode_complete(
            jwt,
            key=key,
            algorithms=algorithms,
            options=options,
            detached_payload=detached_payload,
        )
        payload = self._decode_payload(decoded)
        merged_options = {**self.options, **options}
        self._validate_claims(
            payload, merged_options, audience=audience, issuer=issuer, leeway=leeway
        )
        decoded["payload"] = payload
        return decoded

    def _decode_payload(self, decoded: dict[str, Any]) -> Any:
        """
        Decode the payload from a JWS dictionary (payload, signature, header).
        This method is intended to be overridden by subclasses that need to
        decode the payload in a different way, e.g. decompress compressed
        payloads.
        """
        try:
            payload = json.loads(decoded["payload"])
        except ValueError as e:
            raise DecodeError(f"Invalid payload string: {e}")
        if not isinstance(payload, dict):
            raise DecodeError("Invalid payload string: must be a json object")
        return payload

    def decode(
        self,
        jwt: str | bytes,
        key: AllowedPublicKeys | str | bytes = "",
        algorithms: list[str] | None = None,
        options: dict[str, Any] | None = None,
        # deprecated arg, remove in pyjwt3
        verify: bool | None = None,
        # could be used as passthrough to api_jws, consider removal in pyjwt3
        detached_payload: bytes | None = None,
        # passthrough arguments to _validate_claims
        # consider putting in options
        audience: str | Iterable[str] | None = None,
        issuer: str | None = None,
        leeway: float | timedelta = 0,
        # kwargs
        **kwargs: Any,
    ) -> Any:
        """Verify ``jwt`` and return only the decoded payload dict."""
        if kwargs:
            warnings.warn(
                "passing additional kwargs to decode() is deprecated "
                "and will be removed in pyjwt version 3. "
                f"Unsupported kwargs: {tuple(kwargs.keys())}",
                RemovedInPyjwt3Warning,
            )
        decoded = self.decode_complete(
            jwt,
            key,
            algorithms,
            options,
            verify=verify,
            detached_payload=detached_payload,
            audience=audience,
            issuer=issuer,
            leeway=leeway,
        )
        return decoded["payload"]

    def _validate_claims(
        self,
        payload: dict[str, Any],
        options: dict[str, Any],
        audience=None,
        issuer=None,
        leeway: float | timedelta = 0,
    ) -> None:
        """Run all enabled registered-claim checks against *payload*."""
        if isinstance(leeway, timedelta):
            leeway = leeway.total_seconds()
        if audience is not None and not isinstance(audience, (str, Iterable)):
            raise TypeError("audience must be a string, iterable or None")
        self._validate_required_claims(payload, options)
        now = datetime.now(tz=timezone.utc).timestamp()
        if "iat" in payload and options["verify_iat"]:
            self._validate_iat(payload, now, leeway)
        if "nbf" in payload and options["verify_nbf"]:
            self._validate_nbf(payload, now, leeway)
        if "exp" in payload and options["verify_exp"]:
            self._validate_exp(payload, now, leeway)
        if options["verify_iss"]:
            self._validate_iss(payload, issuer)
        if options["verify_aud"]:
            self._validate_aud(
                payload, audience, strict=options.get("strict_aud", False)
            )

    def _validate_required_claims(
        self,
        payload: dict[str, Any],
        options: dict[str, Any],
    ) -> None:
        """Raise MissingRequiredClaimError for any absent claim in ``require``."""
        for claim in options["require"]:
            if payload.get(claim) is None:
                raise MissingRequiredClaimError(claim)

    def _validate_iat(
        self,
        payload: dict[str, Any],
        now: float,
        leeway: float,
    ) -> None:
        """Reject tokens issued in the future (beyond *leeway*)."""
        try:
            iat = int(payload["iat"])
        except (TypeError, ValueError):
            # BUG FIX: int(None)/int({}) raises TypeError, which previously
            # escaped the PyJWTError hierarchy; map both to the library error.
            raise InvalidIssuedAtError("Issued At claim (iat) must be an integer.")
        if iat > (now + leeway):
            raise ImmatureSignatureError("The token is not yet valid (iat)")

    def _validate_nbf(
        self,
        payload: dict[str, Any],
        now: float,
        leeway: float,
    ) -> None:
        """Reject tokens whose not-before time is still in the future."""
        try:
            nbf = int(payload["nbf"])
        except (TypeError, ValueError):
            # See _validate_iat: also catch TypeError for non-numeric claims.
            raise DecodeError("Not Before claim (nbf) must be an integer.")
        if nbf > (now + leeway):
            raise ImmatureSignatureError("The token is not yet valid (nbf)")

    def _validate_exp(
        self,
        payload: dict[str, Any],
        now: float,
        leeway: float,
    ) -> None:
        """Reject tokens whose expiry time has passed (beyond *leeway*)."""
        try:
            exp = int(payload["exp"])
        except (TypeError, ValueError):
            # See _validate_iat: also catch TypeError for non-numeric claims.
            raise DecodeError("Expiration Time claim (exp) must be an integer.")
        if exp <= (now - leeway):
            raise ExpiredSignatureError("Signature has expired")

    def _validate_aud(
        self,
        payload: dict[str, Any],
        audience: str | Iterable[str] | None,
        *,
        strict: bool = False,
    ) -> None:
        """Check the ``aud`` claim against the expected *audience*.

        Non-strict mode accepts any overlap between the supplied audience(s)
        and the claim value(s); strict mode requires an exact string match.
        """
        if audience is None:
            if "aud" not in payload or not payload["aud"]:
                return
            # Application did not specify an audience, but
            # the token has the 'aud' claim
            raise InvalidAudienceError("Invalid audience")
        if "aud" not in payload or not payload["aud"]:
            # Application specified an audience, but it could not be
            # verified since the token does not contain a claim.
            raise MissingRequiredClaimError("aud")
        audience_claims = payload["aud"]
        # In strict mode, we forbid list matching: the supplied audience
        # must be a string, and it must exactly match the audience claim.
        if strict:
            # Only a single audience is allowed in strict mode.
            if not isinstance(audience, str):
                raise InvalidAudienceError("Invalid audience (strict)")
            # Only a single audience claim is allowed in strict mode.
            if not isinstance(audience_claims, str):
                raise InvalidAudienceError("Invalid claim format in token (strict)")
            if audience != audience_claims:
                raise InvalidAudienceError("Audience doesn't match (strict)")
            return
        if isinstance(audience_claims, str):
            audience_claims = [audience_claims]
        if not isinstance(audience_claims, list):
            raise InvalidAudienceError("Invalid claim format in token")
        if any(not isinstance(c, str) for c in audience_claims):
            raise InvalidAudienceError("Invalid claim format in token")
        if isinstance(audience, str):
            audience = [audience]
        if all(aud not in audience_claims for aud in audience):
            raise InvalidAudienceError("Audience doesn't match")

    def _validate_iss(self, payload: dict[str, Any], issuer: Any) -> None:
        """Check the ``iss`` claim when an expected *issuer* is supplied."""
        if issuer is None:
            return
        if "iss" not in payload:
            raise MissingRequiredClaimError("iss")
        if payload["iss"] != issuer:
            raise InvalidIssuerError("Invalid issuer")
# Module-level convenience API: a shared default-configured PyJWT instance,
# with its bound methods exposed as jwt.encode / jwt.decode_complete / jwt.decode.
_jwt_global_obj = PyJWT()
encode = _jwt_global_obj.encode
decode_complete = _jwt_global_obj.decode_complete
decode = _jwt_global_obj.decode
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/warnings.py | class RemovedInPyjwt3Warning(DeprecationWarning):
pass
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/__init__.py | from .api_jwk import PyJWK, PyJWKSet
from .api_jws import (
PyJWS,
get_algorithm_by_name,
get_unverified_header,
register_algorithm,
unregister_algorithm,
)
from .api_jwt import PyJWT, decode, encode
from .exceptions import (
DecodeError,
ExpiredSignatureError,
ImmatureSignatureError,
InvalidAlgorithmError,
InvalidAudienceError,
InvalidIssuedAtError,
InvalidIssuerError,
InvalidKeyError,
InvalidSignatureError,
InvalidTokenError,
MissingRequiredClaimError,
PyJWKClientConnectionError,
PyJWKClientError,
PyJWKError,
PyJWKSetError,
PyJWTError,
)
from .jwks_client import PyJWKClient
# Distribution metadata for the PyJWT package.
__version__ = "2.8.0"
__title__ = "PyJWT"
__description__ = "JSON Web Token implementation in Python"
__url__ = "https://pyjwt.readthedocs.io"
__uri__ = __url__
__doc__ = f"{__description__} <{__uri__}>"
__author__ = "José Padilla"
__email__ = "hello@jpadilla.com"
__license__ = "MIT"
__copyright__ = "Copyright 2015-2022 José Padilla"
# Explicit public API surface re-exported at package level.
__all__ = [
    "PyJWS",
    "PyJWT",
    "PyJWKClient",
    "PyJWK",
    "PyJWKSet",
    "decode",
    "encode",
    "get_unverified_header",
    "register_algorithm",
    "unregister_algorithm",
    "get_algorithm_by_name",
    # Exceptions
    "DecodeError",
    "ExpiredSignatureError",
    "ImmatureSignatureError",
    "InvalidAlgorithmError",
    "InvalidAudienceError",
    "InvalidIssuedAtError",
    "InvalidIssuerError",
    "InvalidKeyError",
    "InvalidSignatureError",
    "InvalidTokenError",
    "MissingRequiredClaimError",
    "PyJWKClientConnectionError",
    "PyJWKClientError",
    "PyJWKError",
    "PyJWKSetError",
    "PyJWTError",
]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/types.py | from typing import Any, Callable, Dict
# A parsed JSON Web Key: arbitrary JSON members keyed by member name.
JWKDict = Dict[str, Any]
# Factory returning a hashlib-style hash object (e.g. hashlib.sha256).
HashlibHash = Callable[..., Any]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/jwk_set_cache.py | import time
from typing import Optional
from .api_jwk import PyJWKSet, PyJWTSetWithTimestamp
class JWKSetCache:
    """Single-entry, time-bounded cache for a fetched JWK set."""

    def __init__(self, lifespan: int) -> None:
        # No entry yet; a lifespan of -1 (or lower) disables expiry.
        self.jwk_set_with_timestamp: Optional[PyJWTSetWithTimestamp] = None
        self.lifespan = lifespan

    def put(self, jwk_set: PyJWKSet) -> None:
        """Store *jwk_set* stamped with the current time; ``None`` clears."""
        if jwk_set is None:
            # clear cache
            self.jwk_set_with_timestamp = None
        else:
            self.jwk_set_with_timestamp = PyJWTSetWithTimestamp(jwk_set)

    def get(self) -> Optional[PyJWKSet]:
        """Return the cached set, or ``None`` when empty or expired."""
        entry = self.jwk_set_with_timestamp
        if entry is None or self.is_expired():
            return None
        return entry.get_jwk_set()

    def is_expired(self) -> bool:
        """True when an entry exists, expiry is enabled, and it is stale."""
        entry = self.jwk_set_with_timestamp
        if entry is None or self.lifespan <= -1:
            return False
        return time.monotonic() > entry.get_timestamp() + self.lifespan
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/utils.py | import base64
import binascii
import re
from typing import Union
try:
from cryptography.hazmat.primitives.asymmetric.ec import EllipticCurve
from cryptography.hazmat.primitives.asymmetric.utils import (
decode_dss_signature,
encode_dss_signature,
)
except ModuleNotFoundError:
pass
def force_bytes(value: Union[bytes, str]) -> bytes:
    """Coerce *value* to ``bytes``, UTF-8 encoding when it is a ``str``."""
    if isinstance(value, bytes):
        return value
    if isinstance(value, str):
        return value.encode("utf-8")
    raise TypeError("Expected a string value")
def base64url_decode(input: Union[bytes, str]) -> bytes:
    """Decode base64url data, tolerating missing ``=`` padding."""
    data = force_bytes(input)
    # Re-pad to a multiple of four before handing off to the stdlib decoder.
    missing_padding = -len(data) % 4
    if missing_padding:
        data += b"=" * missing_padding
    return base64.urlsafe_b64decode(data)
def base64url_encode(input: bytes) -> bytes:
    """Base64url-encode *input* with the trailing ``=`` padding stripped."""
    encoded = base64.urlsafe_b64encode(input)
    return encoded.rstrip(b"=")
def to_base64url_uint(val: int) -> bytes:
    """Encode a non-negative integer as a base64url big-endian octet string."""
    if val < 0:
        raise ValueError("Must be a positive integer")
    # Zero serializes to a single zero octet rather than the empty string.
    raw = bytes_from_int(val) or b"\x00"
    return base64url_encode(raw)
def from_base64url_uint(val: Union[bytes, str]) -> int:
    """Decode a base64url octet string into a big-endian unsigned integer."""
    decoded = base64url_decode(force_bytes(val))
    return int.from_bytes(decoded, "big")
def number_to_bytes(num: int, num_bytes: int) -> bytes:
    """Serialize *num* as exactly *num_bytes* big-endian, zero-padded bytes."""
    padded_hex = f"{num:0{2 * num_bytes}x}"
    return binascii.a2b_hex(padded_hex.encode("ascii"))
def bytes_to_number(string: bytes) -> int:
    """Interpret *string* as a big-endian unsigned integer (via its hex form)."""
    hex_repr = binascii.b2a_hex(string)
    return int(hex_repr, 16)
def bytes_from_int(val: int) -> bytes:
    """Serialize a non-negative integer to its minimal big-endian byte form.

    Returns ``b""`` for 0, matching the previous shift-loop behavior.
    Raises ValueError for negative input — the old ``remaining >>= 8`` loop
    never terminated for negatives, since Python's right shift of a negative
    int converges to -1, not 0.
    """
    if val < 0:
        raise ValueError("Must be a non-negative integer")
    byte_length = (val.bit_length() + 7) // 8
    return val.to_bytes(byte_length, "big", signed=False)
def der_to_raw_signature(der_sig: bytes, curve: "EllipticCurve") -> bytes:
    """Convert a DER-encoded ECDSA signature to raw ``r || s`` form, with
    each component zero-padded to the curve's byte length."""
    component_len = (curve.key_size + 7) // 8
    r, s = decode_dss_signature(der_sig)
    return b"".join(number_to_bytes(c, component_len) for c in (r, s))
def raw_to_der_signature(raw_sig: bytes, curve: "EllipticCurve") -> bytes:
    """Convert a raw ``r || s`` ECDSA signature to DER encoding.

    Raises ValueError when the raw signature length does not match the curve.
    """
    component_len = (curve.key_size + 7) // 8
    if len(raw_sig) != 2 * component_len:
        raise ValueError("Invalid signature")
    r = bytes_to_number(raw_sig[:component_len])
    s = bytes_to_number(raw_sig[component_len:])
    return bytes(encode_dss_signature(r, s))
# Based on https://github.com/hynek/pem/blob/7ad94db26b0bc21d10953f5dbad3acfdfacf57aa/src/pem/_core.py#L224-L252
# Block labels that may appear in a PEM "BEGIN ..."/"END ..." armor line.
_PEMS = {
    b"CERTIFICATE",
    b"TRUSTED CERTIFICATE",
    b"PRIVATE KEY",
    b"PUBLIC KEY",
    b"ENCRYPTED PRIVATE KEY",
    b"OPENSSH PRIVATE KEY",
    b"DSA PRIVATE KEY",
    b"RSA PRIVATE KEY",
    b"RSA PUBLIC KEY",
    b"EC PRIVATE KEY",
    b"DH PARAMETERS",
    b"NEW CERTIFICATE REQUEST",
    b"CERTIFICATE REQUEST",
    b"SSH2 PUBLIC KEY",
    b"SSH2 ENCRYPTED PRIVATE KEY",
    b"X509 CRL",
}
# Matches a full BEGIN/END armored block; the backreference (\1) forces the
# END label to match the BEGIN label, and DOTALL lets .+? span the body lines.
_PEM_RE = re.compile(
    b"----[- ]BEGIN ("
    + b"|".join(_PEMS)
    + b""")[- ]----\r?
.+?\r?
----[- ]END \\1[- ]----\r?\n?""",
    re.DOTALL,
)
def is_pem_format(key: bytes) -> bool:
    """Return True when *key* contains a PEM-armored block."""
    return _PEM_RE.search(key) is not None
# Based on https://github.com/pyca/cryptography/blob/bcb70852d577b3f490f015378c75cba74986297b/src/cryptography/hazmat/primitives/serialization/ssh.py#L40-L46
# Suffix appended to the key-type token of OpenSSH certificates.
_CERT_SUFFIX = b"-cert-v01@openssh.com"
# Captures the leading key-type token of an OpenSSH public-key line.
_SSH_PUBKEY_RC = re.compile(rb"\A(\S+)[ \t]+(\S+)")
# Key-type tokens recognized as plain (non-certificate) OpenSSH public keys.
_SSH_KEY_FORMATS = [
    b"ssh-ed25519",
    b"ssh-rsa",
    b"ssh-dss",
    b"ecdsa-sha2-nistp256",
    b"ecdsa-sha2-nistp384",
    b"ecdsa-sha2-nistp521",
]
def is_ssh_key(key: bytes) -> bool:
    """Return True when *key* looks like an OpenSSH public key or certificate."""
    if any(fmt in key for fmt in _SSH_KEY_FORMATS):
        return True
    pubkey_match = _SSH_PUBKEY_RC.match(key)
    if pubkey_match is not None:
        # An OpenSSH certificate's key-type token carries the cert suffix.
        key_type = pubkey_match.group(1)
        return key_type.endswith(_CERT_SUFFIX)
    return False
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/exceptions.py | class PyJWTError(Exception):
"""
Base class for all exceptions
"""
pass
class InvalidTokenError(PyJWTError):
    # Root of all token-validation failures raised while decoding a JWT.
    pass
class DecodeError(InvalidTokenError):
    # The token could not be parsed/deserialized (bad segments, bad JSON, ...).
    pass
class InvalidSignatureError(DecodeError):
    # The token's signature did not verify against the supplied key.
    pass
class ExpiredSignatureError(InvalidTokenError):
    # The `exp` claim indicates the token has expired.
    pass
class InvalidAudienceError(InvalidTokenError):
    # The `aud` claim did not match the expected audience.
    pass
class InvalidIssuerError(InvalidTokenError):
    # The `iss` claim did not match the expected issuer.
    pass
class InvalidIssuedAtError(InvalidTokenError):
    # The `iat` claim is malformed (not an integer).
    pass
class ImmatureSignatureError(InvalidTokenError):
    # The token is not yet valid per its `iat`/`nbf` claim.
    pass
class InvalidKeyError(PyJWTError):
    # The supplied key material is unsuitable for the chosen algorithm.
    pass
class InvalidAlgorithmError(InvalidTokenError):
    # The token's algorithm is unknown or not in the allowed list.
    pass
class MissingRequiredClaimError(InvalidTokenError):
    """Raised when a claim marked as required is absent from the payload."""

    def __init__(self, claim: str) -> None:
        # Keep the claim name so callers can report which claim was missing.
        self.claim = claim

    def __str__(self) -> str:
        return 'Token is missing the "{}" claim'.format(self.claim)
class PyJWKError(PyJWTError):
    # A JWK could not be turned into a usable key/algorithm pair.
    pass
class PyJWKSetError(PyJWTError):
    # A JWK Set is empty, malformed, or contains no usable keys.
    pass
class PyJWKClientError(PyJWTError):
    # Generic failure while fetching or selecting keys from a JWKS endpoint.
    pass
class PyJWKClientConnectionError(PyJWKClientError):
    # Network-level failure (URL error or timeout) reaching the JWKS endpoint.
    pass
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/api_jwk.py | from __future__ import annotations
import json
import time
from typing import Any
from .algorithms import get_default_algorithms, has_crypto, requires_cryptography
from .exceptions import InvalidKeyError, PyJWKError, PyJWKSetError, PyJWTError
from .types import JWKDict
class PyJWK:
    """A single JSON Web Key.

    Resolves the algorithm from the JWK's ``alg`` member (or infers it from
    ``kty``/``crv`` when ``alg`` is absent) and materializes the underlying
    key object via that algorithm's ``from_jwk``.
    """
    def __init__(self, jwk_data: JWKDict, algorithm: str | None = None) -> None:
        self._algorithms = get_default_algorithms()
        self._jwk_data = jwk_data
        kty = self._jwk_data.get("kty", None)
        if not kty:
            raise InvalidKeyError(f"kty is not found: {self._jwk_data}")
        # An explicit `algorithm` argument wins; otherwise use the JWK's `alg`.
        if not algorithm and isinstance(self._jwk_data, dict):
            algorithm = self._jwk_data.get("alg", None)
        if not algorithm:
            # Determine alg with kty (and crv).
            crv = self._jwk_data.get("crv", None)
            if kty == "EC":
                # No crv defaults to the P-256 algorithm.
                if crv == "P-256" or not crv:
                    algorithm = "ES256"
                elif crv == "P-384":
                    algorithm = "ES384"
                elif crv == "P-521":
                    algorithm = "ES512"
                elif crv == "secp256k1":
                    algorithm = "ES256K"
                else:
                    raise InvalidKeyError(f"Unsupported crv: {crv}")
            elif kty == "RSA":
                algorithm = "RS256"
            elif kty == "oct":
                algorithm = "HS256"
            elif kty == "OKP":
                if not crv:
                    raise InvalidKeyError(f"crv is not found: {self._jwk_data}")
                # NOTE: only Ed25519 is inferred here; other OKP curves must
                # supply `alg` explicitly.
                if crv == "Ed25519":
                    algorithm = "EdDSA"
                else:
                    raise InvalidKeyError(f"Unsupported crv: {crv}")
            else:
                raise InvalidKeyError(f"Unsupported kty: {kty}")
        if not has_crypto and algorithm in requires_cryptography:
            raise PyJWKError(f"{algorithm} requires 'cryptography' to be installed.")
        # Algorithm implementation used for this key, and the concrete key object.
        self.Algorithm = self._algorithms.get(algorithm)
        if not self.Algorithm:
            raise PyJWKError(f"Unable to find an algorithm for key: {self._jwk_data}")
        self.key = self.Algorithm.from_jwk(self._jwk_data)
    @staticmethod
    def from_dict(obj: JWKDict, algorithm: str | None = None) -> "PyJWK":
        """Build a PyJWK from an already-parsed JWK dict."""
        return PyJWK(obj, algorithm)
    @staticmethod
    def from_json(data: str, algorithm: str | None = None) -> "PyJWK":
        """Build a PyJWK from a JWK JSON string."""
        obj = json.loads(data)
        return PyJWK.from_dict(obj, algorithm)
    @property
    def key_type(self) -> str | None:
        # The JWK `kty` member (e.g. "RSA", "EC", "oct", "OKP").
        return self._jwk_data.get("kty", None)
    @property
    def key_id(self) -> str | None:
        # The JWK `kid` member, if present.
        return self._jwk_data.get("kid", None)
    @property
    def public_key_use(self) -> str | None:
        # The JWK `use` member ("sig"/"enc"), if present.
        return self._jwk_data.get("use", None)
class PyJWKSet:
    """A JSON Web Key Set: the parsed ``keys`` array of a JWKS document.

    Unusable entries (e.g. unsupported algorithms) are skipped rather than
    failing the whole set; an error is raised only when nothing is usable.
    """
    def __init__(self, keys: list[JWKDict]) -> None:
        self.keys = []
        if not keys:
            raise PyJWKSetError("The JWK Set did not contain any keys")
        if not isinstance(keys, list):
            raise PyJWKSetError("Invalid JWK Set value")
        for key in keys:
            try:
                self.keys.append(PyJWK(key))
            except PyJWTError:
                # skip unusable keys
                continue
        if len(self.keys) == 0:
            raise PyJWKSetError(
                "The JWK Set did not contain any usable keys. Perhaps 'cryptography' is not installed?"
            )
    @staticmethod
    def from_dict(obj: dict[str, Any]) -> "PyJWKSet":
        """Build a PyJWKSet from a parsed JWKS document ({"keys": [...]})."""
        keys = obj.get("keys", [])
        return PyJWKSet(keys)
    @staticmethod
    def from_json(data: str) -> "PyJWKSet":
        """Build a PyJWKSet from a JWKS JSON string."""
        obj = json.loads(data)
        return PyJWKSet.from_dict(obj)
    def __getitem__(self, kid: str) -> "PyJWK":
        # Linear scan by `kid`; raises KeyError when no key matches.
        for key in self.keys:
            if key.key_id == kid:
                return key
        raise KeyError(f"keyset has no key for kid: {kid}")
class PyJWTSetWithTimestamp:
    """Pairs a JWK set with the monotonic time at which it was cached."""

    def __init__(self, jwk_set: PyJWKSet):
        # Capture the wrapped set and stamp it with the current monotonic clock.
        self.jwk_set = jwk_set
        self.timestamp = time.monotonic()

    def get_jwk_set(self) -> PyJWKSet:
        """Return the wrapped JWK set."""
        return self.jwk_set

    def get_timestamp(self) -> float:
        """Return the monotonic time recorded at construction."""
        return self.timestamp
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/jwks_client.py | import json
import urllib.request
from functools import lru_cache
from ssl import SSLContext
from typing import Any, Dict, List, Optional
from urllib.error import URLError
from .api_jwk import PyJWK, PyJWKSet
from .api_jwt import decode_complete as decode_token
from .exceptions import PyJWKClientConnectionError, PyJWKClientError
from .jwk_set_cache import JWKSetCache
class PyJWKClient:
    """Fetches and caches signing keys from a remote JWKS endpoint.

    Supports two cache layers: an optional time-bounded cache of the whole
    JWK set, and an optional LRU cache of individual resolved signing keys.
    """
    def __init__(
        self,
        uri: str,
        cache_keys: bool = False,
        max_cached_keys: int = 16,
        cache_jwk_set: bool = True,
        lifespan: int = 300,
        headers: Optional[Dict[str, Any]] = None,
        timeout: int = 30,
        ssl_context: Optional[SSLContext] = None,
    ):
        if headers is None:
            headers = {}
        self.uri = uri
        self.jwk_set_cache: Optional[JWKSetCache] = None
        self.headers = headers
        self.timeout = timeout
        self.ssl_context = ssl_context
        if cache_jwk_set:
            # Init jwt set cache with default or given lifespan.
            # Default lifespan is 300 seconds (5 minutes).
            if lifespan <= 0:
                raise PyJWKClientError(
                    f'Lifespan must be greater than 0, the input is "{lifespan}"'
                )
            self.jwk_set_cache = JWKSetCache(lifespan)
        else:
            self.jwk_set_cache = None
        if cache_keys:
            # Cache signing keys
            # Ignore mypy (https://github.com/python/mypy/issues/2427)
            # NOTE: rebinds the bound method with an instance-local LRU cache.
            self.get_signing_key = lru_cache(maxsize=max_cached_keys)(self.get_signing_key) # type: ignore
    def fetch_data(self) -> Any:
        """GET the JWKS document; wraps network failures in
        PyJWKClientConnectionError and updates the JWK-set cache either way."""
        jwk_set: Any = None
        try:
            r = urllib.request.Request(url=self.uri, headers=self.headers)
            with urllib.request.urlopen(
                r, timeout=self.timeout, context=self.ssl_context
            ) as response:
                jwk_set = json.load(response)
        except (URLError, TimeoutError) as e:
            raise PyJWKClientConnectionError(
                f'Fail to fetch data from the url, err: "{e}"'
            )
        else:
            return jwk_set
        finally:
            # On failure this stores None, which clears the cache entry.
            if self.jwk_set_cache is not None:
                self.jwk_set_cache.put(jwk_set)
    def get_jwk_set(self, refresh: bool = False) -> PyJWKSet:
        """Return the parsed JWK set, from cache unless *refresh* is True."""
        data = None
        if self.jwk_set_cache is not None and not refresh:
            data = self.jwk_set_cache.get()
        if data is None:
            data = self.fetch_data()
        if not isinstance(data, dict):
            raise PyJWKClientError("The JWKS endpoint did not return a JSON object")
        return PyJWKSet.from_dict(data)
    def get_signing_keys(self, refresh: bool = False) -> List[PyJWK]:
        """Return keys usable for signature verification: those with a `kid`
        whose `use` is "sig" or unspecified."""
        jwk_set = self.get_jwk_set(refresh)
        signing_keys = [
            jwk_set_key
            for jwk_set_key in jwk_set.keys
            if jwk_set_key.public_key_use in ["sig", None] and jwk_set_key.key_id
        ]
        if not signing_keys:
            raise PyJWKClientError("The JWKS endpoint did not contain any signing keys")
        return signing_keys
    def get_signing_key(self, kid: str) -> PyJWK:
        """Return the signing key matching *kid*, refreshing the JWK set once
        if it is not found in the cached set."""
        signing_keys = self.get_signing_keys()
        signing_key = self.match_kid(signing_keys, kid)
        if not signing_key:
            # If no matching signing key from the jwk set, refresh the jwk set and try again.
            signing_keys = self.get_signing_keys(refresh=True)
            signing_key = self.match_kid(signing_keys, kid)
            if not signing_key:
                raise PyJWKClientError(
                    f'Unable to find a signing key that matches: "{kid}"'
                )
        return signing_key
    def get_signing_key_from_jwt(self, token: str) -> PyJWK:
        """Resolve the signing key for *token* from its (unverified) `kid` header."""
        unverified = decode_token(token, options={"verify_signature": False})
        header = unverified["header"]
        return self.get_signing_key(header.get("kid"))
    @staticmethod
    def match_kid(signing_keys: List[PyJWK], kid: str) -> Optional[PyJWK]:
        """Return the first key whose `kid` equals *kid*, or None."""
        signing_key = None
        for key in signing_keys:
            if key.key_id == kid:
                signing_key = key
                break
        return signing_key
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/api_jws.py | from __future__ import annotations
import binascii
import json
import warnings
from typing import TYPE_CHECKING, Any
from .algorithms import (
Algorithm,
get_default_algorithms,
has_crypto,
requires_cryptography,
)
from .exceptions import (
DecodeError,
InvalidAlgorithmError,
InvalidSignatureError,
InvalidTokenError,
)
from .utils import base64url_decode, base64url_encode
from .warnings import RemovedInPyjwt3Warning
if TYPE_CHECKING:
from .algorithms import AllowedPrivateKeys, AllowedPublicKeys
class PyJWS:
    """Compact JSON Web Signature (JWS, RFC 7515) encoder/decoder.

    Operates on raw ``bytes`` payloads; PyJWT's higher-level API adds JSON
    claim-set handling on top of this class.
    """
    header_typ = "JWT"  # default "typ" header value; omitted if set falsy
    def __init__(
        self,
        algorithms: list[str] | None = None,
        options: dict[str, Any] | None = None,
    ) -> None:
        """Create a codec restricted to *algorithms* (all known ones if None)."""
        self._algorithms = get_default_algorithms()
        self._valid_algs = (
            set(algorithms) if algorithms is not None else set(self._algorithms)
        )
        # Remove algorithms that aren't on the whitelist
        for key in list(self._algorithms.keys()):
            if key not in self._valid_algs:
                del self._algorithms[key]
        if options is None:
            options = {}
        self.options = {**self._get_default_options(), **options}
    @staticmethod
    def _get_default_options() -> dict[str, bool]:
        # Signature verification is on unless the caller explicitly opts out.
        return {"verify_signature": True}
    def register_algorithm(self, alg_id: str, alg_obj: Algorithm) -> None:
        """
        Registers a new Algorithm for use when creating and verifying tokens.
        """
        if alg_id in self._algorithms:
            raise ValueError("Algorithm already has a handler.")
        if not isinstance(alg_obj, Algorithm):
            raise TypeError("Object is not of type `Algorithm`")
        self._algorithms[alg_id] = alg_obj
        self._valid_algs.add(alg_id)
    def unregister_algorithm(self, alg_id: str) -> None:
        """
        Unregisters an Algorithm for use when creating and verifying tokens
        Throws KeyError if algorithm is not registered.
        """
        if alg_id not in self._algorithms:
            raise KeyError(
                "The specified algorithm could not be removed"
                " because it is not registered."
            )
        del self._algorithms[alg_id]
        self._valid_algs.remove(alg_id)
    def get_algorithms(self) -> list[str]:
        """
        Returns a list of supported values for the 'alg' parameter.
        """
        return list(self._valid_algs)
    def get_algorithm_by_name(self, alg_name: str) -> Algorithm:
        """
        For a given string name, return the matching Algorithm object.
        Example usage:
        >>> jws_obj.get_algorithm_by_name("RS256")
        """
        try:
            return self._algorithms[alg_name]
        except KeyError as e:
            # Distinguish "needs the optional cryptography package" from a
            # genuinely unknown algorithm for a friendlier error message.
            if not has_crypto and alg_name in requires_cryptography:
                raise NotImplementedError(
                    f"Algorithm '{alg_name}' could not be found. Do you have cryptography installed?"
                ) from e
            raise NotImplementedError("Algorithm not supported") from e
    def encode(
        self,
        payload: bytes,
        key: AllowedPrivateKeys | str | bytes,
        algorithm: str | None = "HS256",
        headers: dict[str, Any] | None = None,
        json_encoder: type[json.JSONEncoder] | None = None,
        is_payload_detached: bool = False,
        sort_headers: bool = True,
    ) -> str:
        """Sign *payload* and return the compact JWS serialization (str).

        ``headers["alg"]`` overrides *algorithm*, and ``headers["b64"] is
        False`` forces detached-payload mode (RFC 7797): the payload is
        signed but not embedded in the returned token.
        """
        segments = []
        # declare a new var to narrow the type for type checkers
        algorithm_: str = algorithm if algorithm is not None else "none"
        # Prefer headers values if present to function parameters.
        if headers:
            headers_alg = headers.get("alg")
            if headers_alg:
                algorithm_ = headers["alg"]
            headers_b64 = headers.get("b64")
            if headers_b64 is False:
                is_payload_detached = True
        # Header
        header: dict[str, Any] = {"typ": self.header_typ, "alg": algorithm_}
        if headers:
            self._validate_headers(headers)
            header.update(headers)
        if not header["typ"]:
            # A falsy "typ" (e.g. None/"") means "omit the typ header".
            del header["typ"]
        if is_payload_detached:
            header["b64"] = False
        elif "b64" in header:
            # True is the standard value for b64, so no need for it
            del header["b64"]
        json_header = json.dumps(
            header, separators=(",", ":"), cls=json_encoder, sort_keys=sort_headers
        ).encode()
        segments.append(base64url_encode(json_header))
        if is_payload_detached:
            # Detached payload is signed raw, without base64url encoding.
            msg_payload = payload
        else:
            msg_payload = base64url_encode(payload)
        segments.append(msg_payload)
        # Segments
        signing_input = b".".join(segments)
        alg_obj = self.get_algorithm_by_name(algorithm_)
        key = alg_obj.prepare_key(key)
        signature = alg_obj.sign(signing_input, key)
        segments.append(base64url_encode(signature))
        # Don't put the payload content inside the encoded token when detached
        if is_payload_detached:
            segments[1] = b""
        encoded_string = b".".join(segments)
        return encoded_string.decode("utf-8")
    def decode_complete(
        self,
        jwt: str | bytes,
        key: AllowedPublicKeys | str | bytes = "",
        algorithms: list[str] | None = None,
        options: dict[str, Any] | None = None,
        detached_payload: bytes | None = None,
        **kwargs,
    ) -> dict[str, Any]:
        """Verify *jwt* and return ``{"payload", "header", "signature"}``.

        *detached_payload* must be supplied when the token was produced with
        ``b64: false`` (detached payload).  Extra kwargs are deprecated.
        """
        if kwargs:
            warnings.warn(
                "passing additional kwargs to decode_complete() is deprecated "
                "and will be removed in pyjwt version 3. "
                f"Unsupported kwargs: {tuple(kwargs.keys())}",
                RemovedInPyjwt3Warning,
            )
        if options is None:
            options = {}
        merged_options = {**self.options, **options}
        verify_signature = merged_options["verify_signature"]
        # Requiring an explicit algorithm list prevents algorithm-confusion
        # attacks (caller must state which algs they accept).
        if verify_signature and not algorithms:
            raise DecodeError(
                'It is required that you pass in a value for the "algorithms" argument when calling decode().'
            )
        payload, signing_input, header, signature = self._load(jwt)
        if header.get("b64", True) is False:
            if detached_payload is None:
                raise DecodeError(
                    'It is required that you pass in a value for the "detached_payload" argument to decode a message having the b64 header set to false.'
                )
            # Reconstruct the signing input with the caller-provided payload.
            payload = detached_payload
            signing_input = b".".join([signing_input.rsplit(b".", 1)[0], payload])
        if verify_signature:
            self._verify_signature(signing_input, header, signature, key, algorithms)
        return {
            "payload": payload,
            "header": header,
            "signature": signature,
        }
    def decode(
        self,
        jwt: str | bytes,
        key: AllowedPublicKeys | str | bytes = "",
        algorithms: list[str] | None = None,
        options: dict[str, Any] | None = None,
        detached_payload: bytes | None = None,
        **kwargs,
    ) -> Any:
        """Verify *jwt* and return only its payload bytes.

        Thin wrapper over :meth:`decode_complete`.
        """
        if kwargs:
            warnings.warn(
                "passing additional kwargs to decode() is deprecated "
                "and will be removed in pyjwt version 3. "
                f"Unsupported kwargs: {tuple(kwargs.keys())}",
                RemovedInPyjwt3Warning,
            )
        decoded = self.decode_complete(
            jwt, key, algorithms, options, detached_payload=detached_payload
        )
        return decoded["payload"]
    def get_unverified_header(self, jwt: str | bytes) -> dict[str, Any]:
        """Returns back the JWT header parameters as a dict()
        Note: The signature is not verified so the header parameters
        should not be fully trusted until signature verification is complete
        """
        headers = self._load(jwt)[2]
        self._validate_headers(headers)
        return headers
    def _load(self, jwt: str | bytes) -> tuple[bytes, bytes, dict[str, Any], bytes]:
        """Split a compact JWS into (payload, signing_input, header, signature).

        Raises DecodeError for malformed tokens, bad base64 padding, or a
        header that is not a JSON object.
        """
        if isinstance(jwt, str):
            jwt = jwt.encode("utf-8")
        if not isinstance(jwt, bytes):
            raise DecodeError(f"Invalid token type. Token must be a {bytes}")
        try:
            # signing_input is "header.payload"; crypto_segment the signature.
            signing_input, crypto_segment = jwt.rsplit(b".", 1)
            header_segment, payload_segment = signing_input.split(b".", 1)
        except ValueError as err:
            raise DecodeError("Not enough segments") from err
        try:
            header_data = base64url_decode(header_segment)
        except (TypeError, binascii.Error) as err:
            raise DecodeError("Invalid header padding") from err
        try:
            header = json.loads(header_data)
        except ValueError as e:
            raise DecodeError(f"Invalid header string: {e}") from e
        if not isinstance(header, dict):
            raise DecodeError("Invalid header string: must be a json object")
        try:
            payload = base64url_decode(payload_segment)
        except (TypeError, binascii.Error) as err:
            raise DecodeError("Invalid payload padding") from err
        try:
            signature = base64url_decode(crypto_segment)
        except (TypeError, binascii.Error) as err:
            raise DecodeError("Invalid crypto padding") from err
        return (payload, signing_input, header, signature)
    def _verify_signature(
        self,
        signing_input: bytes,
        header: dict[str, Any],
        signature: bytes,
        key: AllowedPublicKeys | str | bytes = "",
        algorithms: list[str] | None = None,
    ) -> None:
        """Verify *signature* over *signing_input* with the header's "alg".

        Raises InvalidAlgorithmError for a missing/disallowed/unsupported
        algorithm and InvalidSignatureError on verification failure.
        """
        try:
            alg = header["alg"]
        except KeyError:
            raise InvalidAlgorithmError("Algorithm not specified")
        # The token's alg must be in the caller's allow-list; this is the
        # core defense against algorithm-substitution attacks.
        if not alg or (algorithms is not None and alg not in algorithms):
            raise InvalidAlgorithmError("The specified alg value is not allowed")
        try:
            alg_obj = self.get_algorithm_by_name(alg)
        except NotImplementedError as e:
            raise InvalidAlgorithmError("Algorithm not supported") from e
        prepared_key = alg_obj.prepare_key(key)
        if not alg_obj.verify(signing_input, prepared_key, signature):
            raise InvalidSignatureError("Signature verification failed")
    def _validate_headers(self, headers: dict[str, Any]) -> None:
        # Only "kid" currently has a structural requirement.
        if "kid" in headers:
            self._validate_kid(headers["kid"])
    def _validate_kid(self, kid: Any) -> None:
        if not isinstance(kid, str):
            raise InvalidTokenError("Key ID header parameter must be a string")
# Module-level singleton whose bound methods are re-exported as the
# functional jws API (jwt.api_jws.encode / decode / ...).
_jws_global_obj = PyJWS()
encode = _jws_global_obj.encode
decode_complete = _jws_global_obj.decode_complete
decode = _jws_global_obj.decode
register_algorithm = _jws_global_obj.register_algorithm
unregister_algorithm = _jws_global_obj.unregister_algorithm
get_algorithm_by_name = _jws_global_obj.get_algorithm_by_name
get_unverified_header = _jws_global_obj.get_unverified_header
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/jwt/help.py | import json
import platform
import sys
from typing import Dict
from . import __version__ as pyjwt_version
try:
    import cryptography
    cryptography_version = cryptography.__version__
except ModuleNotFoundError:
    # cryptography is an optional dependency; report an empty version
    # string when it is not installed.
    cryptography_version = ""
def info() -> Dict[str, Dict[str, str]]:
    """
    Generate information for a bug report.
    Based on the requests package help utility module.
    """
    # platform calls can raise OSError on exotic systems; degrade gracefully.
    try:
        platform_info = {
            "system": platform.system(),
            "release": platform.release(),
        }
    except OSError:
        platform_info = {"system": "Unknown", "release": "Unknown"}
    implementation = platform.python_implementation()
    if implementation == "CPython":
        implementation_version = platform.python_version()
    elif implementation == "PyPy":
        # PyPy exposes its own version tuple separately from the emulated
        # CPython version.
        pypy_info = sys.pypy_version_info  # type: ignore[attr-defined]
        implementation_version = (
            f"{pypy_info.major}.{pypy_info.minor}.{pypy_info.micro}"
        )
        if pypy_info.releaselevel != "final":
            implementation_version += pypy_info.releaselevel
    else:
        implementation_version = "Unknown"
    return {
        "platform": platform_info,
        "implementation": {
            "name": implementation,
            "version": implementation_version,
        },
        "cryptography": {"version": cryptography_version},
        "pyjwt": {"version": pyjwt_version},
    }
def main() -> None:
    """Pretty-print the bug information as JSON."""
    report = info()
    print(json.dumps(report, indent=2, sort_keys=True))
if __name__ == "__main__":
    main()
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_decorator.py | # coding:utf-8
import asyncio
import logging
import operator
from typing import Any, Callable, Iterable, Optional, Type, Union
from backoff._common import (
_prepare_logger,
_config_handlers,
_log_backoff,
_log_giveup
)
from backoff._jitter import full_jitter
from backoff import _async, _sync
from backoff._typing import (
_CallableT,
_Handler,
_Jitterer,
_MaybeCallable,
_MaybeLogger,
_MaybeSequence,
_Predicate,
_WaitGenerator,
)
def on_predicate(wait_gen: _WaitGenerator,
                 predicate: _Predicate[Any] = operator.not_,
                 *,
                 max_tries: Optional[_MaybeCallable[int]] = None,
                 max_time: Optional[_MaybeCallable[float]] = None,
                 jitter: Union[_Jitterer, None] = full_jitter,
                 on_success: Union[_Handler, Iterable[_Handler], None] = None,
                 on_backoff: Union[_Handler, Iterable[_Handler], None] = None,
                 on_giveup: Union[_Handler, Iterable[_Handler], None] = None,
                 logger: _MaybeLogger = 'backoff',
                 backoff_log_level: int = logging.INFO,
                 giveup_log_level: int = logging.ERROR,
                 **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
    """Returns decorator for backoff and retry triggered by predicate.
    Args:
        wait_gen: A generator yielding successive wait times in
            seconds.
        predicate: A function which when called on the return value of
            the target function will trigger backoff when considered
            truthily. If not specified, the default behavior is to
            backoff on falsey return values.
        max_tries: The maximum number of attempts to make before giving
            up. In the case of failure, the result of the last attempt
            will be returned. The default value of None means there
            is no limit to the number of tries. If a callable is passed,
            it will be evaluated at runtime and its return value used.
        max_time: The maximum total amount of time to try for before
            giving up. If this time expires, the result of the last
            attempt will be returned. If a callable is passed, it will
            be evaluated at runtime and its return value used.
        jitter: A function of the value yielded by wait_gen returning
            the actual time to wait. This distributes wait times
            stochastically in order to avoid timing collisions across
            concurrent clients. Wait times are jittered by default
            using the full_jitter function. Jittering may be disabled
            altogether by passing jitter=None.
        on_success: Callable (or iterable of callables) with a unary
            signature to be called in the event of success. The
            parameter is a dict containing details about the invocation.
        on_backoff: Callable (or iterable of callables) with a unary
            signature to be called in the event of a backoff. The
            parameter is a dict containing details about the invocation.
        on_giveup: Callable (or iterable of callables) with a unary
            signature to be called in the event that max_tries
            is exceeded. The parameter is a dict containing details
            about the invocation.
        logger: Name of logger or Logger object to log to. Defaults to
            'backoff'.
        backoff_log_level: log level for the backoff event. Defaults to "INFO"
        giveup_log_level: log level for the give up event. Defaults to "ERROR"
        **wait_gen_kwargs: Any additional keyword args specified will be
            passed to wait_gen when it is initialized. Any callable
            args will first be evaluated and their return values passed.
            This is useful for runtime configuration.
    """
    def decorate(target):
        # Normalize the decoration-time arguments once, when the target is
        # wrapped (the nonlocals are rebound to their resolved forms).
        nonlocal logger, on_success, on_backoff, on_giveup
        logger = _prepare_logger(logger)
        on_success = _config_handlers(on_success)
        on_backoff = _config_handlers(
            on_backoff,
            default_handler=_log_backoff,
            logger=logger,
            log_level=backoff_log_level
        )
        on_giveup = _config_handlers(
            on_giveup,
            default_handler=_log_giveup,
            logger=logger,
            log_level=giveup_log_level
        )
        # Pick the retry loop that matches the target's sync/async nature.
        if asyncio.iscoroutinefunction(target):
            retry = _async.retry_predicate
        else:
            retry = _sync.retry_predicate
        return retry(
            target,
            wait_gen,
            predicate,
            max_tries=max_tries,
            max_time=max_time,
            jitter=jitter,
            on_success=on_success,
            on_backoff=on_backoff,
            on_giveup=on_giveup,
            wait_gen_kwargs=wait_gen_kwargs
        )
    # Return a function which decorates a target with a retry loop.
    return decorate
def on_exception(wait_gen: _WaitGenerator,
                 exception: _MaybeSequence[Type[Exception]],
                 *,
                 max_tries: Optional[_MaybeCallable[int]] = None,
                 max_time: Optional[_MaybeCallable[float]] = None,
                 jitter: Union[_Jitterer, None] = full_jitter,
                 giveup: _Predicate[Exception] = lambda e: False,
                 on_success: Union[_Handler, Iterable[_Handler], None] = None,
                 on_backoff: Union[_Handler, Iterable[_Handler], None] = None,
                 on_giveup: Union[_Handler, Iterable[_Handler], None] = None,
                 raise_on_giveup: bool = True,
                 logger: _MaybeLogger = 'backoff',
                 backoff_log_level: int = logging.INFO,
                 giveup_log_level: int = logging.ERROR,
                 **wait_gen_kwargs: Any) -> Callable[[_CallableT], _CallableT]:
    """Returns decorator for backoff and retry triggered by exception.
    Args:
        wait_gen: A generator yielding successive wait times in
            seconds.
        exception: An exception type (or tuple of types) which triggers
            backoff.
        max_tries: The maximum number of attempts to make before giving
            up. Once exhausted, the exception will be allowed to escape.
            The default value of None means there is no limit to the
            number of tries. If a callable is passed, it will be
            evaluated at runtime and its return value used.
        max_time: The maximum total amount of time to try for before
            giving up. Once expired, the exception will be allowed to
            escape. If a callable is passed, it will be
            evaluated at runtime and its return value used.
        jitter: A function of the value yielded by wait_gen returning
            the actual time to wait. This distributes wait times
            stochastically in order to avoid timing collisions across
            concurrent clients. Wait times are jittered by default
            using the full_jitter function. Jittering may be disabled
            altogether by passing jitter=None.
        giveup: Function accepting an exception instance and
            returning whether or not to give up. Optional. The default
            is to always continue.
        on_success: Callable (or iterable of callables) with a unary
            signature to be called in the event of success. The
            parameter is a dict containing details about the invocation.
        on_backoff: Callable (or iterable of callables) with a unary
            signature to be called in the event of a backoff. The
            parameter is a dict containing details about the invocation.
        on_giveup: Callable (or iterable of callables) with a unary
            signature to be called in the event that max_tries
            is exceeded. The parameter is a dict containing details
            about the invocation.
        raise_on_giveup: Boolean indicating whether the registered exceptions
            should be raised on giveup. Defaults to `True`
        logger: Name or Logger object to log to. Defaults to 'backoff'.
        backoff_log_level: log level for the backoff event. Defaults to "INFO"
        giveup_log_level: log level for the give up event. Defaults to "ERROR"
        **wait_gen_kwargs: Any additional keyword args specified will be
            passed to wait_gen when it is initialized. Any callable
            args will first be evaluated and their return values passed.
            This is useful for runtime configuration.
    """
    def decorate(target):
        # Normalize the decoration-time arguments once, when the target is
        # wrapped (the nonlocals are rebound to their resolved forms).
        nonlocal logger, on_success, on_backoff, on_giveup
        logger = _prepare_logger(logger)
        on_success = _config_handlers(on_success)
        on_backoff = _config_handlers(
            on_backoff,
            default_handler=_log_backoff,
            logger=logger,
            log_level=backoff_log_level,
        )
        on_giveup = _config_handlers(
            on_giveup,
            default_handler=_log_giveup,
            logger=logger,
            log_level=giveup_log_level,
        )
        # Pick the retry loop that matches the target's sync/async nature.
        if asyncio.iscoroutinefunction(target):
            retry = _async.retry_exception
        else:
            retry = _sync.retry_exception
        return retry(
            target,
            wait_gen,
            exception,
            max_tries=max_tries,
            max_time=max_time,
            jitter=jitter,
            giveup=giveup,
            on_success=on_success,
            on_backoff=on_backoff,
            on_giveup=on_giveup,
            raise_on_giveup=raise_on_giveup,
            wait_gen_kwargs=wait_gen_kwargs
        )
    # Return a function which decorates a target with a retry loop.
    return decorate
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_typing.py | # coding:utf-8
import logging
import sys
from typing import (Any, Callable, Coroutine, Dict, Generator, Sequence, Tuple,
TypeVar, Union)
if sys.version_info >= (3, 8):  # pragma: no cover
    from typing import TypedDict
else:  # pragma: no cover
    # use typing_extensions if installed but don't require it
    try:
        from typing_extensions import TypedDict
    except ImportError:
        # Minimal stand-in: a plain dict subclass that tolerates the
        # ``total=`` class keyword, so the class definitions below still
        # import cleanly without typing_extensions.
        class TypedDict(dict):
            def __init_subclass__(cls, **kwargs: Any) -> None:
                return super().__init_subclass__()
class _Details(TypedDict):
    # Keys always present in a backoff event details dict.
    target: Callable[..., Any]  # the decorated function
    args: Tuple[Any, ...]  # positional args of the current call
    kwargs: Dict[str, Any]  # keyword args of the current call
    tries: int  # number of attempts so far
    elapsed: float  # seconds elapsed since the first attempt
class Details(_Details, total=False):
    # Optional keys (total=False): present only for certain events.
    wait: float  # present in the on_backoff handler case for either decorator
    value: Any  # present in the on_predicate decorator case
T = TypeVar("T")
# Type of the decorated callable, preserved by the decorators.
_CallableT = TypeVar('_CallableT', bound=Callable[..., Any])
# Event handler: a sync or async callable receiving a Details dict.
_Handler = Union[
    Callable[[Details], None],
    Callable[[Details], Coroutine[Any, Any, None]],
]
# Maps a raw wait value (seconds) to a jittered one.
_Jitterer = Callable[[float], float]
# Either a value or a zero-argument callable producing it at runtime.
_MaybeCallable = Union[T, Callable[[], T]]
_MaybeLogger = Union[str, logging.Logger, None]
_MaybeSequence = Union[T, Sequence[T]]
_Predicate = Callable[[T], bool]
# Generator of successive wait times in seconds.
_WaitGenerator = Callable[..., Generator[float, None, None]]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_common.py | # coding:utf-8
import functools
import logging
import sys
import traceback
import warnings
# Use module-specific logger with a default null handler.
_logger = logging.getLogger('backoff')
_logger.addHandler(logging.NullHandler()) # pragma: no cover
_logger.setLevel(logging.INFO)
# Evaluate arg that can be either a fixed value or a callable.
def _maybe_call(f, *args, **kwargs):
    """Return ``f(*args, **kwargs)`` if *f* is callable, else *f* itself.

    A callable that rejects the given arguments (TypeError) is treated as
    a plain value and returned unchanged.
    """
    if not callable(f):
        return f
    try:
        return f(*args, **kwargs)
    except TypeError:
        return f
def _init_wait_gen(wait_gen, wait_gen_kwargs):
    """Instantiate *wait_gen* (evaluating callable kwargs) and prime it."""
    evaluated = {name: _maybe_call(val) for name, val in wait_gen_kwargs.items()}
    gen = wait_gen(**evaluated)
    gen.send(None)  # advance to the first yield so later .send() calls work
    return gen
def _next_wait(wait, send_value, jitter, elapsed, max_time):
    """Compute the next sleep interval from the *wait* generator.

    Applies *jitter* (if any) to the generated value and clamps the result
    so the total run time never exceeds *max_time*.
    """
    raw = wait.send(send_value)
    try:
        seconds = raw if jitter is None else jitter(raw)
    except TypeError:
        # Legacy zero-argument jitter functions: additive behavior.
        warnings.warn(
            "Nullary jitter function signature is deprecated. Use "
            "unary signature accepting a wait value in seconds and "
            "returning a jittered version of it.",
            DeprecationWarning,
            stacklevel=2,
        )
        seconds = raw + jitter()
    # don't sleep longer than remaining allotted max_time
    if max_time is not None:
        seconds = min(seconds, max_time - elapsed)
    return seconds
def _prepare_logger(logger):
    """Resolve a logger name to a Logger instance; pass Logger objects through."""
    return logging.getLogger(logger) if isinstance(logger, str) else logger
# Configure handler list with user specified handler and optionally
# with a default handler bound to the specified logger.
def _config_handlers(
    user_handlers, *, default_handler=None, logger=None, log_level=None
):
    """Build the list of event handlers to invoke for a backoff event.

    When *logger* is given, a default logging handler bound to it is placed
    first; any user-supplied handler(s) follow.  *user_handlers* may be a
    single callable, an iterable of callables, or None.
    """
    configured = []
    if logger is not None:
        assert log_level is not None, "Log level is not specified"
        # bind the specified logger to the default log handler
        configured.append(
            functools.partial(default_handler, logger=logger, log_level=log_level)
        )
    if user_handlers is None:
        return configured
    # A single handler or an iterable of handlers — append accordingly.
    if hasattr(user_handlers, '__iter__'):
        configured.extend(user_handlers)
    else:
        configured.append(user_handlers)
    return configured
# Default backoff handler
def _log_backoff(details, logger, log_level):
    """Default on_backoff handler: log the retry at *log_level*.

    Shows the active exception if one is being handled, otherwise the
    predicate value that triggered the backoff.
    """
    exc_type, exc_value, _tb = sys.exc_info()
    if exc_value is None:
        cause = details['value']
    else:
        cause = traceback.format_exception_only(exc_type, exc_value)[-1].rstrip("\n")
    logger.log(
        log_level,
        "Backing off %s(...) for %.1fs (%s)",
        details['target'].__name__,
        details['wait'],
        cause,
    )
# Default giveup handler
def _log_giveup(details, logger, log_level):
    """Default on_giveup handler: log the abandonment at *log_level*.

    Shows the active exception if one is being handled, otherwise the
    predicate value that triggered the giveup.
    """
    exc_type, exc_value, _tb = sys.exc_info()
    if exc_value is None:
        cause = details['value']
    else:
        cause = traceback.format_exception_only(exc_type, exc_value)[-1].rstrip("\n")
    logger.log(
        log_level,
        "Giving up %s(...) after %d tries (%s)",
        details['target'].__name__,
        details['tries'],
        cause,
    )
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/__init__.py | # coding:utf-8
"""
Function decoration for backoff and retry
This module provides function decorators which can be used to wrap a
function such that it will be retried until some condition is met. It
is meant to be of use when accessing unreliable resources with the
potential for intermittent failures i.e. network resources and external
APIs. Somewhat more generally, it may also be of use for dynamically
polling resources for externally generated content.
For examples and full documentation see the README at
https://github.com/litl/backoff
"""
from backoff._decorator import on_exception, on_predicate
from backoff._jitter import full_jitter, random_jitter
from backoff._wait_gen import constant, expo, fibo, runtime
__all__ = [
'on_predicate',
'on_exception',
'constant',
'expo',
'fibo',
'runtime',
'full_jitter',
'random_jitter',
]
__version__ = "2.2.1"
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/types.py | # coding:utf-8
from ._typing import Details
__all__ = [
'Details'
]
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_async.py | # coding:utf-8
import datetime
import functools
import asyncio
from datetime import timedelta
from backoff._common import (_init_wait_gen, _maybe_call, _next_wait)
def _ensure_coroutine(coro_or_func):
    """Wrap a plain callable in a coroutine function; pass coroutine functions through."""
    if asyncio.iscoroutinefunction(coro_or_func):
        return coro_or_func
    @functools.wraps(coro_or_func)
    async def wrapper(*args, **kwargs):
        return coro_or_func(*args, **kwargs)
    return wrapper
def _ensure_coroutines(coros_or_funcs):
    """Apply :func:`_ensure_coroutine` to every element."""
    return list(map(_ensure_coroutine, coros_or_funcs))
async def _call_handlers(handlers,
                         *,
                         target, args, kwargs, tries, elapsed,
                         **extra):
    """Await each handler with a freshly built details dict."""
    details = dict(
        target=target,
        args=args,
        kwargs=kwargs,
        tries=tries,
        elapsed=elapsed,
        **extra,
    )
    for handler in handlers:
        await handler(details)
def retry_predicate(target, wait_gen, predicate,
                    *,
                    max_tries, max_time, jitter,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Async retry loop driven by a predicate on the return value.

    Returns a coroutine function wrapping *target* that retries while
    ``predicate(ret)`` is truthy, until max_tries/max_time is exhausted
    or the wait generator raises StopIteration.
    """
    # Handlers may be plain callables; normalize them all to coroutines.
    on_success = _ensure_coroutines(on_success)
    on_backoff = _ensure_coroutines(on_backoff)
    on_giveup = _ensure_coroutines(on_giveup)
    # Easy to implement, please report if you need this.
    assert not asyncio.iscoroutinefunction(max_tries)
    assert not asyncio.iscoroutinefunction(jitter)
    assert asyncio.iscoroutinefunction(target)
    @functools.wraps(target)
    async def retry(*args, **kwargs):
        # update variables from outer function args
        max_tries_value = _maybe_call(max_tries)
        max_time_value = _maybe_call(max_time)
        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = {
                "target": target,
                "args": args,
                "kwargs": kwargs,
                "tries": tries,
                "elapsed": elapsed,
            }
            ret = await target(*args, **kwargs)
            if predicate(ret):
                # Truthy predicate => back off (or give up if limits hit).
                max_tries_exceeded = (tries == max_tries_value)
                max_time_exceeded = (max_time_value is not None and
                                     elapsed >= max_time_value)
                if max_tries_exceeded or max_time_exceeded:
                    await _call_handlers(on_giveup, **details, value=ret)
                    break
                try:
                    seconds = _next_wait(wait, ret, jitter, elapsed,
                                         max_time_value)
                except StopIteration:
                    # Wait generator exhausted: treat as a giveup.
                    await _call_handlers(on_giveup, **details, value=ret)
                    break
                await _call_handlers(on_backoff, **details, value=ret,
                                     wait=seconds)
                # Note: there is no convenient way to pass explicit event
                # loop to decorator, so here we assume that either default
                # thread event loop is set and correct (it mostly is
                # by default), or Python >= 3.5.3 or Python >= 3.6 is used
                # where loop.get_event_loop() in coroutine guaranteed to
                # return correct value.
                # See for details:
                # <https://groups.google.com/forum/#!topic/python-tulip/yF9C-rFpiKk>
                # <https://bugs.python.org/issue28613>
                await asyncio.sleep(seconds)
                continue
            else:
                await _call_handlers(on_success, **details, value=ret)
                break
        return ret
    return retry
def retry_exception(target, wait_gen, exception,
                    *,
                    max_tries, max_time, jitter, giveup,
                    on_success, on_backoff, on_giveup, raise_on_giveup,
                    wait_gen_kwargs):
    """Async retry loop triggered by the given exception type(s).

    Returns a coroutine function wrapping *target* that retries when
    *exception* is raised, until *giveup* returns truthy or
    max_tries/max_time is exhausted.
    """
    # Handlers and the giveup predicate may be plain callables; normalize
    # them all to coroutines.
    on_success = _ensure_coroutines(on_success)
    on_backoff = _ensure_coroutines(on_backoff)
    on_giveup = _ensure_coroutines(on_giveup)
    giveup = _ensure_coroutine(giveup)
    # Easy to implement, please report if you need this.
    assert not asyncio.iscoroutinefunction(max_tries)
    assert not asyncio.iscoroutinefunction(jitter)
    @functools.wraps(target)
    async def retry(*args, **kwargs):
        max_tries_value = _maybe_call(max_tries)
        max_time_value = _maybe_call(max_time)
        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = {
                "target": target,
                "args": args,
                "kwargs": kwargs,
                "tries": tries,
                "elapsed": elapsed,
            }
            try:
                ret = await target(*args, **kwargs)
            except exception as e:
                giveup_result = await giveup(e)
                max_tries_exceeded = (tries == max_tries_value)
                max_time_exceeded = (max_time_value is not None and
                                     elapsed >= max_time_value)
                if giveup_result or max_tries_exceeded or max_time_exceeded:
                    await _call_handlers(on_giveup, **details, exception=e)
                    if raise_on_giveup:
                        raise
                    return None
                try:
                    seconds = _next_wait(wait, e, jitter, elapsed,
                                         max_time_value)
                except StopIteration:
                    # Wait generator exhausted: give up and re-raise.
                    await _call_handlers(on_giveup, **details, exception=e)
                    raise e
                await _call_handlers(on_backoff, **details, wait=seconds,
                                     exception=e)
                # Note: there is no convenient way to pass explicit event
                # loop to decorator, so here we assume that either default
                # thread event loop is set and correct (it mostly is
                # by default), or Python >= 3.5.3 or Python >= 3.6 is used
                # where loop.get_event_loop() in coroutine guaranteed to
                # return correct value.
                # See for details:
                # <https://groups.google.com/forum/#!topic/python-tulip/yF9C-rFpiKk>
                # <https://bugs.python.org/issue28613>
                await asyncio.sleep(seconds)
            else:
                await _call_handlers(on_success, **details)
                return ret
    return retry
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_jitter.py | # coding:utf-8
import random
def random_jitter(value: float) -> float:
    """Jitter the value a random number of milliseconds.
    This adds up to 1 second of additional time to the original value.
    Prior to backoff version 1.2 this was the default jitter behavior.
    Args:
        value: The unadulterated backoff value.
    """
    extra = random.random()  # uniformly in [0, 1)
    return value + extra
def full_jitter(value: float) -> float:
    """Jitter the value across the full range (0 to value).
    This corresponds to the "Full Jitter" algorithm specified in the
    AWS blog's post on the performance of various jitter algorithms.
    (http://www.awsarchitectureblog.com/2015/03/backoff.html)
    Args:
        value: The unadulterated backoff value.
    """
    jittered = random.uniform(0, value)
    return jittered
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_sync.py | # coding:utf-8
import datetime
import functools
import time
from datetime import timedelta
from backoff._common import (_init_wait_gen, _maybe_call, _next_wait)
def _call_handlers(hdlrs, target, args, kwargs, tries, elapsed, **extra):
details = {
'target': target,
'args': args,
'kwargs': kwargs,
'tries': tries,
'elapsed': elapsed,
}
details.update(extra)
for hdlr in hdlrs:
hdlr(details)
def retry_predicate(target, wait_gen, predicate,
                    *,
                    max_tries, max_time, jitter,
                    on_success, on_backoff, on_giveup,
                    wait_gen_kwargs):
    """Wrap *target* so it is retried while predicate(return value) is truthy.

    Args:
        target: The function to wrap.
        wait_gen: Generator factory yielding successive wait durations.
        predicate: Called on each return value; a truthy result triggers
            a retry (or a give-up once a limit is reached).
        max_tries: Attempt limit; a value or a callable returning one
            (resolved via ``_maybe_call``), or None for unlimited.
        max_time: Total elapsed-seconds limit; value or callable, or None.
        jitter: Passed through to ``_next_wait`` to perturb wait values.
        on_success: Handlers called when predicate rejects the result.
        on_backoff: Handlers called before each sleep between retries.
        on_giveup: Handlers called when retrying stops with a "bad" result.
        wait_gen_kwargs: Keyword arguments used to initialize wait_gen.
    """
    @functools.wraps(target)
    def retry(*args, **kwargs):
        # max_tries/max_time may be callables; resolve them once per call.
        max_tries_value = _maybe_call(max_tries)
        max_time_value = _maybe_call(max_time)
        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            # Elapsed time is measured before invoking the target, so the
            # details dict reflects the state at the start of the attempt.
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = {
                "target": target,
                "args": args,
                "kwargs": kwargs,
                "tries": tries,
                "elapsed": elapsed,
            }
            ret = target(*args, **kwargs)
            if predicate(ret):
                # The return value warrants a retry; check limits first.
                max_tries_exceeded = (tries == max_tries_value)
                max_time_exceeded = (max_time_value is not None and
                                     elapsed >= max_time_value)
                if max_tries_exceeded or max_time_exceeded:
                    _call_handlers(on_giveup, **details, value=ret)
                    break
                try:
                    seconds = _next_wait(wait, ret, jitter, elapsed,
                                         max_time_value)
                except StopIteration:
                    # Wait generator exhausted: stop retrying.
                    _call_handlers(on_giveup, **details)
                    break
                _call_handlers(on_backoff, **details,
                               value=ret, wait=seconds)
                time.sleep(seconds)
                continue
            else:
                _call_handlers(on_success, **details, value=ret)
                break
        # The most recent return value is returned even on give-up.
        return ret
    return retry
def retry_exception(target, wait_gen, exception,
                    *,
                    max_tries, max_time, jitter, giveup,
                    on_success, on_backoff, on_giveup, raise_on_giveup,
                    wait_gen_kwargs):
    """Wrap *target* so it is retried when it raises *exception*.

    Args:
        target: The function to wrap.
        wait_gen: Generator factory yielding successive wait durations.
        exception: Exception type (or tuple of types) that triggers a retry.
        max_tries: Attempt limit; a value or a callable returning one
            (resolved via ``_maybe_call``), or None for unlimited.
        max_time: Total elapsed-seconds limit; value or callable, or None.
        jitter: Passed through to ``_next_wait`` to perturb wait values.
        giveup: Predicate on the caught exception; truthy stops retrying.
        on_success: Handlers called when the target returns normally.
        on_backoff: Handlers called before each sleep between retries.
        on_giveup: Handlers called when retrying stops with an exception.
        raise_on_giveup: If true, re-raise on give-up; otherwise return None.
        wait_gen_kwargs: Keyword arguments used to initialize wait_gen.
    """
    @functools.wraps(target)
    def retry(*args, **kwargs):
        # max_tries/max_time may be callables; resolve them once per call.
        max_tries_value = _maybe_call(max_tries)
        max_time_value = _maybe_call(max_time)
        tries = 0
        start = datetime.datetime.now()
        wait = _init_wait_gen(wait_gen, wait_gen_kwargs)
        while True:
            tries += 1
            # Elapsed time is measured before invoking the target.
            elapsed = timedelta.total_seconds(datetime.datetime.now() - start)
            details = {
                "target": target,
                "args": args,
                "kwargs": kwargs,
                "tries": tries,
                "elapsed": elapsed,
            }
            try:
                ret = target(*args, **kwargs)
            except exception as e:
                max_tries_exceeded = (tries == max_tries_value)
                max_time_exceeded = (max_time_value is not None and
                                     elapsed >= max_time_value)
                if giveup(e) or max_tries_exceeded or max_time_exceeded:
                    _call_handlers(on_giveup, **details, exception=e)
                    if raise_on_giveup:
                        # Bare raise preserves the original traceback.
                        raise
                    return None
                try:
                    seconds = _next_wait(wait, e, jitter, elapsed,
                                         max_time_value)
                except StopIteration:
                    # Wait generator exhausted: give up and re-raise.
                    _call_handlers(on_giveup, **details, exception=e)
                    raise e
                _call_handlers(on_backoff, **details, wait=seconds,
                               exception=e)
                time.sleep(seconds)
            else:
                # Success path: notify handlers and return the result.
                _call_handlers(on_success, **details)
                return ret
    return retry
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/backoff/_wait_gen.py | # coding:utf-8
import itertools
from typing import Any, Callable, Generator, Iterable, Optional, Union
def expo(
    base: float = 2,
    factor: float = 1,
    max_value: Optional[float] = None
) -> Generator[float, Any, None]:
    """Generator yielding factor * base**n for n = 0, 1, 2, ...

    Args:
        base: The mathematical base of the exponentiation operation.
        factor: Factor to multiply the exponentiation by.
        max_value: Ceiling for the yielded values.  Once the true
            exponential sequence reaches this, max_value itself is
            yielded forever after.
    """
    # Advance past initial .send() call
    yield  # type: ignore[misc]
    exponent = 0
    while True:
        candidate = factor * base ** exponent
        if max_value is not None and candidate >= max_value:
            # Sequence has saturated; keep yielding the cap.
            yield max_value
        else:
            yield candidate
            exponent += 1
def fibo(max_value: Optional[int] = None) -> Generator[int, None, None]:
    """Generator yielding the fibonacci sequence as backoff values.

    Args:
        max_value: Ceiling for the yielded values.  Once the true
            fibonacci sequence reaches this, max_value itself is
            yielded forever after.
    """
    # Advance past initial .send() call
    yield  # type: ignore[misc]
    current, following = 1, 1
    while True:
        if max_value is not None and current >= max_value:
            # Sequence has saturated; keep yielding the cap.
            yield max_value
        else:
            yield current
            current, following = following, current + following
def constant(
    interval: Union[int, Iterable[float]] = 1
) -> Generator[float, None, None]:
    """Generator for constant (or caller-supplied) wait intervals.

    Args:
        interval: A constant value to yield or an iterable of such values.
    """
    # Advance past initial .send() call
    yield  # type: ignore[misc]
    # EAFP: treat interval as an iterable, falling back to repeating
    # the single scalar value indefinitely.  A plain for/yield loop is
    # used (not `yield from`) so .send() on this generator stays valid.
    try:
        intervals = iter(interval)  # type: ignore
    except TypeError:
        intervals = itertools.repeat(interval)  # type: ignore
    for seconds in intervals:
        yield seconds
def runtime(
    *,
    value: Callable[[Any], float]
) -> Generator[float, None, None]:
    """Generator whose wait values are computed from the decorated
    function's most recent return value or raised exception.

    Args:
        value: a callable which takes as input the decorated
               function's return value or thrown exception and
               determines how long to wait
    """
    last = yield  # type: ignore[misc]
    while True:
        last = yield value(last)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply-3.11.dist-info/RECORD | ply-3.11.dist-info/DESCRIPTION.rst,sha256=nnBY1Nj_GhIsOFck7R2yGHobQVosxi2CPQkHgeSZ0Hg,519
ply-3.11.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
ply-3.11.dist-info/METADATA,sha256=pYZ9p1TsWGQ8Kxp9yEJVyvs25PkR5h3gIDuTOCsvJGg,844
ply-3.11.dist-info/RECORD,,
ply-3.11.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
ply-3.11.dist-info/metadata.json,sha256=s7M7va9E25_7TRpzHfNCfN73Ieiy5iKogF0PzXtMxMI,515
ply-3.11.dist-info/top_level.txt,sha256=gDYBHRQ7Vy0tY0AjXyJtadvU2LDaOsHqhhV70AGsisc,4
ply/__init__.py,sha256=sx6iBIF__WKIeU0iw2WSoSqBhclHF5EhBTc0wDigTV8,103
ply/__pycache__/__init__.cpython-311.pyc,,
ply/__pycache__/cpp.cpython-311.pyc,,
ply/__pycache__/ctokens.cpython-311.pyc,,
ply/__pycache__/lex.cpython-311.pyc,,
ply/__pycache__/yacc.cpython-311.pyc,,
ply/__pycache__/ygen.cpython-311.pyc,,
ply/cpp.py,sha256=KTg13R5SKeicwZm7bIPL44KcQBRcHsmeEGOwIBVvLko,33639
ply/ctokens.py,sha256=GmyWYDY9nl6F1WJQ9rmcQFgh1FnADFlnp_TBjTcEsqU,3155
ply/lex.py,sha256=babRISnIAfzHo7WqLYF2qGCSaH0btM8d3ztgHaK3SA0,42905
ply/yacc.py,sha256=EF043rIHrXJYG6jcb15TI2SLwdCoNOQZXCN_1M3-I4k,137736
ply/ygen.py,sha256=TRnkZgx5BBB43Qspu2J4gVtpeBut8xrTEZoLbNN0b6M,2246
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply-3.11.dist-info/metadata.json | {"classifiers": ["Programming Language :: Python :: 3", "Programming Language :: Python :: 2"], "description_content_type": "UNKNOWN", "extensions": {"python.details": {"contacts": [{"email": "dave@dabeaz.com", "name": "David Beazley", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "http://www.dabeaz.com/ply/"}}}, "generator": "bdist_wheel (0.30.0)", "license": "BSD", "metadata_version": "2.0", "name": "ply", "summary": "Python Lex & Yacc", "version": "3.11"} | 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply-3.11.dist-info/WHEEL | Wheel-Version: 1.0
Generator: bdist_wheel (0.30.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply-3.11.dist-info/DESCRIPTION.rst |
PLY is yet another implementation of lex and yacc for Python. Some notable
features include the fact that it's implemented entirely in Python and it
uses LALR(1) parsing which is efficient and well suited for larger grammars.
PLY provides most of the standard lex/yacc features including support for empty
productions, precedence rules, error recovery, and support for ambiguous grammars.
PLY is extremely easy to use and provides very extensive error checking.
It is compatible with both Python 2 and Python 3.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply-3.11.dist-info/top_level.txt | ply
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply-3.11.dist-info/INSTALLER | pip
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply-3.11.dist-info/METADATA | Metadata-Version: 2.0
Name: ply
Version: 3.11
Summary: Python Lex & Yacc
Home-page: http://www.dabeaz.com/ply/
Author: David Beazley
Author-email: dave@dabeaz.com
License: BSD
Description-Content-Type: UNKNOWN
Platform: UNKNOWN
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 2
PLY is yet another implementation of lex and yacc for Python. Some notable
features include the fact that it's implemented entirely in Python and it
uses LALR(1) parsing which is efficient and well suited for larger grammars.
PLY provides most of the standard lex/yacc features including support for empty
productions, precedence rules, error recovery, and support for ambiguous grammars.
PLY is extremely easy to use and provides very extensive error checking.
It is compatible with both Python 2 and Python 3.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests-2.29.0.dist-info/RECORD | requests-2.29.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
requests-2.29.0.dist-info/LICENSE,sha256=CeipvOyAZxBGUsFoaFqwkx54aPnIKEtm9a5u2uXxEws,10142
requests-2.29.0.dist-info/METADATA,sha256=ArJcA7ZkLXjt7XWQHFZzpC0DjxPcjTatU0RhoXg9vb4,4615
requests-2.29.0.dist-info/RECORD,,
requests-2.29.0.dist-info/WHEEL,sha256=pkctZYzUS4AYVn6dJ-7367OJZivF2e8RA9b_ZBjif18,92
requests-2.29.0.dist-info/top_level.txt,sha256=fMSVmHfb5rbGOo6xv-O_tUX6j-WyixssE-SnwcDRxNQ,9
requests/__init__.py,sha256=xX0rLGvoljtzG8oDF-ZYuTlgL_BQgKoxUZ1e3cpNHX8,4972
requests/__pycache__/__init__.cpython-311.pyc,,
requests/__pycache__/__version__.cpython-311.pyc,,
requests/__pycache__/_internal_utils.cpython-311.pyc,,
requests/__pycache__/adapters.cpython-311.pyc,,
requests/__pycache__/api.cpython-311.pyc,,
requests/__pycache__/auth.cpython-311.pyc,,
requests/__pycache__/certs.cpython-311.pyc,,
requests/__pycache__/compat.cpython-311.pyc,,
requests/__pycache__/cookies.cpython-311.pyc,,
requests/__pycache__/exceptions.cpython-311.pyc,,
requests/__pycache__/help.cpython-311.pyc,,
requests/__pycache__/hooks.cpython-311.pyc,,
requests/__pycache__/models.cpython-311.pyc,,
requests/__pycache__/packages.cpython-311.pyc,,
requests/__pycache__/sessions.cpython-311.pyc,,
requests/__pycache__/status_codes.cpython-311.pyc,,
requests/__pycache__/structures.cpython-311.pyc,,
requests/__pycache__/utils.cpython-311.pyc,,
requests/__version__.py,sha256=WWiFo8TJWU8edKk-8wx_g1przaQT8ZBtKWdIGlrP2mE,435
requests/_internal_utils.py,sha256=nMQymr4hs32TqVo5AbCrmcJEhvPUh7xXlluyqwslLiQ,1495
requests/adapters.py,sha256=3QQwZJCoDEFNUtHEPcXDZvusLyckm2qaz69wtEfJKoU,19578
requests/api.py,sha256=q61xcXq4tmiImrvcSVLTbFyCiD2F-L_-hWKGbz4y8vg,6449
requests/auth.py,sha256=h-HLlVx9j8rKV5hfSAycP2ApOSglTz77R0tz7qCbbEE,10187
requests/certs.py,sha256=Z9Sb410Anv6jUFTyss0jFFhU6xst8ctELqfy8Ev23gw,429
requests/compat.py,sha256=yxntVOSEHGMrn7FNr_32EEam1ZNAdPRdSE13_yaHzTk,1451
requests/cookies.py,sha256=kD3kNEcCj-mxbtf5fJsSaT86eGoEYpD3X0CSgpzl7BM,18560
requests/exceptions.py,sha256=DhveFBclVjTRxhRduVpO-GbMYMID2gmjdLfNEqNpI_U,3811
requests/help.py,sha256=gPX5d_H7Xd88aDABejhqGgl9B1VFRTt5BmiYvL3PzIQ,3875
requests/hooks.py,sha256=CiuysiHA39V5UfcCBXFIx83IrDpuwfN9RcTUgv28ftQ,733
requests/models.py,sha256=-DlKi0or8gFAM6VzutobXvvBW_2wrJuOF5NfndTIddA,35223
requests/packages.py,sha256=DXgv-FJIczZITmv0vEBAhWj4W-5CGCIN_ksvgR17Dvs,957
requests/sessions.py,sha256=KUqJcRRLovNefUs7ScOXSUVCcfSayTFWtbiJ7gOSlTI,30180
requests/status_codes.py,sha256=FvHmT5uH-_uimtRz5hH9VCbt7VV-Nei2J9upbej6j8g,4235
requests/structures.py,sha256=-IbmhVz06S-5aPSZuUthZ6-6D9XOjRuTXHOabY041XM,2912
requests/utils.py,sha256=6sx2X3cIVA8BgWOg8odxFy-_lbWDFETU8HI4fU4Rmqw,33448
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests-2.29.0.dist-info/LICENSE |
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests-2.29.0.dist-info/WHEEL | Wheel-Version: 1.0
Generator: bdist_wheel (0.40.0)
Root-Is-Purelib: true
Tag: py3-none-any
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests-2.29.0.dist-info/top_level.txt | requests
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests-2.29.0.dist-info/INSTALLER | pip
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/requests-2.29.0.dist-info/METADATA | Metadata-Version: 2.1
Name: requests
Version: 2.29.0
Summary: Python HTTP for Humans.
Home-page: https://requests.readthedocs.io
Author: Kenneth Reitz
Author-email: me@kennethreitz.org
License: Apache 2.0
Project-URL: Documentation, https://requests.readthedocs.io
Project-URL: Source, https://github.com/psf/requests
Platform: UNKNOWN
Classifier: Development Status :: 5 - Production/Stable
Classifier: Environment :: Web Environment
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: Apache Software License
Classifier: Natural Language :: English
Classifier: Operating System :: OS Independent
Classifier: Programming Language :: Python
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.7
Classifier: Programming Language :: Python :: 3.8
Classifier: Programming Language :: Python :: 3.9
Classifier: Programming Language :: Python :: 3.10
Classifier: Programming Language :: Python :: 3.11
Classifier: Programming Language :: Python :: 3 :: Only
Classifier: Programming Language :: Python :: Implementation :: CPython
Classifier: Programming Language :: Python :: Implementation :: PyPy
Classifier: Topic :: Internet :: WWW/HTTP
Classifier: Topic :: Software Development :: Libraries
Requires-Python: >=3.7
Description-Content-Type: text/markdown
Requires-Dist: charset-normalizer (<4,>=2)
Requires-Dist: idna (<4,>=2.5)
Requires-Dist: urllib3 (<1.27,>=1.21.1)
Requires-Dist: certifi (>=2017.4.17)
Provides-Extra: security
Provides-Extra: socks
Requires-Dist: PySocks (!=1.5.7,>=1.5.6) ; extra == 'socks'
Provides-Extra: use_chardet_on_py3
Requires-Dist: chardet (<6,>=3.0.2) ; extra == 'use_chardet_on_py3'
# Requests
**Requests** is a simple, yet elegant, HTTP library.
```python
>>> import requests
>>> r = requests.get('https://httpbin.org/basic-auth/user/pass', auth=('user', 'pass'))
>>> r.status_code
200
>>> r.headers['content-type']
'application/json; charset=utf8'
>>> r.encoding
'utf-8'
>>> r.text
'{"authenticated": true, ...'
>>> r.json()
{'authenticated': True, ...}
```
Requests allows you to send HTTP/1.1 requests extremely easily. There’s no need to manually add query strings to your URLs, or to form-encode your `PUT` & `POST` data — but nowadays, just use the `json` method!
Requests is one of the most downloaded Python packages today, pulling in around `30M downloads / week`— according to GitHub, Requests is currently [depended upon](https://github.com/psf/requests/network/dependents?package_id=UGFja2FnZS01NzA4OTExNg%3D%3D) by `1,000,000+` repositories. You may certainly put your trust in this code.
[![Downloads](https://pepy.tech/badge/requests/month)](https://pepy.tech/project/requests)
[![Supported Versions](https://img.shields.io/pypi/pyversions/requests.svg)](https://pypi.org/project/requests)
[![Contributors](https://img.shields.io/github/contributors/psf/requests.svg)](https://github.com/psf/requests/graphs/contributors)
## Installing Requests and Supported Versions
Requests is available on PyPI:
```console
$ python -m pip install requests
```
Requests officially supports Python 3.7+.
## Supported Features & Best–Practices
Requests is ready for the demands of building robust and reliable HTTP–speaking applications, for the needs of today.
- Keep-Alive & Connection Pooling
- International Domains and URLs
- Sessions with Cookie Persistence
- Browser-style TLS/SSL Verification
- Basic & Digest Authentication
- Familiar `dict`–like Cookies
- Automatic Content Decompression and Decoding
- Multi-part File Uploads
- SOCKS Proxy Support
- Connection Timeouts
- Streaming Downloads
- Automatic honoring of `.netrc`
- Chunked HTTP Requests
## API Reference and User Guide available on [Read the Docs](https://requests.readthedocs.io)
[![Read the Docs](https://raw.githubusercontent.com/psf/requests/main/ext/ss.png)](https://requests.readthedocs.io)
## Cloning the repository
When cloning the Requests repository, you may need to add the `-c
fetch.fsck.badTimezone=ignore` flag to avoid an error about a bad commit (see
[this issue](https://github.com/psf/requests/issues/2690) for more background):
```shell
git clone -c fetch.fsck.badTimezone=ignore https://github.com/psf/requests.git
```
You can also apply this setting to your global Git config:
```shell
git config --global fetch.fsck.badTimezone ignore
```
---
[![Kenneth Reitz](https://raw.githubusercontent.com/psf/requests/main/ext/kr.png)](https://kennethreitz.org) [![Python Software Foundation](https://raw.githubusercontent.com/psf/requests/main/ext/psf.png)](https://www.python.org/psf)
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply/yacc.py | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2018
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammar is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expense of readability and what one might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
import re
import types
import sys
import os.path
import inspect
import warnings
__version__ = '3.11'
__tabversion__ = '3.10'
#-----------------------------------------------------------------------------
# === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = True # Debugging mode. If set, yacc generates a
# a 'parser.out' file in the current directory
debug_file = 'parser.out' # Default name of the debugging file
tab_module = 'parsetab' # Default name of the table module
default_lr = 'LALR' # Default LR table generation method
error_count = 3 # Number of symbols that must be shifted to leave recovery mode
yaccdevel = False # Set to True if developing yacc. This turns off optimized
# implementations of certain functions.
resultlimit = 40 # Size limit of results when running in debug mode.
pickle_protocol = 0 # Protocol to use when writing pickle files
# String type-checking compatibility
if sys.version_info[0] < 3:
string_types = basestring
else:
string_types = str
MAXINT = sys.maxsize
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Minimal stand-in for a ``logging`` logger, writing to a file object.

    PLY uses an instance of this by default when emitting diagnostics such
    as the parser.out file.  Users wanting finer control can supply a real
    logging object instead.
    """
    def __init__(self, f):
        self.f = f
    def _emit(self, prefix, msg, args):
        # Every level funnels through here: %-format, prefix, newline.
        self.f.write('%s%s\n' % (prefix, msg % args))
    def debug(self, msg, *args, **kwargs):
        self._emit('', msg, args)
    info = debug
    def warning(self, msg, *args, **kwargs):
        self._emit('WARNING: ', msg, args)
    def error(self, msg, *args, **kwargs):
        self._emit('ERROR: ', msg, args)
    # critical deliberately behaves like debug (no prefix), as in the
    # original implementation.
    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger that silently discards all output.

    Any attribute access and any call returns the logger itself, so
    arbitrarily chained usage (``n.info(...)``, ``n.debug(...)``) is a
    harmless no-op.
    """
    def __getattribute__(self, attr):
        return self
    def __call__(self, *args, **kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception):
    """Base exception raised for all yacc-related errors."""
    pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Return a short one-line description of a parse result for debug output."""
    text = repr(r)
    if '\n' in text:
        # Re-repr to escape embedded newlines so the output stays on one line.
        text = repr(text)
    if len(text) > resultlimit:
        # Truncate oversized reprs to the configured limit.
        text = text[:resultlimit] + ' ...'
    return '<%s @ 0x%x> (%s)' % (type(r).__name__, id(r), text)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Return a compact description of a parser-stack entry for debug traces."""
    text = repr(r)
    if '\n' in text:
        # Re-repr to escape embedded newlines.
        text = repr(text)
    if len(text) >= 16:
        # Too long to show inline -- fall back to a type/address form.
        return '<%s @ 0x%x>' % (type(r).__name__, id(r))
    return text
# Panic mode error recovery support.   This feature is being reworked--much of the
# code here is to offer a deprecation/backwards compatible transition
# Placeholders for the parser instance's bound methods; call_errorfunc()
# installs the real methods here for the duration of a p_error() callback.
_errok = None
_token = None
_restart = None
_warnmsg = '''PLY: Don't use global functions errok(), token(), and restart() in p_error().
Instead, invoke the methods on the associated parser instance:
    def p_error(p):
        ...
        # Use parser.errok(), parser.token(), parser.restart()
        ...
    parser = yacc.yacc()
'''
def errok():
    # Deprecated: forwards to parser.errok() after emitting a warning.
    warnings.warn(_warnmsg)
    return _errok()
def restart():
    # Deprecated: forwards to parser.restart() after emitting a warning.
    warnings.warn(_warnmsg)
    return _restart()
def token():
    # Deprecated: forwards to parser.token() after emitting a warning.
    warnings.warn(_warnmsg)
    return _token()
# Utility function to call the p_error() function with some deprecation hacks
def call_errorfunc(errorfunc, token, parser):
    """Invoke the user's p_error() with the legacy module-level hooks installed.

    The globals _errok/_token/_restart temporarily expose the parser
    instance's recovery methods so that old-style p_error() functions
    keep working; they are torn down again afterwards.
    """
    global _errok, _token, _restart
    _errok, _token, _restart = parser.errok, parser.token, parser.restart
    result = errorfunc(token)
    try:
        # p_error() itself may already have removed the hooks.
        del _errok, _token, _restart
    except NameError:
        pass
    return result
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """Grammar symbol held on the parsing stack.

    Attributes such as .type, .value, .lineno/.endlineno and
    .lexpos/.endlexpos are attached dynamically by the parser.
    """
    def __str__(self):
        return self.type
    def __repr__(self):
        # Same text as __str__: the symbol's grammar type name.
        return self.type
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbols passed to each grammar rule function.

    Index lookup and assignment read/write the .value attribute of the
    underlying YaccSymbol objects; negative indices reach into the parser
    stack instead of the production slice.  lineno()/linespan() and
    lexpos()/lexspan() expose positional information for a symbol.
    """
    def __init__(self, s, stack=None):
        self.slice = s
        self.stack = stack
        self.lexer = None
        self.parser = None
    def __getitem__(self, n):
        if isinstance(n, slice):
            return [sym.value for sym in self.slice[n]]
        # Negative indices refer to the parser state stack rather than
        # the production slice.
        source = self.slice if n >= 0 else self.stack
        return source[n].value
    def __setitem__(self, n, v):
        self.slice[n].value = v
    def __getslice__(self, i, j):
        # Python 2 legacy slicing protocol.
        return [sym.value for sym in self.slice[i:j]]
    def __len__(self):
        return len(self.slice)
    def lineno(self, n):
        """Line number of item *n* (0 if not recorded)."""
        return getattr(self.slice[n], 'lineno', 0)
    def set_lineno(self, n, lineno):
        self.slice[n].lineno = lineno
    def linespan(self, n):
        """(startline, endline) tuple for item *n*."""
        sym = self.slice[n]
        first = getattr(sym, 'lineno', 0)
        return first, getattr(sym, 'endlineno', first)
    def lexpos(self, n):
        """Lexing position of item *n* (0 if not recorded)."""
        return getattr(self.slice[n], 'lexpos', 0)
    def set_lexpos(self, n, lexpos):
        self.slice[n].lexpos = lexpos
    def lexspan(self, n):
        """(lexpos, endlexpos) tuple for item *n*."""
        sym = self.slice[n]
        first = getattr(sym, 'lexpos', 0)
        return first, getattr(sym, 'endlexpos', first)
    def error(self):
        # Used by rule functions to signal a syntax error explicitly.
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    """The LR parsing engine.

    Drives the shift/reduce state machine using the action/goto tables
    supplied by ``lrtab``.  Three variants of the parse loop exist:
    parsedebug() (debug output), parseopt() (optimized, with position
    tracking) and parseopt_notrack() (optimized, no tracking); parse()
    dispatches to the appropriate one.
    """
    def __init__(self, lrtab, errorf):
        # lrtab supplies the generated tables; errorf is the user's p_error()
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
        self.set_defaulted_states()
        self.errorok = True
    def errok(self):
        # Clear the error flag (commonly invoked from within p_error())
        self.errorok = True
    def restart(self):
        # Reset the parser stacks back to the start state
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)
    # Defaulted state support.
    # This method identifies parser states where there is only one possible reduction action.
    # For such states, the parser can choose to make a rule reduction without consuming
    # the next look-ahead token.  This delayed invocation of the tokenizer can be useful in
    # certain kinds of advanced parsing situations where the lexer and parser interact with
    # each other or change states (i.e., manipulation of scope, lexer states, etc.).
    #
    # See:  http://www.gnu.org/software/bison/manual/html_node/Default-Reductions.html#Default-Reductions
    def set_defaulted_states(self):
        self.defaulted_states = {}
        for state, actions in self.action.items():
            rules = list(actions.values())
            # A single entry that is negative means: always reduce by that rule.
            if len(rules) == 1 and rules[0] < 0:
                self.defaulted_states[state] = rules[0]
    def disable_defaulted_states(self):
        self.defaulted_states = {}
    def parse(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        """Parse *input* and return the value of the grammar's start symbol."""
        if debug or yaccdevel:
            if isinstance(debug, int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input, lexer, debug, tracking, tokenfunc)
        elif tracking:
            return self.parseopt(input, lexer, debug, tracking, tokenfunc)
        else:
            return self.parseopt_notrack(input, lexer, debug, tracking, tokenfunc)
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parsedebug().
    #
    # This is the debugging enabled version of parse().  All changes made to the
    # parsing engine should be made here.   Optimized versions of this function
    # are automatically created by the ply/ygen.py script.  This script cuts out
    # sections enclosed in markers such as this:
    #
    #      #--! DEBUG
    #      statements
    #      #--! DEBUG
    #
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parsedebug(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        #--! parsedebug-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery
        #--! DEBUG
        debug.info('PLY: PARSE DEBUG START')
        #--! DEBUG
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            #--! DEBUG
            debug.debug('')
            debug.debug('State : %s', state)
            #--! DEBUG
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
                #--! DEBUG
                debug.debug('Defaulted state %s: Reduce using %d', state, -t)
                #--! DEBUG
            #--! DEBUG
            debug.debug('Stack : %s',
                        ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            #--! DEBUG
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    #--! DEBUG
                    debug.debug('Action : Shift and goto state %s', t)
                    #--! DEBUG
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    #--! DEBUG
                    if plen:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str,
                                   '['+','.join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+']',
                                   goto[statestack[-1-plen]][pname])
                    else:
                        debug.info('Action : Reduce rule [%s] with %s and goto state %d', p.str, [],
                                   goto[statestack[-1]][pname])
                    #--! DEBUG
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            #--! DEBUG
                            debug.info('Result : %s', format_result(pslice[0]))
                            #--! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    #--! DEBUG
                    debug.info('Done : Returning %s', format_result(result))
                    debug.info('PLY: PARSE DEBUG END')
                    #--! DEBUG
                    return result
            if t is None:
                #--! DEBUG
                debug.error('Error : %s',
                            ('%s . %s' % (' '.join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                #--! DEBUG
                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return
                else:
                    errorcount = error_count
                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]
                continue
        # Call an error function here
        raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parsedebug-end
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt().
    #
    # Optimized version of parse() method.  DO NOT EDIT THIS CODE DIRECTLY!
    # This code is automatically generated by the ply/ygen.py script. Make
    # changes to the parsedebug() method instead.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        #--! parseopt-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        #--! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1, 'endlineno', t1.lineno)
                            sym.endlexpos = getattr(t1, 'endlexpos', t1.lexpos)
                        #--! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        #--! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        #--! TRACKING
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result
            if t is None:
                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return
                else:
                    errorcount = error_count
                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        #--! TRACKING
                        if tracking:
                            sym.endlineno = getattr(lookahead, 'lineno', sym.lineno)
                            sym.endlexpos = getattr(lookahead, 'lexpos', sym.lexpos)
                        #--! TRACKING
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    #--! TRACKING
                    if tracking:
                        lookahead.lineno = sym.lineno
                        lookahead.lexpos = sym.lexpos
                    #--! TRACKING
                    statestack.pop()
                    state = statestack[-1]
                continue
        # Call an error function here
        raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parseopt-end
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    # parseopt_notrack().
    #
    # Optimized version of parseopt() with line number tracking removed.
    # DO NOT EDIT THIS CODE DIRECTLY. This code is automatically generated
    # by the ply/ygen.py script. Make changes to the parsedebug() method instead.
    # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt_notrack(self, input=None, lexer=None, debug=False, tracking=False, tokenfunc=None):
        #--! parseopt-notrack-start
        lookahead = None                         # Current lookahead symbol
        lookaheadstack = []                      # Stack of lookahead symbols
        actions = self.action                    # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                         # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions                  # Local reference to production list (to avoid lookup on self.)
        defaulted_states = self.defaulted_states # Local reference to defaulted states
        pslice = YaccProduction(None)            # Production object passed to grammar rules
        errorcount = 0                           # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            from . import lex
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set the parser() token method (sometimes used in error recovery)
        self.token = get_token
        # Set up the state and symbol stacks
        statestack = []                # Stack of parsing states
        self.statestack = statestack
        symstack = []                  # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack        # Put in the production
        errtoken = None                # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while True:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if state not in defaulted_states:
                if not lookahead:
                    if not lookaheadstack:
                        lookahead = get_token()     # Get the next token
                    else:
                        lookahead = lookaheadstack.pop()
                    if not lookahead:
                        lookahead = YaccSymbol()
                        lookahead.type = '$end'
                # Check the action table
                ltype = lookahead.type
                t = actions[state].get(ltype)
            else:
                t = defaulted_states[state]
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount:
                        errorcount -= 1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            self.state = state
                            p.callable(pslice)
                            del statestack[-plen:]
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            symstack.extend(targ[1:-1])         # Put the production slice back on the stack
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        targ = [sym]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            self.state = state
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)    # Save the current lookahead token
                            statestack.pop()                    # Pop back one state (before the reduce)
                            state = statestack[-1]
                            sym.type = 'error'
                            sym.value = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = False
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n, 'value', None)
                    return result
            if t is None:
                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = False
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        if errtoken and not hasattr(errtoken, 'lexer'):
                            errtoken.lexer = lexer
                        self.state = state
                        tok = call_errorfunc(self.errorfunc, errtoken, self)
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken, 'lineno'):
                                lineno = lookahead.lineno
                            else:
                                lineno = 0
                            if lineno:
                                sys.stderr.write('yacc: Syntax error at line %d, token=%s\n' % (lineno, errtoken.type))
                            else:
                                sys.stderr.write('yacc: Syntax error, token=%s' % errtoken.type)
                        else:
                            sys.stderr.write('yacc: Parse error in input. EOF\n')
                        return
                else:
                    errorcount = error_count
                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    # Create the error symbol for the first time and make it the new lookahead symbol
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead, 'lineno'):
                        t.lineno = t.endlineno = lookahead.lineno
                    if hasattr(lookahead, 'lexpos'):
                        t.lexpos = t.endlexpos = lookahead.lexpos
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    sym = symstack.pop()
                    statestack.pop()
                    state = statestack[-1]
                continue
        # Call an error function here
        raise RuntimeError('yacc: internal parser error!!!\n')
        #--! parseopt-notrack-end
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
# Regex matching grammar symbol identifiers.  Note that '-' is accepted
# in addition to the usual alphanumerics and underscore.
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
    """Raw information about a single production (grammar rule).

    A rule such as ``expr : expr PLUS term`` is stored with:

      name   - left-hand-side name, e.g. 'expr'
      prod   - tuple of right-hand-side symbols, e.g. ('expr', 'PLUS', 'term')
      prec   - production precedence level
      number - production number
      func   - name of the function that executes on reduce
      file   - file where the production function is defined
      line   - line number of the production function
      len    - number of symbols on the right-hand side
      usyms  - unique symbols appearing in the production (first-seen order)
    """
    reduced = 0
    def __init__(self, number, name, prod, precedence=('right', 0), func=None, file='', line=0):
        self.name = name
        self.prod = tuple(prod)
        self.number = number
        self.func = func
        self.callable = None
        self.file = file
        self.line = line
        self.prec = precedence
        # Internal settings used during table construction
        self.len = len(self.prod)
        # Unique production symbols, preserving first-seen order.
        unique = []
        for symbol in self.prod:
            if symbol not in unique:
                unique.append(symbol)
        self.usyms = unique
        # List of all LR items for the production
        self.lr_items = []
        self.lr_next = None
        # Cached string form, e.g. "expr -> expr PLUS term"
        rhs = ' '.join(self.prod) if self.prod else '<empty>'
        self.str = '%s -> %s' % (self.name, rhs)
    def __str__(self):
        return self.str
    def __repr__(self):
        return 'Production(%s)' % (self,)
    def __len__(self):
        return len(self.prod)
    def __nonzero__(self):
        # Python 2 truth protocol: a Production is always truthy.
        return 1
    def __getitem__(self, index):
        return self.prod[index]
    def lr_item(self, n):
        """Return the LR item with the dot at position *n* (None past the end)."""
        if n > len(self.prod):
            return None
        p = LRItem(self, n)
        # Precompute the list of productions immediately following the dot.
        try:
            p.lr_after = self.Prodnames[p.prod[n+1]]
        except (IndexError, KeyError):
            p.lr_after = []
        try:
            # NOTE: for n == 0 this yields prod[-1] (Python wraparound),
            # preserved from the original implementation.
            p.lr_before = p.prod[n-1]
        except IndexError:
            p.lr_before = None
        return p
    def bind(self, pdict):
        """Bind the production's function name to a callable taken from *pdict*."""
        if self.func:
            self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Minimal stand-in for Production used when tables are loaded from a file.

    Holds only what the LR parsing engine needs at runtime, plus a little
    debugging information (file/line of the original action function).
    Parameter names intentionally match the tuple layout written by the
    table writer.
    """

    def __init__(self, str, name, len, func, file, line):
        self.str = str
        self.name = name
        self.len = len
        self.func = func
        self.file = file
        self.line = line
        self.callable = None

    def __str__(self):
        return self.str

    def __repr__(self):
        return 'MiniProduction(%s)' % self.str

    def bind(self, pdict):
        """Resolve the stored action-function name into a callable from *pdict*."""
        if not self.func:
            return
        self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse.  Here
# are the basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """A production with a dot marking the current parse position.

    For example:  expr -> expr . PLUS term

    Built from a Production *p* with the dot inserted at index *n*.
    lookaheads maps state numbers to LALR lookahead symbol lists and is
    populated later by the table generator.
    """

    def __init__(self, p, n):
        dotted = list(p.prod)
        dotted.insert(n, '.')
        self.name = p.name
        self.prod = tuple(dotted)
        self.number = p.number
        self.lr_index = n
        self.lookaheads = {}
        self.len = len(self.prod)
        self.usyms = p.usyms

    def __str__(self):
        if not self.prod:
            return '%s -> <empty>' % self.name
        return '%s -> %s' % (self.name, ' '.join(self.prod))

    def __repr__(self):
        return 'LRItem(' + str(self) + ')'
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the right-most member of *symbols* that is in *terminals*.

    Used by add_production() to derive a rule's default precedence.
    Returns None when no symbol is a terminal.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError):
    """Exception raised when an error is detected in the grammar specification."""
    pass
class Grammar(object):
    """Container for a grammar: its productions, terminals, nonterminals,
    and precedence rules, plus computed properties such as FIRST sets,
    FOLLOW sets, and LR items. This data drives the LR table generation
    process.
    """
    def __init__(self, terminals):
        self.Productions = [None]  # A list of all of the productions.  The first
                                   # entry is always reserved for the purpose of
                                   # building an augmented grammar
        self.Prodnames = {}        # A dictionary mapping the names of nonterminals to a list of all
                                   # productions of that nonterminal.
        self.Prodmap = {}          # A dictionary that is only used to detect duplicate
                                   # productions.
        self.Terminals = {}        # A dictionary mapping the names of terminal symbols to a
                                   # list of the rules where they are used.
        for term in terminals:
            self.Terminals[term] = []
        self.Terminals['error'] = []
        self.Nonterminals = {}     # A dictionary mapping names of nonterminals to a list
                                   # of rule numbers where they are used.
        self.First = {}            # A dictionary of precomputed FIRST(x) symbols
        self.Follow = {}           # A dictionary of precomputed FOLLOW(x) symbols
        self.Precedence = {}       # Precedence rules for each terminal. Contains tuples of the
                                   # form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = set()  # Precedence rules that were actually used by the grammar.
                                     # This is only used to provide error checking and to generate
                                     # a warning about unused precedence rules.
        self.Start = None          # Starting symbol for the grammar

    def __len__(self):
        return len(self.Productions)

    def __getitem__(self, index):
        return self.Productions[index]

    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
    #
    # -----------------------------------------------------------------------------
    def set_precedence(self, term, assoc, level):
        assert self.Productions == [None], 'Must call set_precedence() before add_production()'
        if term in self.Precedence:
            raise GrammarError('Precedence already specified for terminal %r' % term)
        if assoc not in ['left', 'right', 'nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc, level)

    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols.  For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------
    def add_production(self, prodname, syms, func=None, file='', line=0):
        if prodname in self.Terminals:
            raise GrammarError('%s:%d: Illegal rule name %r. Already defined as a token' % (file, line, prodname))
        if prodname == 'error':
            raise GrammarError('%s:%d: Illegal rule name %r. error is a reserved word' % (file, line, prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError('%s:%d: Illegal rule name %r' % (file, line, prodname))

        # Look for literal tokens
        for n, s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    # NOTE: eval() is used here to decode the quoted literal; the
                    # grammar specification is assumed to be trusted input.
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError('%s:%d: Literal token %s in rule %r may only be a single character' %
                                           (file, line, s, prodname))
                    if c not in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError('%s:%d: Illegal name %r in rule %r' % (file, line, s, prodname))

        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError('%s:%d: Syntax error. Nothing follows %%prec' % (file, line))
            if syms[-2] != '%prec':
                raise GrammarError('%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule' %
                                   (file, line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname)
            if not prodprec:
                raise GrammarError('%s:%d: Nothing known about the precedence of %r' % (file, line, precname))
            else:
                self.UsedPrecedence.add(precname)
            del syms[-2:]  # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms, self.Terminals)
            prodprec = self.Precedence.get(precname, ('right', 0))

        # See if the rule is already in the rulemap
        map = '%s -> %s' % (prodname, syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError('%s:%d: Duplicate rule %s. ' % (file, line, m) +
                               'Previous definition at %s:%d' % (m.file, m.line))

        # From this point on, everything is valid.  Create a new Production instance
        pnumber = len(self.Productions)
        if prodname not in self.Nonterminals:
            self.Nonterminals[prodname] = []

        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if t not in self.Nonterminals:
                    self.Nonterminals[t] = []
                self.Nonterminals[t].append(pnumber)

        # Create a production and add it to the list of productions
        p = Production(pnumber, prodname, syms, prodprec, func, file, line)
        self.Productions.append(p)
        self.Prodmap[map] = p

        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [p]

    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar.  Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------
    def set_start(self, start=None):
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError('start symbol %s undefined' % start)
        self.Productions[0] = Production(0, "S'", [start])
        self.Nonterminals[start].append(0)
        self.Start = start

    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol.  Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------
    def find_unreachable(self):
        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if s in reachable:
                return
            reachable.add(s)
            for p in self.Prodnames.get(s, []):
                for r in p.prod:
                    mark_reachable_from(r)
        reachable = set()
        mark_reachable_from(self.Productions[0].prod[0])
        return [s for s in self.Nonterminals if s not in reachable]

    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------
    def infinite_cycles(self):
        terminates = {}

        # Terminals:
        for t in self.Terminals:
            terminates[t] = True
        terminates['$end'] = True

        # Nonterminals:
        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = False

        # Then propagate termination until no change:
        while True:
            some_change = False
            for (n, pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = False
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = True
                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = True
                            some_change = True
                        # Don't need to consider any more productions for this n.
                        break
            if not some_change:
                break

        infinite = []
        for (s, term) in terminates.items():
            if not term:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)
        return infinite

    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used in the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        result = []
        for p in self.Productions:
            if not p:
                continue
            for s in p.prod:
                if s not in self.Prodnames and s not in self.Terminals and s != 'error':
                    result.append((s, p))
        return result

    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        unused_tok = []
        for s, v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)
        return unused_tok

    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined, but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------
    def unused_rules(self):
        unused_prod = []
        for s, v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod

    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------
    def unused_precedence(self):
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname, self.Precedence[termname][0]))
        return unused

    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first(), the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self, beta):
        # We are computing First(x1,x2,x3,...,xn)
        result = []
        for x in beta:
            x_produces_empty = False
            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = True
                else:
                    if f not in result:
                        result.append(f)
            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')
        return result

    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        if self.First:
            return self.First

        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]
        self.First['$end'] = ['$end']

        # Nonterminals:
        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []

        # Then propagate symbols until no change:
        while True:
            some_change = False
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append(f)
                            some_change = True
            if not some_change:
                break
        return self.First

    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol.  The
    # follow set is the set of all symbols that might follow a given
    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self, start=None):
        # If already computed, return the result
        if self.Follow:
            return self.Follow

        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()

        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = []
        if not start:
            start = self.Productions[1].name
        self.Follow[start] = ['$end']

        while True:
            didadd = False
            for p in self.Productions[1:]:
                # Here is the production set
                for i, B in enumerate(p.prod):
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = False
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = True
                            if f == '<empty>':
                                hasempty = True
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = True
            if not didadd:
                break
        return self.Follow

    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items.  The LR items are stored in two ways:  First, they are uniquely
    # numbered and placed in the list _lritems.  Second, a linked list of LR items
    # is built for each production.  For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------
    def build_lritems(self):
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while True:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p, i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError, KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None
                lastlri.lr_next = lri
                if not lri:
                    break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError):
    """Exception raised when parser tables were written by an incompatible version."""
    pass
class LRTable(object):
    """Basic container for LR parsing tables (action, goto, productions).

    This base class only knows how to load previously generated tables
    from a Python module or a pickle file; table construction lives in
    the derived class LRGeneratedTable.
    """

    def __init__(self):
        self.lr_action = None       # Action table
        self.lr_goto = None         # Goto table
        self.lr_productions = None  # List of MiniProduction objects
        self.lr_method = None       # Table generation method ('LALR' or 'SLR')

    def read_table(self, module):
        """Load tables from an imported parser-table module (or a module name).

        Returns the stored table signature.  Raises VersionError if the
        tables were generated by an incompatible version.
        """
        if isinstance(module, types.ModuleType):
            parsetab = module
        else:
            exec('import %s' % module)
            parsetab = sys.modules[module]

        if parsetab._tabversion != __tabversion__:
            raise VersionError('yacc table file version is out of date')

        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto

        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))

        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature

    def read_pickle(self, filename):
        """Load tables from a pickle file previously written by the generator.

        Returns the stored table signature.  Raises ImportError if the
        file does not exist and VersionError on a version mismatch.
        """
        try:
            import cPickle as pickle   # Faster pickle on Python 2
        except ImportError:
            import pickle

        if not os.path.exists(filename):
            raise ImportError

        # Use a context manager so the file is closed even if the version
        # check or unpickling raises (the previous code leaked the handle
        # on any error path).
        with open(filename, 'rb') as in_f:
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError('yacc table file version is out of date')
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)

        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))

        return signature

    # Bind all production function names to callable objects in pdict
    def bind_callables(self, pdict):
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X, R, FP):
    """Compute the set-valued function F(x) = FP(x) U { F(y) | x R y }.

    Used to compute Read() sets and FOLLOW sets during LALR(1)
    generation.  X is the input set, R the relation, FP the base
    set-valued function; returns a dict mapping each x to its F(x).
    """
    N = dict.fromkeys(X, 0)   # DFS numbering; 0 means "not yet visited"
    stack = []
    F = {}
    for node in X:
        if not N[node]:
            traverse(node, N, stack, F, X, R, FP)
    return F
def traverse(x, N, stack, F, X, R, FP):
    """One depth-first step of digraph().

    Tarjan-style strongly-connected-component traversal: N[x] holds the
    DFS number (0 = unvisited, MAXINT = finished), and every member of a
    completed component ends up sharing the same result list F[x].
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)  # F(X) <- F'(x)
    rel = R(x)  # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y, N, stack, F, X, R, FP)
        # Propagate the lowest DFS number seen in this component
        N[x] = min(N[x], N[y])
        for a in F.get(y, []):
            if a not in F[x]:
                F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly connected component: pop the whole
        # component off the stack, marking each member finished and giving
        # it the fully propagated result set.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
class LALRError(YaccError):
    """Exception raised for errors encountered during LR table generation."""
    pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
def __init__(self, grammar, method='LALR', log=None):
    """Build LR parsing tables for *grammar*.

    method is 'SLR' or 'LALR' (anything else raises LALRError).  Conflict
    diagnostics are collected in sr_conflicts/rr_conflicts for reporting.
    """
    if method not in ['SLR', 'LALR']:
        raise LALRError('Unsupported method %s' % method)
    self.grammar = grammar
    self.lr_method = method
    # Set up the logger
    if not log:
        log = NullLogger()
    self.log = log
    # Internal attributes
    self.lr_action = {}        # Action table
    self.lr_goto = {}          # Goto table
    self.lr_productions = grammar.Productions    # Copy of grammar Production array
    self.lr_goto_cache = {}    # Cache of computed gotos
    self.lr0_cidhash = {}      # Cache of closures
    self._add_count = 0        # Internal counter used to detect cycles
    # Diagnostic information filled in by the table generator
    self.sr_conflict = 0
    self.rr_conflict = 0
    self.conflicts = []        # List of conflicts
    self.sr_conflicts = []
    self.rr_conflicts = []
    # Build the tables
    self.grammar.build_lritems()
    self.grammar.compute_first()
    self.grammar.compute_follow()
    self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
def lr0_closure(self, I):
    """Return the LR(0) closure of item set *I* as a list.

    Each item is tagged with ``lr0_added`` set to the current value of the
    per-call counter ``self._add_count`` so that it is appended at most
    once, without needing a set of (unhashable) items.
    """
    self._add_count += 1
    # Add everything in I to J
    J = I[:]
    didadd = True
    while didadd:
        didadd = False
        for j in J:
            for x in j.lr_after:
                # Already added during this closure computation
                if getattr(x, 'lr0_added', 0) == self._add_count:
                    continue
                # Add B --> .G to J
                J.append(x.lr_next)
                x.lr0_added = self._add_count
                didadd = True
    return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol.   This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects).  With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
def lr0_goto(self, I, x):
    """Return goto(I, x), memoized so identical goto sets are the same object."""
    # First we look for a previously cached entry
    g = self.lr_goto_cache.get((id(I), x))
    if g:
        return g
    # Now we generate the goto set in a way that guarantees uniqueness
    # of the result.  self.lr_goto_cache[x] acts as a trie keyed on the
    # id()s of the successive kernel items; the '$end' key marks the
    # leaf where the finished goto set is stored.
    s = self.lr_goto_cache.get(x)
    if not s:
        s = {}
        self.lr_goto_cache[x] = s
    gs = []
    for p in I:
        n = p.lr_next
        if n and n.lr_before == x:
            s1 = s.get(id(n))
            if not s1:
                s1 = {}
                s[id(n)] = s1
            gs.append(n)
            s = s1
    g = s.get('$end')
    if not g:
        if gs:
            g = self.lr0_closure(gs)
            s['$end'] = g
        else:
            s['$end'] = gs
    self.lr_goto_cache[(id(I), x)] = g
    return g
# Compute the LR(0) sets of item function
def lr0_items(self):
    """Return the canonical collection C of LR(0) item sets (parser states)."""
    C = [self.lr0_closure([self.grammar.Productions[0].lr_next])]
    i = 0
    # Assign a state number to each initial item set
    for I in C:
        self.lr0_cidhash[id(I)] = i
        i += 1
    # Loop over the items in C and each grammar symbols.
    # C grows while we iterate, so a while loop (not a for) is used.
    i = 0
    while i < len(C):
        I = C[i]
        i += 1
        # Collect all of the symbols that could possibly be in the goto(I,X) sets
        asyms = {}
        for ii in I:
            for s in ii.usyms:
                asyms[s] = None
        for x in asyms:
            g = self.lr0_goto(I, x)
            # Skip empty gotos and states we have already numbered
            if not g or id(g) in self.lr0_cidhash:
                continue
            self.lr0_cidhash[id(g)] = len(C)
            C.append(g)
    return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
# The method used here is due to DeRemer and Pennello (1982).
#
# DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
# Lookahead Sets", ACM Transactions on Programming Languages and Systems,
# Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
    """Return the set of nonterminal names that can derive the empty string.

    Iterates to a fixed point: a production makes its nonterminal nullable
    when every symbol on its right-hand side is already known nullable
    (vacuously true for an empty right-hand side).
    """
    nullable = set()
    while True:
        size_before = len(nullable)
        for p in self.grammar.Productions[1:]:
            if all(sym in nullable for sym in p.prod):
                nullable.add(p.name)
        if len(nullable) == size_before:
            return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_transitions(C)
#
# Given a set of LR(0) items, this function finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self, C):
    """Return all (state, N) pairs where some item in *state* has its dot
    immediately before the nonterminal N.  C is the set of LR(0) item sets.
    """
    trans = []
    for stateno, state in enumerate(C):
        for item in state:
            # Dot at the very end: no symbol follows it
            if item.lr_index >= item.len - 1:
                continue
            candidate = (stateno, item.prod[item.lr_index + 1])
            if candidate[1] in self.grammar.Nonterminals and candidate not in trans:
                trans.append(candidate)
    return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self, C, trans, nullable):
    """Compute DR(p, A): the terminals directly readable after taking the
    nonterminal transition *trans* = (state, N).  Returns a list of terminals.
    """
    state, N = trans
    g = self.lr0_goto(C[state], N)
    terms = []
    for item in g:
        if item.lr_index >= item.len - 1:
            continue
        sym = item.prod[item.lr_index + 1]
        if sym in self.grammar.Terminals and sym not in terms:
            terms.append(sym)
    # Special case: the start-symbol transition out of state 0 can read end-of-input
    if state == 0 and N == self.grammar.Productions[0].prod[0]:
        terms.append('$end')
    return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self, C, trans, empty):
    """Compute the READS relation: (p, A) READS (t, C) for transitions
    reachable through nullable symbols.  Returns a list of (state, symbol).
    """
    state, N = trans
    g = self.lr0_goto(C[state], N)
    j = self.lr0_cidhash.get(id(g), -1)
    rel = []
    for item in g:
        if item.lr_index < item.len - 1:
            sym = item.prod[item.lr_index + 1]
            if sym in empty:
                rel.append((j, sym))
    return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
def compute_lookback_includes(self, C, trans, nullable):
    """Determine the LOOKBACK and INCLUDES relations for LALR lookaheads.

    Returns (lookdict, includedict): lookdict maps each nonterminal
    transition (state, N) to its list of (final_state, production)
    lookback pairs; includedict maps a transition to the list of
    transitions that include it.
    """
    lookdict = {}      # Dictionary of lookback relations
    includedict = {}   # Dictionary of include relations
    # Make a dictionary of non-terminal transitions
    dtrans = {}
    for t in trans:
        dtrans[t] = 1
    # Loop over all transitions and compute lookbacks and includes
    for state, N in trans:
        lookb = []
        includes = []
        for p in C[state]:
            if p.name != N:
                continue
            # Okay, we have a name match.  We now follow the production all the way
            # through the state machine until we get the . on the right hand side
            lr_index = p.lr_index
            j = state
            while lr_index < p.len - 1:
                lr_index = lr_index + 1
                t = p.prod[lr_index]
                # Check to see if this symbol and state are a non-terminal transition
                if (j, t) in dtrans:
                    # Yes.  Okay, there is some chance that this is an includes relation
                    # the only way to know for certain is whether the rest of the
                    # production derives empty
                    li = lr_index + 1
                    while li < p.len:
                        if p.prod[li] in self.grammar.Terminals:
                            break      # No forget it
                        if p.prod[li] not in nullable:
                            break
                        li = li + 1
                    else:
                        # Appears to be a relation between (j,t) and (state,N)
                        includes.append((j, t))
                g = self.lr0_goto(C[j], t)               # Go to next set
                j = self.lr0_cidhash.get(id(g), -1)      # Go to next state
            # When we get here, j is the final state, now we have to locate the production
            for r in C[j]:
                if r.name != p.name:
                    continue
                if r.len != p.len:
                    continue
                i = 0
                # This loop is comparing a production ". A B C" with "A B C ."
                while i < r.lr_index:
                    if r.prod[i] != p.prod[i+1]:
                        break
                    i = i + 1
                else:
                    lookb.append((j, r))
        for i in includes:
            if i not in includedict:
                includedict[i] = []
            includedict[i].append((state, N))
        lookdict[(state, N)] = lookb
    return lookdict, includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
    """Compute the Read sets for the given nonterminal transitions.

    C is the collection of LR(0) item sets, ntrans the set of nonterminal
    transitions, and nullable the set of nullable nonterminals.  The result
    is produced by running the digraph algorithm with the DR relation as
    the base function and the reads relation as the edge function.
    """
    def base_fn(x):
        # DR(p, A): terminals directly readable after the transition
        return self.dr_relation(C, x, nullable)

    def edge_fn(x):
        # reads relation: transitions reachable through nullable symbols
        return self.reads_relation(C, x, nullable)

    return digraph(ntrans, edge_fn, base_fn)
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
    """Compute LALR FOLLOW sets from previously computed Read and
    includes sets.

    Implements Follow(p,A) = Read(p,A) U { Follow(p',B) | (p,A) INCLUDES
    (p',B) } by running the digraph algorithm over the includes relation.
    """
    def base_fn(x):
        # Start from the Read set of each nonterminal transition
        return readsets[x]

    def edge_fn(x):
        # Follow propagates along the includes relation (may be absent)
        return inclsets.get(x, [])

    return digraph(ntrans, edge_fn, base_fn)
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
    """Attach lookahead symbols to grammar productions.

    For every lookback relation (state, production) associated with a
    nonterminal transition, the symbols in that transition's FOLLOW set
    are appended (without duplicates) to the production's per-state
    lookahead list.
    """
    for transition, lookback_list in lookbacks.items():
        # The FOLLOW set depends only on the transition, so fetch it once
        symbols = followset.get(transition, [])
        for state, production in lookback_list:
            lookahead = production.lookaheads.setdefault(state, [])
            for sym in symbols:
                if sym not in lookahead:
                    lookahead.append(sym)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
    """Run the full DeRemer-Pennello pipeline that attaches LALR(1)
    lookahead information to the grammar, given the LR(0) item sets C.
    """
    # Nullable nonterminals are needed by several of the steps below
    empty_syms = self.compute_nullable_nonterminals()
    # All (state, nonterminal) transitions in the LR(0) machine
    nt_trans = self.find_nonterminal_transitions(C)
    # Read sets, then lookback/includes relations, then FOLLOW sets
    read_sets = self.compute_read_sets(C, nt_trans, empty_syms)
    lookback, include = self.compute_lookback_includes(C, nt_trans, empty_syms)
    follow_sets = self.compute_follow_sets(nt_trans, read_sets, include)
    # Finally attach the lookaheads to the productions themselves
    self.add_lookaheads(lookback, follow_sets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Construct the SLR or LALR parse tables (action and goto) from the
    grammar, populating self.lr_action, self.lr_goto, and recording any
    shift/reduce and reduce/reduce conflicts encountered.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto # Goto array
    action = self.lr_action # Action array
    log = self.log # Logger for output
    actionp = {} # Action production array (temporary)
    log.info('Parsing method: %s', self.lr_method)
    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()
    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)
    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [] # List of actions
        st_action = {}   # action[terminal] for this state (>0 shift, <0 reduce, 0 accept)
        st_actionp = {}  # production responsible for each action (for diagnostics)
        st_goto = {}     # goto[nonterminal] for this state
        log.info('')
        log.info('state %d', st)
        log.info('')
        for p in I:
            log.info(' (%d) %s', p.number, p)
        log.info('')
        for p in I:
            if p.len == p.lr_index + 1:
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action['$end'] = 0
                    st_actionp['$end'] = p
                else:
                    # We are at the end of a production. Reduce!
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a, p, 'reduce using rule %d (%s)' % (p.number, p)))
                        r = st_action.get(a)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Need to decide on shift or reduce here
                                # By default we favor shifting. Need to add
                                # some precedence rules here.
                                # Shift precedence comes from the token
                                sprec, slevel = Precedence.get(a, ('right', 0))
                                # Reduce precedence comes from rule being reduced (p)
                                rprec, rlevel = Productions[p.number].prec
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # nonassoc at equal precedence: make it an error
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info(' ! shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict. In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp, rejectp = pp, oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp, rejectp = oldp, pp
                                self.rr_conflicts.append((st, chosenp, rejectp))
                                log.info(' ! reduce/reduce conflict for %s resolved using rule %d (%s)',
                                         a, st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            # No existing action for this terminal: plain reduce
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1] # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I, a)
                    j = self.lr0_cidhash.get(id(g), -1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a, p, 'shift and go to state %d' % j))
                        r = st_action.get(a)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError('Shift/shift conflict in state %d' % st)
                            elif r < 0:
                                # Do a precedence check.
                                # - if precedence of reduce rule is higher, we reduce.
                                # - if precedence of reduce is same and left assoc, we reduce.
                                # - otherwise we shift
                                # Shift precedence comes from the token
                                sprec, slevel = Precedence.get(a, ('right', 0))
                                # Reduce precedence comes from the rule that could have been reduced
                                rprec, rlevel = Productions[st_actionp[a].number].prec
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info(' ! shift/reduce conflict for %s resolved as shift', a)
                                        self.sr_conflicts.append((st, a, 'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info(' ! shift/reduce conflict for %s resolved as reduce', a)
                                        self.sr_conflicts.append((st, a, 'reduce'))
                            else:
                                raise LALRError('Unknown conflict in state %d' % st)
                        else:
                            # No conflict: record the shift
                            st_action[a] = j
                            st_actionp[a] = p
        # Print the actions associated with each terminal
        _actprint = {}
        for a, p, m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info(' %-15s %s', a, m)
                    _actprint[(a, m)] = 1
        log.info('')
        # Print the actions that were not used. (debugging)
        not_used = 0
        for a, p, m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a, m) in _actprint:
                        log.debug(' ! %-15s [ %s ]', a, m)
                        not_used = 1
                        _actprint[(a, m)] = 1
        if not_used:
            log.debug('')
        # Construct the goto table for this state
        nkeys = {}
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I, n)
            j = self.lr0_cidhash.get(id(g), -1)
            if j >= 0:
                st_goto[n] = j
                log.info(' %-30s shift and go to state %d', n, j)
        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self, tabmodule, outputdir='', signature=''):
    """Write the LR parsing tables to a generated Python module file.

    tabmodule is the (possibly dotted) module name for the table file,
    outputdir the directory to write into, and signature a hash of the
    grammar used later to detect staleness.  Raises IOError rather than
    overwriting an already-imported table module.
    """
    if isinstance(tabmodule, types.ModuleType):
        raise IOError("Won't overwrite existing tabmodule")
    basemodulename = tabmodule.split('.')[-1]
    filename = os.path.join(outputdir, basemodulename) + '.py'
    try:
        f = open(filename, 'w')
        f.write('''
# %s
# This file is automatically generated. Do not edit.
# pylint: disable=W,C,R
_tabversion = %r
_lr_method = %r
_lr_signature = %r
''' % (os.path.basename(filename), __tabversion__, self.lr_method, signature))
        # Change smaller to 0 to go back to original tables
        smaller = 1
        # Factor out names to try and make smaller
        if smaller:
            # items maps terminal name -> ([states], [actions]) so the
            # terminal string is written once instead of per state
            items = {}
            for s, nd in self.lr_action.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)
            f.write('\n_lr_action_items = {')
            for k, v in items.items():
                f.write('%r:([' % k)
                for i in v[0]:
                    f.write('%r,' % i)
                f.write('],[')
                for i in v[1]:
                    f.write('%r,' % i)
                f.write(']),')
            f.write('}\n')
            # Emit code that re-expands the factored items at import time
            f.write('''
_lr_action = {}
for _k, _v in _lr_action_items.items():
    for _x,_y in zip(_v[0],_v[1]):
        if not _x in _lr_action: _lr_action[_x] = {}
        _lr_action[_x][_k] = _y
del _lr_action_items
''')
        else:
            f.write('\n_lr_action = { ')
            for k, v in self.lr_action.items():
                f.write('(%r,%r):%r,' % (k[0], k[1], v))
            f.write('}\n')
        if smaller:
            # Factor out names to try and make smaller
            items = {}
            for s, nd in self.lr_goto.items():
                for name, v in nd.items():
                    i = items.get(name)
                    if not i:
                        i = ([], [])
                        items[name] = i
                    i[0].append(s)
                    i[1].append(v)
            f.write('\n_lr_goto_items = {')
            for k, v in items.items():
                f.write('%r:([' % k)
                for i in v[0]:
                    f.write('%r,' % i)
                f.write('],[')
                for i in v[1]:
                    f.write('%r,' % i)
                f.write(']),')
            f.write('}\n')
            f.write('''
_lr_goto = {}
for _k, _v in _lr_goto_items.items():
    for _x, _y in zip(_v[0], _v[1]):
        if not _x in _lr_goto: _lr_goto[_x] = {}
        _lr_goto[_x][_k] = _y
del _lr_goto_items
''')
        else:
            f.write('\n_lr_goto = { ')
            for k, v in self.lr_goto.items():
                f.write('(%r,%r):%r,' % (k[0], k[1], v))
            f.write('}\n')
        # Write production table
        f.write('_lr_productions = [\n')
        for p in self.lr_productions:
            if p.func:
                f.write(' (%r,%r,%d,%r,%r,%d),\n' % (p.str, p.name, p.len,
                        p.func, os.path.basename(p.file), p.line))
            else:
                f.write(' (%r,%r,%d,None,None,None),\n' % (str(p), p.name, p.len))
        f.write(']\n')
        f.close()
    except IOError as e:
        raise
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self, filename, signature=''):
    """Pickle the LR parsing tables into the named file.

    The file receives, in order: the table version, the LR method, the
    grammar signature, the action table, the goto table, and a list of
    production descriptions.
    """
    try:
        import cPickle as pickle
    except ImportError:
        import pickle
    with open(filename, 'wb') as outf:
        for obj in (__tabversion__, self.lr_method, signature,
                    self.lr_action, self.lr_goto):
            pickle.dump(obj, outf, pickle_protocol)
        # Productions are flattened to plain tuples so they can be
        # pickled without the Production class being importable
        outp = [(p.str, p.name, p.len, p.func, os.path.basename(p.file), p.line)
                if p.func else (str(p), p.name, p.len, None, None, None)
                for p in self.lr_productions]
        pickle.dump(outp, outf, pickle_protocol)
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return a dictionary of the symbols visible in a caller's frame.

    levels counts stack frames above this function.  The caller's
    globals are copied first and then overlaid with its locals (when
    the two dictionaries differ, i.e. not at module scope).
    """
    frame = sys._getframe(levels)
    env = frame.f_globals.copy()
    if frame.f_globals != frame.f_locals:
        env.update(frame.f_locals)
    return env
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a raw grammar rule docstring into production tuples.

    doc is the rule text, file/line locate it for error messages.
    Returns a list of (file, lineno, prodname, syms) tuples; a line
    beginning with '|' continues the previous production.  Raises
    SyntaxError on malformed rules.
    """
    grammar = []
    lastp = None          # name of the most recent production (for '|')
    dline = line
    for ps in doc.splitlines():
        dline += 1
        tokens = ps.split()
        if not tokens:
            continue
        try:
            if tokens[0] == '|':
                # Continuation of the previous rule
                if not lastp:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, dline))
                prodname = lastp
                syms = tokens[1:]
            else:
                prodname = tokens[0]
                lastp = prodname
                syms = tokens[2:]
                assign = tokens[1]
                if assign != ':' and assign != '::=':
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, dline))
            grammar.append((file, dline, prodname, syms))
        except SyntaxError:
            raise
        except Exception:
            # Any other failure (e.g. missing separator) becomes a SyntaxError
            raise SyntaxError('%s:%d: Syntax error in rule %r' % (file, dline, ps.strip()))
    return grammar
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Collects and validates parser information extracted from a module
    dictionary: start symbol, p_error handler, token list, precedence
    table, and the p_* grammar rule functions.  Errors are reported via
    self.log and accumulated in self.error.
    """
    def __init__(self, pdict, log=None):
        self.pdict = pdict        # module dictionary being introspected
        self.start = None         # start symbol, if declared
        self.error_func = None    # p_error handler, if declared
        self.tokens = None        # sorted token list
        self.modules = set()      # modules contributing rules (for dup checks)
        self.grammar = []         # parsed (funcname, production) pairs
        self.error = False        # set True on any validation failure
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log

    # Get all of the basic information
    def get_all(self):
        """Extract start symbol, error function, tokens, precedence, and p_ rules."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()

    # Validate all of the information
    def validate_all(self):
        """Run every validation step; returns True if any error was found."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_modules()
        return self.error

    # Compute a signature over the grammar
    def signature(self):
        """Return a string fingerprint of the grammar, used to decide
        whether previously generated tables are still valid."""
        parts = []
        try:
            if self.start:
                parts.append(self.start)
            if self.prec:
                parts.append(''.join([''.join(p) for p in self.prec]))
            if self.tokens:
                parts.append(' '.join(self.tokens))
            for f in self.pfuncs:
                if f[3]:
                    parts.append(f[3])
        except (TypeError, ValueError):
            pass
        return ''.join(parts)

    # -----------------------------------------------------------------------------
    # validate_modules()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file. Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work). Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------
    def validate_modules(self):
        """Warn about duplicated p_rulename() definitions in source modules."""
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
        for module in self.modules:
            try:
                lines, linen = inspect.getsourcelines(module)
            except IOError:
                # Source unavailable (e.g. compiled module); skip it
                continue
            counthash = {}
            for linen, line in enumerate(lines):
                linen += 1
                m = fre.match(line)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        filename = inspect.getsourcefile(module)
                        self.log.warning('%s:%d: Function %s redefined. Previously defined on line %d',
                                         filename, linen, name, prev)

    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')

    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start, string_types):
                self.log.error("'start' must be a string")

    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')

    # Validate the error function
    def validate_error_func(self):
        """Check that p_error (if defined) is a callable taking one argument."""
        if self.error_func:
            if isinstance(self.error_func, types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = True
                return
            eline = self.error_func.__code__.co_firstlineno
            efile = self.error_func.__code__.co_filename
            module = inspect.getmodule(self.error_func)
            self.modules.add(module)
            # Bound methods carry an implicit self; discount it
            argcount = self.error_func.__code__.co_argcount - ismethod
            if argcount != 1:
                self.log.error('%s:%d: p_error() requires 1 argument', efile, eline)
                self.error = True

    # Get the tokens map
    def get_tokens(self):
        """Fetch and sort the module's token list; flag missing/invalid lists."""
        tokens = self.pdict.get('tokens')
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return
        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return
        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return
        self.tokens = sorted(tokens)

    # Validate the tokens
    def validate_tokens(self):
        """Reject the reserved token name 'error' and warn on duplicates."""
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = True
            return
        terminals = set()
        for n in self.tokens:
            if n in terminals:
                self.log.warning('Token %r multiply defined', n)
            terminals.add(n)

    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get('precedence')

    # Validate and parse the precedence map
    def validate_precedence(self):
        """Flatten the precedence table into (term, assoc, level) triples."""
        preclist = []
        if self.prec:
            if not isinstance(self.prec, (list, tuple)):
                self.log.error('precedence must be a list or tuple')
                self.error = True
                return
            for level, p in enumerate(self.prec):
                if not isinstance(p, (list, tuple)):
                    self.log.error('Bad precedence table')
                    self.error = True
                    return
                if len(p) < 2:
                    self.log.error('Malformed precedence entry %s. Must be (assoc, term, ..., term)', p)
                    self.error = True
                    return
                assoc = p[0]
                if not isinstance(assoc, string_types):
                    self.log.error('precedence associativity must be a string')
                    self.error = True
                    return
                for term in p[1:]:
                    if not isinstance(term, string_types):
                        self.log.error('precedence items must be strings')
                        self.error = True
                        return
                    # Levels are 1-based; higher binds tighter
                    preclist.append((term, assoc, level+1))
        self.preclist = preclist

    # Get all p_functions from the grammar
    def get_pfunctions(self):
        """Collect every p_* rule function (excluding p_error), sorted stably."""
        p_functions = []
        for name, item in self.pdict.items():
            if not name.startswith('p_') or name == 'p_error':
                continue
            if isinstance(item, (types.FunctionType, types.MethodType)):
                line = getattr(item, 'co_firstlineno', item.__code__.co_firstlineno)
                module = inspect.getmodule(item)
                p_functions.append((line, module, name, item.__doc__))
        # Sort all of the actions by line number; make sure to stringify
        # modules to make them sortable, since `line` may not uniquely sort all
        # p functions
        p_functions.sort(key=lambda p_function: (
            p_function[0],
            str(p_function[1]),
            p_function[2],
            p_function[3]))
        self.pfuncs = p_functions

    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check each p_* function's signature and parse its docstring grammar."""
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error('no rules of the form p_rulename are defined')
            self.error = True
            return
        for line, module, name, doc in self.pfuncs:
            file = inspect.getsourcefile(module)
            func = self.pdict[name]
            # Methods need an extra slot for self
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func.__code__.co_argcount > reqargs:
                self.log.error('%s:%d: Rule %r has too many arguments', file, line, func.__name__)
                self.error = True
            elif func.__code__.co_argcount < reqargs:
                self.log.error('%s:%d: Rule %r requires an argument', file, line, func.__name__)
                self.error = True
            elif not func.__doc__:
                self.log.warning('%s:%d: No documentation string specified in function %r (ignored)',
                                 file, line, func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc, file, line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError as e:
                    self.log.error(str(e))
                    self.error = True
                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.modules.add(module)
        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n, v in self.pdict.items():
            if n.startswith('p_') and isinstance(v, (types.FunctionType, types.MethodType)):
                continue
            if n.startswith('t_'):
                continue
            if n.startswith('p_') and n != 'p_error':
                self.log.warning('%r not defined as a function', n)
            if ((isinstance(v, types.FunctionType) and v.__code__.co_argcount == 1) or
                    (isinstance(v, types.MethodType) and v.__func__.__code__.co_argcount == 2)):
                if v.__doc__:
                    try:
                        # Heuristic: a docstring like "name : ..." suggests a
                        # grammar rule missing its p_ prefix
                        doc = v.__doc__.split(' ')
                        if doc[1] == ':':
                            self.log.warning('%s:%d: Possible grammar rule %r defined without p_ prefix',
                                             v.__code__.co_filename, v.__code__.co_firstlineno, n)
                    except IndexError:
                        pass
        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=True, optimize=False, write_tables=True, debugfile=debug_file,
         outputdir=None, debuglog=None, errorlog=None, picklefile=None):
    """Build and return an LRParser from grammar rules found in the
    caller's module (or the supplied module).

    Tries to reuse previously generated tables (module or pickle) whose
    signature matches the current grammar; otherwise validates the
    grammar, generates new tables with LRGeneratedTable, optionally
    writes them out, and binds the rule callables.  Raises YaccError if
    the parser cannot be built.
    """
    if tabmodule is None:
        tabmodule = tab_module
    # Reference to the parsing method of the last built parser
    global parse
    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        pdict = dict(_items)
        # If no __file__ or __package__ attributes are available, try to obtain them
        # from the __module__ instead
        if '__file__' not in pdict:
            pdict['__file__'] = sys.modules[pdict['__module__']].__file__
        if '__package__' not in pdict and '__module__' in pdict:
            if hasattr(sys.modules[pdict['__module__']], '__package__'):
                pdict['__package__'] = sys.modules[pdict['__module__']].__package__
    else:
        # No module given: introspect the frame that called yacc()
        pdict = get_caller_module_dict(2)
    if outputdir is None:
        # If no output directory is set, the location of the output files
        # is determined according to the following rules:
        #     - If tabmodule specifies a package, files go into that package directory
        #     - Otherwise, files go in the same directory as the specifying module
        if isinstance(tabmodule, types.ModuleType):
            srcfile = tabmodule.__file__
        else:
            if '.' not in tabmodule:
                srcfile = pdict['__file__']
            else:
                parts = tabmodule.split('.')
                pkgname = '.'.join(parts[:-1])
                exec('import %s' % pkgname)
                srcfile = getattr(sys.modules[pkgname], '__file__', '')
        outputdir = os.path.dirname(srcfile)
    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = pdict.get('__package__')
    if pkg and isinstance(tabmodule, str):
        if '.' not in tabmodule:
            tabmodule = pkg + '.' + tabmodule
    # Set start symbol if it's specified directly using an argument
    if start is not None:
        pdict['start'] = start
    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict, log=errorlog)
    pinfo.get_all()
    if pinfo.error:
        raise YaccError('Unable to build parser')
    # Check signature against table files (if any)
    signature = pinfo.signature()
    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # A matching signature (or optimize mode) lets us skip regeneration
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr, pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception as e:
                errorlog.warning('There was a problem loading the table file: %r', e)
    except VersionError as e:
        errorlog.warning(str(e))
    except ImportError:
        # No existing table module; fall through and regenerate
        pass
    if debuglog is None:
        if debug:
            try:
                debuglog = PlyLogger(open(os.path.join(outputdir, debugfile), 'w'))
            except IOError as e:
                errorlog.warning("Couldn't open %r. %s" % (debugfile, e))
                debuglog = NullLogger()
        else:
            debuglog = NullLogger()
    debuglog.info('Created by PLY version %s (http://www.dabeaz.com/ply)', __version__)
    errors = False
    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError('Unable to build parser')
    if not pinfo.error_func:
        errorlog.warning('no p_error() function is defined')
    # Create a grammar object
    grammar = Grammar(pinfo.tokens)
    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term, assoc, level)
        except GrammarError as e:
            errorlog.warning('%s', e)
    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname, syms, funcname, file, line)
        except GrammarError as e:
            errorlog.error('%s', e)
            errors = True
    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError as e:
        errorlog.error(str(e))
        errors = True
    if errors:
        raise YaccError('Unable to build parser')
    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error('%s:%d: Symbol %r used, but not defined as a token or a rule', prod.file, prod.line, sym)
        errors = True
    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info('')
        debuglog.info('Unused terminals:')
        debuglog.info('')
        for term in unused_terminals:
            errorlog.warning('Token %r defined, but not used', term)
            debuglog.info(' %s', term)
    # Print out all productions to the debug log
    if debug:
        debuglog.info('')
        debuglog.info('Grammar')
        debuglog.info('')
        for n, p in enumerate(grammar.Productions):
            debuglog.info('Rule %-5d %s', n, p)
    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning('%s:%d: Rule %r defined, but not used', prod.file, prod.line, prod.name)
    if len(unused_terminals) == 1:
        errorlog.warning('There is 1 unused token')
    if len(unused_terminals) > 1:
        errorlog.warning('There are %d unused tokens', len(unused_terminals))
    if len(unused_rules) == 1:
        errorlog.warning('There is 1 unused rule')
    if len(unused_rules) > 1:
        errorlog.warning('There are %d unused rules', len(unused_rules))
    if debug:
        debuglog.info('')
        debuglog.info('Terminals, with rules where they appear')
        debuglog.info('')
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info('%-20s : %s', term, ' '.join([str(s) for s in grammar.Terminals[term]]))
        debuglog.info('')
        debuglog.info('Nonterminals, with rules where they appear')
        debuglog.info('')
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info('%-20s : %s', nonterm, ' '.join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info('')
    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning('Symbol %r is unreachable', u)
        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error('Infinite recursion detected for symbol %r', inf)
            errors = True
    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error('Precedence rule %r defined for unknown symbol %r', assoc, term)
        errors = True
    if errors:
        raise YaccError('Unable to build parser')
    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug('Generating %s tables', method)
    lr = LRGeneratedTable(grammar, method, debuglog)
    if debug:
        num_sr = len(lr.sr_conflicts)
        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning('1 shift/reduce conflict')
        elif num_sr > 1:
            errorlog.warning('%d shift/reduce conflicts', num_sr)
        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning('1 reduce/reduce conflict')
        elif num_rr > 1:
            errorlog.warning('%d reduce/reduce conflicts', num_rr)
    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning('')
        debuglog.warning('Conflicts:')
        debuglog.warning('')
        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning('shift/reduce conflict for %s in state %d resolved as %s', tok, state, resolution)
        already_reported = set()
        for state, rule, rejected in lr.rr_conflicts:
            if (state, id(rule), id(rejected)) in already_reported:
                continue
            debuglog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            debuglog.warning('rejected rule (%s) in state %d', rejected, state)
            errorlog.warning('reduce/reduce conflict in state %d resolved using rule (%s)', state, rule)
            errorlog.warning('rejected rule (%s) in state %d', rejected, state)
            already_reported.add((state, id(rule), id(rejected)))
        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning('Rule (%s) is never reduced', rejected)
                errorlog.warning('Rule (%s) is never reduced', rejected)
                warned_never.append(rejected)
    # Write the table file if requested
    if write_tables:
        try:
            lr.write_table(tabmodule, outputdir, signature)
            if tabmodule in sys.modules:
                del sys.modules[tabmodule]
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (tabmodule, e))
    # Write a pickled version of the tables
    if picklefile:
        try:
            lr.pickle_table(picklefile, signature)
        except IOError as e:
            errorlog.warning("Couldn't create %r. %s" % (picklefile, e))
    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr, pinfo.error_func)
    parse = parser.parse
    return parser
| 0 |
/Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages | /Users/nchebolu/work/raptor/taps/tap-okta/.meltano/extractors/tap-okta/venv/lib/python3.11/site-packages/ply/lex.py | # -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2018
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
# PLY release version and the version of the generated lexer-table format.
# __tabversion__ is compared against cached table files to detect staleness.
__version__ = '3.11'
__tabversion__ = '3.10'
import re
import sys
import types
import copy
import os
import inspect
# This tuple contains known string types
try:
    # Python 2.6
    StringTypes = (types.StringType, types.UnicodeType)
except AttributeError:
    # Python 3.0
    StringTypes = (str, bytes)
# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')
# Exception thrown when invalid token encountered and no default error
# handler is defined.
class LexError(Exception):
    """Raised when the lexer hits input it cannot tokenize and no
    user-supplied error handler is defined.

    args carries the message tuple; text holds the remaining input at
    the point of failure.
    """
    def __init__(self, message, s):
        # Exception.__init__ stores (message,) into self.args
        Exception.__init__(self, message)
        self.text = s
# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    """A single token produced by the lexer.

    Instances carry four attributes set by the lexing engine: type,
    value, lineno, and lexpos.
    """
    def __str__(self):
        fields = (self.type, self.value, self.lineno, self.lexpos)
        return 'LexToken(%s,%r,%d,%d)' % fields

    def __repr__(self):
        return self.__str__()
# This object is a stand-in for a logging object created by the
# logging module.
class PlyLogger(object):
    """Stand-in for a ``logging.Logger`` that writes formatted messages to a
    file-like object supplied at construction time."""

    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        # Critical (and the info/debug aliases below) carry no severity prefix.
        self.f.write('%s\n' % (msg % args))

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: %s\n' % (msg % args))

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: %s\n' % (msg % args))

    # info and debug share critical's plain formatting.
    info = critical
    debug = critical
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger stand-in used when no output is wanted.

    Every attribute access and every call simply returns the instance
    itself, so arbitrary chained logging calls are silently swallowed.
    """

    def __call__(self, *args, **kwargs):
        return self

    def __getattribute__(self, name):
        return self
# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
# input() - Store a new string in the lexer
# token() - Get the next token
# clone() - Clone the lexer
#
# lineno - Current line number
# lexpos - Current position in the input string
# -----------------------------------------------------------------------------
class Lexer:
    """Lexer run-time engine.

    Public interface:
        input(s)  -- store a new string in the lexer
        token()   -- return the next LexToken (None at end of input)
        clone()   -- duplicate the lexer, optionally rebinding bound rules
        lineno    -- current line number (updated only by user rules)
        lexpos    -- current position within the input string
    """
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number
        self.lexoptimize = False      # Optimized mode
    def clone(self, object=None):
        """Return a shallow copy of this lexer.

        If *object* is given, all rule functions bound to the old module/object
        are rebound (by name) to methods of *object*, so the clone tokenizes
        using the new object's rules.
        """
        c = copy.copy(self)
        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.
        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            # Entry has no function (string rule or unused slot); keep as-is.
                            newfindex.append(f)
                            continue
                        # Rebind the rule function to the same-named attribute of *object*.
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c
    # ------------------------------------------------------------
    # writetab() - Write lexer information to a table file
    # ------------------------------------------------------------
    def writetab(self, lextab, outputdir=''):
        """Serialize this lexer's tables to *outputdir*/<lextab>.py so a later
        run can rebuild the lexer via readtab() without re-analyzing rules.

        Raises IOError if *lextab* is already an imported module object.
        """
        if isinstance(lextab, types.ModuleType):
            raise IOError("Won't overwrite existing lextab module")
        basetabmodule = lextab.split('.')[-1]
        filename = os.path.join(outputdir, basetabmodule) + '.py'
        with open(filename, 'w') as tf:
            tf.write('# %s.py. This file automatically created by PLY (version %s). Don\'t edit!\n' % (basetabmodule, __version__))
            tf.write('_tabversion   = %s\n' % repr(__tabversion__))
            tf.write('_lextokens    = set(%s)\n' % repr(tuple(sorted(self.lextokens))))
            tf.write('_lexreflags   = %s\n' % repr(int(self.lexreflags)))
            tf.write('_lexliterals  = %s\n' % repr(self.lexliterals))
            tf.write('_lexstateinfo = %s\n' % repr(self.lexstateinfo))
            # Rewrite the lexstatere table, replacing function objects with function names
            tabre = {}
            for statename, lre in self.lexstatere.items():
                titem = []
                for (pat, func), retext, renames in zip(lre, self.lexstateretext[statename], self.lexstaterenames[statename]):
                    titem.append((retext, _funcs_to_names(func, renames)))
                tabre[statename] = titem
            tf.write('_lexstatere   = %s\n' % repr(tabre))
            tf.write('_lexstateignore = %s\n' % repr(self.lexstateignore))
            taberr = {}
            for statename, ef in self.lexstateerrorf.items():
                taberr[statename] = ef.__name__ if ef else None
            tf.write('_lexstateerrorf = %s\n' % repr(taberr))
            tabeof = {}
            for statename, ef in self.lexstateeoff.items():
                tabeof[statename] = ef.__name__ if ef else None
            tf.write('_lexstateeoff = %s\n' % repr(tabeof))
    # ------------------------------------------------------------
    # readtab() - Read lexer information from a tab file
    # ------------------------------------------------------------
    def readtab(self, tabfile, fdict):
        """Rebuild this lexer from a previously written table module.

        *tabfile* is a module name (or module object); *fdict* maps rule
        function names back to callables.  Raises ImportError when the table
        was written by an incompatible PLY version.
        """
        if isinstance(tabfile, types.ModuleType):
            lextab = tabfile
        else:
            exec('import %s' % tabfile)
            lextab = sys.modules[tabfile]
        if getattr(lextab, '_tabversion', '0.0') != __tabversion__:
            raise ImportError('Inconsistent PLY version')
        self.lextokens = lextab._lextokens
        self.lexreflags = lextab._lexreflags
        self.lexliterals = lextab._lexliterals
        self.lextokens_all = self.lextokens | set(self.lexliterals)
        self.lexstateinfo = lextab._lexstateinfo
        self.lexstateignore = lextab._lexstateignore
        self.lexstatere = {}
        self.lexstateretext = {}
        for statename, lre in lextab._lexstatere.items():
            titem = []
            txtitem = []
            for pat, func_name in lre:
                titem.append((re.compile(pat, lextab._lexreflags), _names_to_funcs(func_name, fdict)))
            self.lexstatere[statename] = titem
            # NOTE(review): txtitem is never populated here, so lexstateretext
            # entries are empty after readtab() — mirrors upstream PLY; confirm
            # before relying on lexretext in optimized mode.
            self.lexstateretext[statename] = txtitem
        self.lexstateerrorf = {}
        for statename, ef in lextab._lexstateerrorf.items():
            self.lexstateerrorf[statename] = fdict[ef]
        self.lexstateeoff = {}
        for statename, ef in lextab._lexstateeoff.items():
            self.lexstateeoff[statename] = fdict[ef]
        self.begin('INITIAL')
    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        """Store string *s* as the input to tokenize and reset the position."""
        # Pull off the first character to see if s looks like a string
        c = s[:1]
        if not isinstance(c, StringTypes):
            raise ValueError('Expected a string')
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)
    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        """Switch to lexer *state*, swapping in its regexes and handlers."""
        if state not in self.lexstatere:
            raise ValueError('Undefined state')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state
    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        """Enter *state*, remembering the current state for pop_state()."""
        self.lexstatestack.append(self.lexstate)
        self.begin(state)
    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        """Return to the state saved by the most recent push_state()."""
        self.begin(self.lexstatestack.pop())
    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        """Return the name of the active lexer state."""
        return self.lexstate
    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        """Advance the input position by *n* characters without matching."""
        self.lexpos += n
    # ------------------------------------------------------------
    # opttoken() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible.  Don't make changes unless you really know what
    # you are doing
    # ------------------------------------------------------------
    def token(self):
        """Return the next LexToken, or None when the input is exhausted.

        Matching order: ignored characters are skipped, then each master
        regex is tried at the current position, then single-character
        literals, then the t_error rule; an unhandled character raises
        LexError.
        """
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata
        while lexpos < lexlen:
            # This code provides some short-circuit code for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue
            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue
                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos
                # lastindex identifies which named rule group matched.
                i = m.lastindex
                func, tok.type = lexindexfunc[i]
                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break
                lexpos = m.end()
                # If token is processed by a function, call it
                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos
                newtok = func(tok)
                # Every function must return a token, if nothing, we just move to next token
                if not newtok:
                    lexpos = self.lexpos        # This is here in case user has updated lexpos.
                    lexignore = self.lexignore  # This is here in case there was a state change
                    break
                # Verify type of the token.  If not in the token map, raise an error
                if not self.lexoptimize:
                    if newtok.type not in self.lextokens_all:
                        raise LexError("%s:%d: Rule '%s' returned an unknown token type '%s'" % (
                            func.__code__.co_filename, func.__code__.co_firstlineno,
                            func.__name__, newtok.type), lexdata[lexpos:])
                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok
                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError("Scanning error. Illegal character '%s'" % (lexdata[lexpos]), lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok
                self.lexpos = lexpos
                raise LexError("Illegal character '%s' at index %d" % (lexdata[lexpos], lexpos), lexdata[lexpos:])
        # End of input: invoke the t_eof rule, if one was defined.
        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok
        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None
    # Iterator interface
    def __iter__(self):
        return self
    def next(self):
        # Python 2 iterator protocol; __next__ below aliases it for Python 3.
        t = self.token()
        if t is None:
            raise StopIteration
        return t
    __next__ = next
# -----------------------------------------------------------------------------
# ==== Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
return getattr(func, 'regex', func.__doc__)
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return a dict of the symbols visible *levels* frames up the stack.

    The caller's globals are overlaid with its locals (when they differ),
    giving the environment in which lex()/yacc() was invoked.
    """
    frame = sys._getframe(levels)
    symbols = dict(frame.f_globals)
    if frame.f_globals != frame.f_locals:
        symbols.update(frame.f_locals)
    return symbols
# -----------------------------------------------------------------------------
# _funcs_to_names()
#
# Given a list of regular expression functions, this converts it to a list
# suitable for output to a table file
# -----------------------------------------------------------------------------
def _funcs_to_names(funclist, namelist):
result = []
for f, name in zip(funclist, namelist):
if f and f[0]:
result.append((name, f[1]))
else:
result.append(f)
return result
# -----------------------------------------------------------------------------
# _names_to_funcs()
#
# Given a list of regular expression function names, this converts it back to
# functions.
# -----------------------------------------------------------------------------
def _names_to_funcs(namelist, fdict):
result = []
for n in namelist:
if n and n[0]:
result.append((fdict[n[0]], n[1]))
else:
result.append(n)
return result
# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
if not relist:
return []
regex = '|'.join(relist)
try:
lexre = re.compile(regex, reflags)
# Build the index to function map for the matching engine
lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
lexindexnames = lexindexfunc[:]
for f, i in lexre.groupindex.items():
handle = ldict.get(f, None)
if type(handle) in (types.FunctionType, types.MethodType):
lexindexfunc[i] = (handle, toknames[f])
lexindexnames[i] = f
elif handle is not None:
lexindexnames[i] = f
if f.find('ignore_') > 0:
lexindexfunc[i] = (None, None)
else:
lexindexfunc[i] = (None, toknames[f])
return [(lexre, lexindexfunc)], [regex], [lexindexnames]
except Exception:
m = int(len(relist)/2)
if m == 0:
m = 1
llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
return (llist+rlist), (lre+rre), (lnames+rnames)
# -----------------------------------------------------------------------------
# def _statetoken(s,names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states,tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
parts = s.split('_')
for i, part in enumerate(parts[1:], 1):
if part not in names and part != 'ANY':
break
if i > 1:
states = tuple(parts[1:i])
else:
states = ('INITIAL',)
if 'ANY' in states:
states = tuple(names)
tokenname = '_'.join(parts[i:])
return (states, tokenname)
# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    """Collects and validates lexer rule information from a symbol dictionary.

    Given *ldict* (the symbols of the module/object defining the rules),
    get_all() extracts tokens, literals, states and t_* rules, and
    validate_all() checks them, accumulating problems on self.error.
    """
    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict          # Symbol table being reflected over
        self.error_func = None      # t_error function, if discovered
        self.tokens = []            # Declared token names
        self.reflags = reflags      # re flags used when test-compiling rules
        self.stateinfo = {'INITIAL': 'inclusive'}  # statename -> 'inclusive'|'exclusive'
        self.modules = set()        # Modules contributing rules (for duplicate checks)
        self.error = False          # True once any validation problem is seen
        self.log = PlyLogger(sys.stderr) if log is None else log
    # Get all of the basic information
    def get_all(self):
        """Extract tokens, literals, states and rules from ldict."""
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()
    # Validate all of the information
    def validate_all(self):
        """Run every validation pass; return True if any problem was found."""
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error
    # Get the tokens map
    def get_tokens(self):
        """Read the 'tokens' declaration from ldict into self.tokens."""
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return
        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return
        # NOTE(review): unreachable — an empty sequence already returned above.
        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return
        self.tokens = tokens
    # Validate the tokens
    def validate_tokens(self):
        """Check token names are valid identifiers; warn on duplicates."""
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error("Bad token name '%s'", n)
                self.error = True
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1
    # Get the literals specifier
    def get_literals(self):
        """Read the 'literals' declaration (defaults to the empty string)."""
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''
    # Validate literals
    def validate_literals(self):
        """Check every literal is a single-character string."""
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error('Invalid literal %s. Must be a single character', repr(c))
                    self.error = True
        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True
    def get_states(self):
        """Read and validate the 'states' declaration into self.stateinfo."""
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %s. Must be a tuple (statename,'exclusive|inclusive')", repr(s))
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %s must be a string', repr(name))
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %s must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State '%s' already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype
    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)
    def get_rules(self):
        """Partition every t_* symbol into function/string/error/eof/ignore rules per state."""
        tsymbols = [f for f in self.ldict if f[:2] == 't_']
        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym = {}         # Symbols defined as functions
        self.strsym = {}          # Symbols defined as strings
        self.ignore = {}          # Ignore strings by state
        self.errorf = {}          # Error functions by state
        self.eoff = {}            # EOF functions by state
        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []
        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return
        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname
            if hasattr(t, '__call__'):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    # 'ignore' may only be a string; a function here is a user mistake.
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule '%s' must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                    if '\\' in t:
                        self.log.warning("%s contains a literal backslash '\\'", f)
                elif tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True
        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)
        # Sort the strings by regular expression length
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)
    # Validate all of the t_rules collected
    def validate_rules(self):
        """Check every collected rule: arity, regex validity, and declared tokens."""
        for state in self.stateinfo:
            # Validate all rules defined by functions
            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)
                tokname = self.toknames[fname]
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
                    continue
                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule '%s'", file, line, f.__name__)
                    self.error = True
                    continue
                try:
                    # Test-compile exactly as _form_master_re() will (named group wrapper).
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule '%s' matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule '%s' is escaped with '\\#'", file, line, f.__name__)
                    self.error = True
            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule '%s' must be defined as a function", name)
                    self.error = True
                    continue
                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule '%s' defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue
                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
                    if (c.match('')):
                        self.log.error("Regular expression for rule '%s' matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule '%s'. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule '%s' is escaped with '\\#'", name)
                    self.error = True
            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state '%s'", state)
                self.error = True
            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule '%s' has too many arguments", file, line, f.__name__)
                    self.error = True
                if nargs < reqargs:
                    self.log.error("%s:%d: Rule '%s' requires an argument", file, line, f.__name__)
                    self.error = True
        for module in self.modules:
            self.validate_module(module)
    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the parser input file.  This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------
    def validate_module(self, module):
        """Scan *module*'s source for duplicate t_* definitions (text-level check)."""
        try:
            lines, linen = inspect.getsourcelines(module)
        except IOError:
            return
        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')
        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1
# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(module=None, object=None, debug=False, optimize=False, lextab='lextab',
        reflags=int(re.VERBOSE), nowarn=False, outputdir=None, debuglog=None, errorlog=None):
    """Build and return a Lexer from rule definitions.

    Rules are taken from *object*, *module*, or (by default) the caller's
    namespace.  With optimize=True a previously written *lextab* module is
    loaded if compatible, and the tables are (re)written after a fresh build.
    Also rebinds the module-level ``lexer``, ``token`` and ``input`` globals
    to the new lexer.  Raises SyntaxError if rule validation fails.
    """
    if lextab is None:
        lextab = 'lextab'
    global lexer
    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    lexobj.lexoptimize = optimize
    global token, input
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the lexer
    if object:
        module = object
    # Get the module dictionary used for the parser
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)
    # Determine if the module is package of a package or not.
    # If so, fix the tabmodule setting so that tables load correctly
    pkg = ldict.get('__package__')
    if pkg and isinstance(lextab, str):
        if '.' not in lextab:
            lextab = pkg + '.' + lextab
    # Collect parser information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if not optimize:
        if linfo.validate_all():
            raise SyntaxError("Can't build lexer")
    if optimize and lextab:
        try:
            # Fast path: reuse previously generated tables when compatible.
            lexobj.readtab(lextab, ldict)
            token = lexobj.token
            input = lexobj.input
            lexer = lexobj
            return lexobj
        except ImportError:
            pass
    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens   = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states   = %r', linfo.stateinfo)
    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)
    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals
    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)
    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo
    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []
        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)
        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)
        regexs[state] = regex_list
    # Build the master regular expressions
    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')
    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)
    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])
    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags
    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')
    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')
    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)
    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state '%s'", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state '%s'", s)
        elif stype == 'inclusive':
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')
    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj
    # If in optimize mode, we write the lextab
    if lextab and optimize:
        if outputdir is None:
            # If no output directory is set, the location of the output files
            # is determined according to the following rules:
            #     - If lextab specifies a package, files go into that package directory
            #     - Otherwise, files go in the same directory as the specifying module
            if isinstance(lextab, types.ModuleType):
                srcfile = lextab.__file__
            else:
                if '.' not in lextab:
                    srcfile = ldict['__file__']
                else:
                    parts = lextab.split('.')
                    pkgname = '.'.join(parts[:-1])
                    exec('import %s' % pkgname)
                    srcfile = getattr(sys.modules[pkgname], '__file__', '')
            outputdir = os.path.dirname(srcfile)
        try:
            lexobj.writetab(lextab, outputdir)
            if lextab in sys.modules:
                del sys.modules[lextab]
        except IOError as e:
            errorlog.warning("Couldn't write lextab module %r. %s" % (lextab, e))
    return lexobj
# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------
def runmain(lexer=None, data=None):
    """Run the lexer as a standalone program, printing each token to stdout.

    Input comes from *data* if given, otherwise from the file named in
    sys.argv[1], otherwise from standard input.  Uses *lexer* when supplied,
    falling back to the module-level token()/input() bindings.
    """
    if not data:
        try:
            with open(sys.argv[1]) as srcfile:
                data = srcfile.read()
        except IndexError:
            # No filename argument: read interactively from stdin.
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()
    feed = lexer.input if lexer else input
    feed(data)
    nexttok = lexer.token if lexer else token
    while True:
        tok = nexttok()
        if not tok:
            break
        sys.stdout.write('(%s,%r,%d,%d)\n' % (tok.type, tok.value, tok.lineno, tok.lexpos))
# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator function can be used to set the regex expression on a function
# when its docstring might need to be set in an alternative way
# -----------------------------------------------------------------------------
def TOKEN(r):
    """Decorator attaching regular expression *r* to a token rule function.

    *r* may be a pattern string, or another rule function whose regex is
    reused.  Useful when the rule's docstring cannot hold the pattern.
    """
    def set_regex(f):
        # A callable r means "borrow that rule's regex"; otherwise r is the pattern.
        f.regex = _get_regex(r) if hasattr(r, '__call__') else r
        return f
    return set_regex

# Alternative spelling of the TOKEN decorator
Token = TOKEN
| 0 |