diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..76add878f8dd778c3381fb3da45c8140db7db510
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+node_modules
+dist
\ No newline at end of file
diff --git a/.npmrc b/.npmrc
new file mode 100644
index 0000000000000000000000000000000000000000..f263a6c7f9503dfb8e4b74cc3b5186ac324785bb
--- /dev/null
+++ b/.npmrc
@@ -0,0 +1,2 @@
+shared-workspace-lockfile = false
+include-workspace-root = true
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..f6ac7ef9fce621d89bd2bd4452e8bed49ca524ce
--- /dev/null
+++ b/Dockerfile
@@ -0,0 +1,13 @@
+# syntax=docker/dockerfile:1
+# read the doc: https://huggingface.co/docs/hub/spaces-sdks-docker
+# you will also find guides on how best to write your Dockerfile
+FROM node:20
+
+WORKDIR /app
+
+RUN corepack enable
+
+COPY --link --chown=1000 . .
+
+RUN pnpm install
+CMD ["pnpm", "--filter", "widgets", "dev"]
\ No newline at end of file
diff --git a/README.md b/README.md
index 7e958246abedc8a92988ee1f6e942329e8ffbcc2..e2200bcda0d0c52ad7d4f54eaa2e32fd2f868f3f 100644
--- a/README.md
+++ b/README.md
@@ -5,6 +5,7 @@ colorFrom: pink
 colorTo: red
 sdk: docker
 pinned: false
+app_port: 5173
 ---
 
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+Demo app for [Inference Widgets](https://github.com/huggingface/huggingface.js/tree/main/packages/widgets).
\ No newline at end of file
diff --git a/package.json b/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..c45d934995f1cbfc2c765e4d65c09256e54c0775
--- /dev/null
+++ b/package.json
@@ -0,0 +1,30 @@
+{
+	"license": "MIT",
+	"packageManager": "pnpm@8.10.5",
+	"dependencies": {
+		"@typescript-eslint/eslint-plugin": "^5.51.0",
+		"@typescript-eslint/parser": "^5.51.0",
+		"eslint": "^8.35.0",
+		"eslint-config-prettier": "^9.0.0",
+		"eslint-plugin-prettier": "^4.2.1",
+		"eslint-plugin-svelte": "^2.30.0",
+		"prettier": "^3.0.0",
+		"prettier-plugin-svelte": "^3.0.0",
+		"typescript": "^5.0.0",
+		"vite": "4.1.4"
+	},
+	"scripts": {
+		"lint": "eslint --quiet --fix --ext .cjs,.ts .eslintrc.cjs",
+		"lint:check": "eslint --ext .cjs,.ts .eslintrc.cjs",
+		"format": "prettier --write package.json .prettierrc .vscode .eslintrc.cjs e2e .github *.md",
+		"format:check": "prettier --check package.json .prettierrc .vscode .eslintrc.cjs .github *.md"
+	},
+	"devDependencies": {
+		"@vitest/browser": "^0.29.7",
+		"semver": "^7.5.0",
+		"ts-node": "^10.9.1",
+		"tsup": "^6.7.0",
+		"vitest": "^0.29.4",
+		"webdriverio": "^8.6.7"
+	}
+}
diff --git a/packages/tasks/.prettierignore b/packages/tasks/.prettierignore
new file mode 100644
index 0000000000000000000000000000000000000000..cac0c694965d419e7145c6ae3f371c733d5dba15
--- /dev/null
+++ b/packages/tasks/.prettierignore
@@ -0,0 +1,4 @@
+pnpm-lock.yaml
+# README.md is excluded so code samples don't get reformatted with tabs; tabs don't display well on npm
+README.md
+dist
\ No newline at end of file
diff --git a/packages/tasks/README.md b/packages/tasks/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..67285ef19d3f8a36cc57f8cd8f9022b5a7308c50
--- /dev/null
+++ b/packages/tasks/README.md
@@ -0,0 +1,20 @@
+# Tasks
+
+This package contains data used for https://huggingface.co/tasks.
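+
+A minimal usage sketch (assuming the package entry point re-exports the constants and types defined under `src`, e.g. `TASKS_MODEL_LIBRARIES` from `const.ts`):
+
+```typescript
+import { TASKS_MODEL_LIBRARIES } from "@huggingface/tasks";
+
+// Model libraries compatible with the automatic-speech-recognition task
+console.log(TASKS_MODEL_LIBRARIES["automatic-speech-recognition"]);
+// e.g. ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"]
+```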
+
+## Philosophy behind Tasks
+
+The Task pages are made to lower the barrier of entry to understanding a task that can be solved with machine learning, and to using or training a model to accomplish it. It's a collaborative documentation effort meant to help software developers, social scientists, or anyone without a machine learning background who is interested in understanding how machine learning models can be used to solve a problem.
+
+The task pages avoid jargon so that everyone can understand the documentation, and where specific terminology is needed, it is explained at the most basic level possible. This is important to understand before contributing to Tasks: by the end of every task page, the reader should be able to find and pull a model from the Hub, try it on their own data, and see whether it works for their use case as a proof of concept.
+
+## How to Contribute
+You can open a pull request to contribute documentation for a new task. Under `src` we have a folder for every task that contains two files, `about.md` and `data.ts`. `about.md` contains the markdown part of the page: use cases, resources, and a minimal code block to run inference with a model that belongs to the task. `data.ts` contains references to canonical models and datasets, metrics, the schema of the task, and the information the inference widget needs.
+
+![Anatomy of a Task Page](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/contribution-guide/anatomy.png)
+
+We have a [`dataset`](https://huggingface.co/datasets/huggingfacejs/tasks) that contains the data used in the inference widget. The last file is `const.ts`, which holds the task-to-library mapping (e.g. spacy for token-classification), where you can add a library. The libraries will appear in the top right corner of the task page, as below.
+
+![Libraries of a Task](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/contribution-guide/libraries.png)
+
+This might seem overwhelming, but you don't necessarily need to add all of it in one pull request or on your own; you can simply contribute one section. Feel free to ask for help whenever you need it.
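+
+For reference, a new `data.ts` default-exports a `TaskDataCustom` object (see `src/Types.ts`). A trimmed-down sketch with placeholder repo ids:
+
+```typescript
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [{ description: "A dataset for this task.", id: "user/some-dataset" }],
+	demo: {
+		inputs: [{ label: "Input", content: "Example input", type: "text" }],
+		outputs: [{ label: "Output", content: "Example output", type: "text" }],
+	},
+	metrics: [{ description: "A metric used to evaluate this task.", id: "accuracy" }],
+	models: [{ description: "A canonical model for this task.", id: "user/some-model" }],
+	spaces: [{ description: "A demo Space for this task.", id: "user/some-space" }],
+	summary: "One-sentence summary of the task.",
+	widgetModels: ["user/some-model"],
+};
+
+export default taskData;
+```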
\ No newline at end of file
diff --git a/packages/tasks/package.json b/packages/tasks/package.json
new file mode 100644
index 0000000000000000000000000000000000000000..2ee60dac623d6e28d6c86855cf2ec673e652ca51
--- /dev/null
+++ b/packages/tasks/package.json
@@ -0,0 +1,46 @@
+{
+	"name": "@huggingface/tasks",
+	"packageManager": "pnpm@8.10.5",
+	"version": "0.0.5",
+	"description": "List of ML tasks for huggingface.co/tasks",
+	"repository": "https://github.com/huggingface/huggingface.js.git",
+	"publishConfig": {
+		"access": "public"
+	},
+	"main": "./dist/index.js",
+	"module": "./dist/index.mjs",
+	"types": "./dist/index.d.ts",
+	"exports": {
+		".": {
+			"types": "./dist/index.d.ts",
+			"require": "./dist/index.js",
+			"import": "./dist/index.mjs"
+		}
+	},
+	"source": "src/index.ts",
+	"scripts": {
+		"lint": "eslint --quiet --fix --ext .cjs,.ts .",
+		"lint:check": "eslint --ext .cjs,.ts .",
+		"format": "prettier --write .",
+		"format:check": "prettier --check .",
+		"prepublishOnly": "pnpm run build",
+		"build": "tsup src/index.ts --format cjs,esm --clean --dts",
+		"prepare": "pnpm run build",
+		"check": "tsc"
+	},
+	"files": [
+		"dist",
+		"src",
+		"tsconfig.json"
+	],
+	"keywords": [
+		"huggingface",
+		"hub",
+		"languages"
+	],
+	"author": "Hugging Face",
+	"license": "MIT",
+	"devDependencies": {
+		"typescript": "^5.0.4"
+	}
+}
diff --git a/packages/tasks/pnpm-lock.yaml b/packages/tasks/pnpm-lock.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3ed38c891dea128b57a69af6d76aa1473decd4c
--- /dev/null
+++ b/packages/tasks/pnpm-lock.yaml
@@ -0,0 +1,14 @@
+lockfileVersion: '6.0'
+
+devDependencies:
+  typescript:
+    specifier: ^5.0.4
+    version: 5.0.4
+
+packages:
+
+  /typescript@5.0.4:
+    resolution: {integrity: sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==}
+    engines: {node: '>=12.20'}
+    hasBin: true
+    dev: true
diff --git a/packages/tasks/src/Types.ts b/packages/tasks/src/Types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..0824893f11271a7fe7873a2a0ddc803c8cdc1017
--- /dev/null
+++ b/packages/tasks/src/Types.ts
@@ -0,0 +1,64 @@
+import type { ModelLibraryKey } from "./modelLibraries";
+import type { PipelineType } from "./pipelines";
+
+export interface ExampleRepo {
+	description: string;
+	id: string;
+}
+
+export type TaskDemoEntry =
+	| {
+			filename: string;
+			type: "audio";
+	  }
+	| {
+			data: Array<{
+				label: string;
+				score: number;
+			}>;
+			type: "chart";
+	  }
+	| {
+			filename: string;
+			type: "img";
+	  }
+	| {
+			table: string[][];
+			type: "tabular";
+	  }
+	| {
+			content: string;
+			label: string;
+			type: "text";
+	  }
+	| {
+			text: string;
+			tokens: Array<{
+				end: number;
+				start: number;
+				type: string;
+			}>;
+			type: "text-with-tokens";
+	  };
+
+export interface TaskDemo {
+	inputs: TaskDemoEntry[];
+	outputs: TaskDemoEntry[];
+}
+
+export interface TaskData {
+	datasets: ExampleRepo[];
+	demo: TaskDemo;
+	id: PipelineType;
+	isPlaceholder?: boolean;
+	label: string;
+	libraries: ModelLibraryKey[];
+	metrics: ExampleRepo[];
+	models: ExampleRepo[];
+	spaces: ExampleRepo[];
+	summary: string;
+	widgetModels: string[];
+	youtubeId?: string;
+}
+
+export type TaskDataCustom = Omit<TaskData, "id" | "label" | "libraries">;
diff --git a/packages/tasks/src/audio-classification/about.md b/packages/tasks/src/audio-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..9b1d7c6e9d8900375db0ba0d638cad0bb171676d
--- /dev/null
+++ b/packages/tasks/src/audio-classification/about.md
@@ -0,0 +1,85 @@
+## Use Cases
+
+### Command Recognition
+
+Command recognition or keyword spotting classifies utterances into a predefined set of commands. This is often done on-device for fast response time.
+
+As an example, using the Google Speech Commands dataset, given an input, a model can classify which of the following commands the user is saying:
+
+```
+'yes', 'no', 'up', 'down', 'left', 'right', 'on', 'off', 'stop', 'go', 'unknown', 'silence'
+```
+
+SpeechBrain models can easily perform this task with just a couple of lines of code!
+
+```python
+from speechbrain.pretrained import EncoderClassifier
+model = EncoderClassifier.from_hparams(
+  "speechbrain/google_speech_command_xvector"
+)
+model.classify_file("file.wav")
+```
+
+### Language Identification
+
+Datasets such as VoxLingua107 allow anyone to train language identification models for up to 107 languages! This can be extremely useful as a preprocessing step for other systems. Here's an example [model](https://huggingface.co/TalTechNLP/voxlingua107-epaca-tdnn) trained on VoxLingua107.
+
+### Emotion recognition
+
+Emotion recognition classifies the emotion expressed in an utterance. In addition to trying the widgets, you can use the Inference API to perform audio classification. Here is a simple example that uses a [HuBERT](https://huggingface.co/superb/hubert-large-superb-er) model fine-tuned for this task.
+
+```python
+import json
+import requests
+
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+API_URL = "https://api-inference.huggingface.co/models/superb/hubert-large-superb-er"
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.request("POST", API_URL, headers=headers, data=data)
+    return json.loads(response.content.decode("utf-8"))
+
+data = query("sample1.flac")
+# [{'label': 'neu', 'score': 0.60},
+#  {'label': 'hap', 'score': 0.20},
+#  {'label': 'ang', 'score': 0.13},
+#  {'label': 'sad', 'score': 0.07}]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with audio classification models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.audioClassification({
+	data: await (await fetch("sample.flac")).blob(),
+	model: "facebook/mms-lid-126",
+});
+```
+
+### Speaker Identification
+
+Speaker Identification is classifying which of a set of predefined speakers is talking in a given audio clip. You can try out this task with [this model](https://huggingface.co/superb/wav2vec2-base-superb-sid). A useful dataset for this task is VoxCeleb1.
+
+## Solving audio classification for your own data
+
+We have some great news! You can do fine-tuning (transfer learning) to train a well-performing model without requiring as much data. Pretrained models such as Wav2Vec2 and HuBERT exist. [Facebook's Wav2Vec2 XLS-R model](https://ai.facebook.com/blog/wav2vec-20-learning-the-structure-of-speech-from-raw-audio/) is a large multilingual model trained on 128 languages and 436K hours of speech. A minimal fine-tuning sketch follows below.
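+
+A minimal fine-tuning sketch with 🤗 Transformers. The dataset (the `superb` keyword-spotting subset), checkpoint (`facebook/wav2vec2-base`) and hyperparameters here are illustrative assumptions; adapt them to your own data:
+
+```python
+from datasets import Audio, load_dataset
+from transformers import (
+    AutoFeatureExtractor,
+    AutoModelForAudioClassification,
+    Trainer,
+    TrainingArguments,
+)
+
+checkpoint = "facebook/wav2vec2-base"
+dataset = load_dataset("superb", "ks", split="train[:1%]")  # small slice for a quick demo
+dataset = dataset.cast_column("audio", Audio(sampling_rate=16_000))
+
+feature_extractor = AutoFeatureExtractor.from_pretrained(checkpoint)
+labels = dataset.features["label"].names
+model = AutoModelForAudioClassification.from_pretrained(checkpoint, num_labels=len(labels))
+
+def preprocess(batch):
+    # pad/truncate every clip to 1 second of 16 kHz audio
+    audio = [x["array"] for x in batch["audio"]]
+    return feature_extractor(
+        audio, sampling_rate=16_000, max_length=16_000, padding="max_length", truncation=True
+    )
+
+encoded = dataset.map(preprocess, batched=True, remove_columns=["audio", "file"])
+
+trainer = Trainer(
+    model=model,
+    args=TrainingArguments(output_dir="wav2vec2-keyword-spotting", num_train_epochs=1),
+    train_dataset=encoded,
+)
+trainer.train()
+```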
+
+## Useful Resources
+
+Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!
+
+### Notebooks
+
+- [PyTorch](https://colab.research.google.com/github/huggingface/notebooks/blob/master/examples/audio_classification.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/audio-classification)
+
+### Documentation
+
+- [Audio classification task guide](https://huggingface.co/docs/transformers/tasks/audio_classification)
diff --git a/packages/tasks/src/audio-classification/data.ts b/packages/tasks/src/audio-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..92e879c5cbe5e83011dd665b803433901ebe2096
--- /dev/null
+++ b/packages/tasks/src/audio-classification/data.ts
@@ -0,0 +1,77 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A benchmark of 10 different audio tasks.",
+			id: "superb",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "audio.wav",
+				type: "audio",
+			},
+		],
+		outputs: [
+			{
+				data: [
+					{
+						label: "Up",
+						score: 0.2,
+					},
+					{
+						label: "Down",
+						score: 0.8,
+					},
+				],
+				type: "chart",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "accuracy",
+		},
+		{
+			description: "",
+			id: "recall",
+		},
+		{
+			description: "",
+			id: "precision",
+		},
+		{
+			description: "",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description: "An easy-to-use model for Command Recognition.",
+			id: "speechbrain/google_speech_command_xvector",
+		},
+		{
+			description: "An Emotion Recognition model.",
+			id: "ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition",
+		},
+		{
+			description: "A language identification model.",
+			id: "facebook/mms-lid-126",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can predict the language spoken in a given audio.",
+			id: "akhaliq/Speechbrain-audio-classification",
+		},
+	],
+	summary:
+		"Audio classification is the task of assigning a label or class to a given audio. It can be used for recognizing which command a user is giving or the emotion of a statement, as well as identifying a speaker.",
+	widgetModels: ["facebook/mms-lid-126"],
+	youtubeId: "KWwzcmG98Ds",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/audio-to-audio/about.md b/packages/tasks/src/audio-to-audio/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..e56275277d211906c0bea7891f7bdb5fa0aeae7f
--- /dev/null
+++ b/packages/tasks/src/audio-to-audio/about.md
@@ -0,0 +1,56 @@
+## Use Cases
+
+### Speech Enhancement (Noise removal)
+
+Speech Enhancement is largely self-explanatory: it improves (or enhances) the quality of an audio recording by removing noise. There are multiple libraries to solve this task, such as SpeechBrain, Asteroid and ESPNet.
+Here is a simple example using SpeechBrain:
+
+```python
+from speechbrain.pretrained import SpectralMaskEnhancement
+model = SpectralMaskEnhancement.from_hparams(
+  "speechbrain/mtl-mimic-voicebank"
+)
+model.enhance_file("file.wav")
+```
+
+Alternatively, you can use the [Inference API](https://huggingface.co/inference-api) to solve this task:
+
+```python
+import json
+import requests
+
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+API_URL = "https://api-inference.huggingface.co/models/speechbrain/mtl-mimic-voicebank"
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.request("POST", API_URL, headers=headers, data=data)
+    return json.loads(response.content.decode("utf-8"))
+
+data = query("sample1.flac")
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with audio-to-audio models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.audioToAudio({
+	data: await (await fetch("sample.flac")).blob(),
+	model: "speechbrain/sepformer-wham",
+});
+```
+
+### Audio Source Separation
+
+Audio Source Separation allows you to isolate different sounds from individual sources. For example, if you have an audio file with multiple people speaking, you can get an audio file for each of them. You can then use an Automatic Speech Recognition system to extract the text from each of these sources as an initial step for your system!
+
+Audio-to-Audio can also be used to remove noise from audio files: you get one audio for the person speaking and another audio for the noise. This can also be useful when you have multi-person audio with some noise: you can get one audio for each person and then one audio for the noise.
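+
+A sketch of how separation looks in code with SpeechBrain, assuming a hypothetical two-speaker mixture file `mixture.wav` (the SepFormer model below operates at 8 kHz):
+
+```python
+import torchaudio
+from speechbrain.pretrained import SepformerSeparation
+
+model = SepformerSeparation.from_hparams("speechbrain/sepformer-wham")
+# returns a tensor of shape (batch, time, n_sources)
+est_sources = model.separate_file(path="mixture.wav")
+torchaudio.save("source1.wav", est_sources[:, :, 0].detach().cpu(), 8000)
+torchaudio.save("source2.wav", est_sources[:, :, 1].detach().cpu(), 8000)
+```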
+
+## Training a model for your own data
+
+If you want to learn how to train models for the Audio-to-Audio task, we recommend the following tutorials:
+
+- [Speech Enhancement](https://speechbrain.github.io/tutorial_enhancement.html)
+- [Source Separation](https://speechbrain.github.io/tutorial_separation.html)
diff --git a/packages/tasks/src/audio-to-audio/data.ts b/packages/tasks/src/audio-to-audio/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..56f03188e3e9bbe93d3ebd72b54b06fe1756f8cb
--- /dev/null
+++ b/packages/tasks/src/audio-to-audio/data.ts
@@ -0,0 +1,66 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "512-element X-vector embeddings of speakers from CMU ARCTIC dataset.",
+			id: "Matthijs/cmu-arctic-xvectors",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "input.wav",
+				type: "audio",
+			},
+		],
+		outputs: [
+			{
+				filename: "label-0.wav",
+				type: "audio",
+			},
+			{
+				filename: "label-1.wav",
+				type: "audio",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The Signal-to-Noise Ratio is the relationship between the target signal level and the background noise level. It is calculated as the logarithm of the target signal divided by the background noise, in decibels.",
+			id: "snri",
+		},
+		{
+			description:
+				"The Signal-to-Distortion Ratio is the relationship between the target signal and the sum of noise, interference, and artifact errors.",
+			id: "sdri",
+		},
+	],
+	models: [
+		{
+			description: "A solid model of audio source separation.",
+			id: "speechbrain/sepformer-wham",
+		},
+		{
+			description: "A speech enhancement model.",
+			id: "speechbrain/metricgan-plus-voicebank",
+		},
+	],
+	spaces: [
+		{
+			description: "An application for speech separation.",
+			id: "younver/speechbrain-speech-separation",
+		},
+		{
+			description: "An application for audio style transfer.",
+			id: "nakas/audio-diffusion_style_transfer",
+		},
+	],
+	summary:
+		"Audio-to-Audio is a family of tasks in which the input is an audio and the output is one or multiple generated audios. Some example tasks are speech enhancement and source separation.",
+	widgetModels: ["speechbrain/sepformer-wham"],
+	youtubeId: "iohj7nCCYoM",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/automatic-speech-recognition/about.md b/packages/tasks/src/automatic-speech-recognition/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..3871cba1c377a25e0d1041c24966748a50d2f5ed
--- /dev/null
+++ b/packages/tasks/src/automatic-speech-recognition/about.md
@@ -0,0 +1,87 @@
+## Use Cases
+
+### Virtual Speech Assistants
+
+Many edge devices have an embedded virtual assistant to interact with the end users better. These assistants rely on ASR models to recognize different voice commands and perform various tasks. For instance, you can ask your phone to dial a phone number, ask a general question, or schedule a meeting.
+
+### Caption Generation
+
+A caption generation model takes audio as input from sources to generate automatic captions through transcription, for live-streamed or recorded videos. This can help with content accessibility. For example, an audience watching a video that includes a non-native language can rely on captions to interpret the content. It can also help with information retention in online-class environments, improving knowledge assimilation by letting students read along and take notes faster.
+
+## Task Variants
+
+### Multilingual ASR
+
+Multilingual ASR models can convert audio inputs in multiple languages into transcripts. Some multilingual ASR models include [language identification](https://huggingface.co/tasks/audio-classification) blocks to improve the performance.
+
+Multilingual ASR has become popular because maintaining just a single model for all languages can simplify the production pipeline. Take a look at [Whisper](https://huggingface.co/openai/whisper-large-v2) to get an idea of how a single model can process 100+ languages; a sketch of multilingual transcription follows below.
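+
+A minimal sketch of forcing the transcription language with Whisper via the pipeline's `generate_kwargs` (the French clip `clip_fr.flac` is a hypothetical local file):
+
+```python
+from transformers import pipeline
+
+pipe = pipeline("automatic-speech-recognition", model="openai/whisper-large-v2")
+# force French transcription instead of relying on automatic language detection
+pipe("clip_fr.flac", generate_kwargs={"language": "french", "task": "transcribe"})
+```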
+
+## Inference
+
+The Hub contains over [~9,000 ASR models](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using the Inference API. Here is a simple code snippet to do exactly this:
+
+```python
+import json
+import requests
+
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v2"
+
+def query(filename):
+    with open(filename, "rb") as f:
+        data = f.read()
+    response = requests.request("POST", API_URL, headers=headers, data=data)
+    return json.loads(response.content.decode("utf-8"))
+
+data = query("sample1.flac")
+```
+
+You can also use libraries such as [transformers](https://huggingface.co/models?library=transformers&pipeline_tag=automatic-speech-recognition&sort=downloads), [speechbrain](https://huggingface.co/models?library=speechbrain&pipeline_tag=automatic-speech-recognition&sort=downloads), [NeMo](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition&library=nemo&sort=downloads) and [espnet](https://huggingface.co/models?library=espnet&pipeline_tag=automatic-speech-recognition&sort=downloads) if you want to run inference yourself with just a few lines of code.
+
+```python
+from transformers import pipeline
+
+with open("sample.flac", "rb") as f:
+    data = f.read()
+
+pipe = pipeline("automatic-speech-recognition", "openai/whisper-large-v2")
+pipe("sample.flac")
+# {'text': "GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES IN DRAUGHTY SCHOOL ROOMS DAY AFTER DAY FOR A FORTNIGHT HE'LL HAVE TO PUT IN AN APPEARANCE AT SOME PLACE OF WORSHIP ON SUNDAY MORNING AND HE CAN COME TO US IMMEDIATELY AFTERWARDS"}
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to transcribe audio with JavaScript using models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.automaticSpeechRecognition({
+	data: await (await fetch("sample.flac")).blob(),
+	model: "openai/whisper-large-v2",
+});
+```
+
+## Solving ASR for your own data
+
+We have some great news! You can fine-tune (transfer learning) a foundational speech model on a specific language without tons of data. Pretrained models such as Whisper, Wav2Vec2-MMS and HuBERT exist. [OpenAI's Whisper model](https://huggingface.co/openai/whisper-large-v2) is a large multilingual model trained on 100+ languages and 680K hours of speech.
+
+The following detailed [blog post](https://huggingface.co/blog/fine-tune-whisper) shows how to fine-tune a pre-trained Whisper checkpoint on labeled data for ASR. With the right data and strategy, you can fine-tune a high-performing model on a free Google Colab instance too. We suggest reading the blog post for more info!
+
+## Hugging Face Whisper Event
+
+In December 2022, over 450 participants collaborated, fine-tuned and shared 600+ ASR Whisper models in 100+ different languages. You can compare these models on the event's speech recognition [leaderboard](https://huggingface.co/spaces/whisper-event/leaderboard?dataset=mozilla-foundation%2Fcommon_voice_11_0&config=ar&split=test).
+
+These events help democratize ASR for all languages, including low-resource languages. In addition to the trained models, the [event](https://github.com/huggingface/community-events/tree/main/whisper-fine-tuning-event) helps to build practical collaborative knowledge.
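+
+Any of these fine-tuned checkpoints drops into the same pipeline API. For long recordings, a sketch of chunked long-form transcription (`long_interview.wav` is a hypothetical local file; see the chunking blog post under Useful Resources):
+
+```python
+from transformers import pipeline
+
+pipe = pipeline(
+    "automatic-speech-recognition",
+    model="openai/whisper-large-v2",
+    chunk_length_s=30,  # split long audio into 30s chunks and stitch the results
+)
+pipe("long_interview.wav", return_timestamps=True)
+```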
+
+## Useful Resources
+
+- [Fine-tuning MetaAI's MMS Adapter Models for Multi-Lingual ASR](https://huggingface.co/blog/mms_adapters)
+- [Making automatic speech recognition work on large files with Wav2Vec2 in 🤗 Transformers](https://huggingface.co/blog/asr-chunking)
+- [Boosting Wav2Vec2 with n-grams in 🤗 Transformers](https://huggingface.co/blog/wav2vec2-with-ngram)
+- [ML for Audio Study Group - Intro to Audio and ASR Deep Dive](https://www.youtube.com/watch?v=D-MH6YjuIlE)
+- [Massively Multilingual ASR: 50 Languages, 1 Model, 1 Billion Parameters](https://arxiv.org/pdf/2007.03001.pdf)
+- An ASR toolkit made by [NVIDIA: NeMo](https://github.com/NVIDIA/NeMo) with code and pretrained models useful for new ASR models. Watch the [introductory video](https://www.youtube.com/embed/wBgpMf_KQVw) for an overview.
+- [An introduction to SpeechT5, a multi-purpose speech recognition and synthesis model](https://huggingface.co/blog/speecht5)
+- [A guide on Fine-tuning Whisper For Multilingual ASR with 🤗Transformers](https://huggingface.co/blog/fine-tune-whisper)
+- [Automatic speech recognition task guide](https://huggingface.co/docs/transformers/tasks/asr)
+- [Speech Synthesis, Recognition, and More With SpeechT5](https://huggingface.co/blog/speecht5)
diff --git a/packages/tasks/src/automatic-speech-recognition/data.ts b/packages/tasks/src/automatic-speech-recognition/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..05d13e14cfa306df4000ebed85aee46660b886f1
--- /dev/null
+++ b/packages/tasks/src/automatic-speech-recognition/data.ts
@@ -0,0 +1,78 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "An 18,000-hour multilingual audio-text dataset covering 108 languages.",
+			id: "mozilla-foundation/common_voice_13_0",
+		},
+		{
+			description: "An English dataset with 1,000 hours of data.",
+			id: "librispeech_asr",
+		},
+		{
+			description: "High quality, multi-speaker audio data and their transcriptions in various languages.",
+			id: "openslr",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "input.flac",
+				type: "audio",
+			},
+		],
+		outputs: [
+			{
+				/// GOING ALONG SLUSHY COUNTRY ROADS AND SPEAKING TO DAMP AUDIENCES I
+				label: "Transcript",
+				content: "Going along slushy country roads and speaking to damp audiences in...",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "wer",
+		},
+		{
+			description: "",
+			id: "cer",
+		},
+	],
+	models: [
+		{
+			description: "A powerful ASR model by OpenAI.",
+			id: "openai/whisper-large-v2",
+		},
+		{
+			description: "A good generic ASR model by MetaAI.",
+			id: "facebook/wav2vec2-base-960h",
+		},
+		{
+			description: "An end-to-end model that performs ASR and Speech Translation by MetaAI.",
+			id: "facebook/s2t-small-mustc-en-fr-st",
+		},
+	],
+	spaces: [
+		{
+			description: "A powerful general-purpose speech recognition application.",
+			id: "openai/whisper",
+		},
+		{
+			description: "A fast speech recognition application built on Whisper JAX.",
+			id: "sanchit-gandhi/whisper-jax",
+		},
+		{
+			description: "An application that transcribes speeches in YouTube videos.",
+			id: "jeffistyping/Youtube-Whisperer",
+		},
+	],
+	summary:
+		"Automatic Speech Recognition (ASR), also known as Speech to Text (STT), is the task of transcribing a given audio to text.
 It has many applications, such as voice user interfaces.",
	widgetModels: ["openai/whisper-large-v2"],
	youtubeId: "TksaY_FDgnk",
};

export default taskData;
diff --git a/packages/tasks/src/const.ts b/packages/tasks/src/const.ts
new file mode 100644
index 0000000000000000000000000000000000000000..34fb9b24a4b3d92fef965a3490de09693f8bf584
--- /dev/null
+++ b/packages/tasks/src/const.ts
@@ -0,0 +1,59 @@
+import type { ModelLibraryKey } from "./modelLibraries";
+import type { PipelineType } from "./pipelines";
+
+/**
+ * Model libraries compatible with each ML task
+ */
+export const TASKS_MODEL_LIBRARIES: Record<PipelineType, ModelLibraryKey[]> = {
+	"audio-classification": ["speechbrain", "transformers"],
+	"audio-to-audio": ["asteroid", "speechbrain"],
+	"automatic-speech-recognition": ["espnet", "nemo", "speechbrain", "transformers", "transformers.js"],
+	conversational: ["transformers"],
+	"depth-estimation": ["transformers"],
+	"document-question-answering": ["transformers"],
+	"feature-extraction": ["sentence-transformers", "transformers", "transformers.js"],
+	"fill-mask": ["transformers", "transformers.js"],
+	"graph-ml": ["transformers"],
+	"image-classification": ["keras", "timm", "transformers", "transformers.js"],
+	"image-segmentation": ["transformers", "transformers.js"],
+	"image-to-image": [],
+	"image-to-text": ["transformers.js"],
+	"video-classification": [],
+	"multiple-choice": ["transformers"],
+	"object-detection": ["transformers", "transformers.js"],
+	other: [],
+	"question-answering": ["adapter-transformers", "allennlp", "transformers", "transformers.js"],
+	robotics: [],
+	"reinforcement-learning": ["transformers", "stable-baselines3", "ml-agents", "sample-factory"],
+	"sentence-similarity": ["sentence-transformers", "spacy", "transformers.js"],
+	summarization: ["transformers", "transformers.js"],
+	"table-question-answering": ["transformers"],
+	"table-to-text": ["transformers"],
+	"tabular-classification": ["sklearn"],
+	"tabular-regression": ["sklearn"],
+	"tabular-to-text": ["transformers"],
+	"text-classification": ["adapter-transformers", "spacy", "transformers", "transformers.js"],
+	"text-generation": ["transformers", "transformers.js"],
+	"text-retrieval": [],
+	"text-to-image": [],
+	"text-to-speech": ["espnet", "tensorflowtts", "transformers"],
+	"text-to-audio": ["transformers"],
+	"text-to-video": [],
+	"text2text-generation": ["transformers", "transformers.js"],
+	"time-series-forecasting": [],
+	"token-classification": [
+		"adapter-transformers",
+		"flair",
+		"spacy",
+		"span-marker",
+		"stanza",
+		"transformers",
+		"transformers.js",
+	],
+	translation: ["transformers", "transformers.js"],
+	"unconditional-image-generation": [],
+	"visual-question-answering": [],
+	"voice-activity-detection": [],
+	"zero-shot-classification": ["transformers", "transformers.js"],
+	"zero-shot-image-classification": ["transformers.js"],
+};
diff --git a/packages/tasks/src/conversational/about.md b/packages/tasks/src/conversational/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..d2141ba20fbaa7c1093e3a2f03208d62e36b0ac6
--- /dev/null
+++ b/packages/tasks/src/conversational/about.md
@@ -0,0 +1,50 @@
+## Use Cases
+
+### Chatbot 💬
+
+Chatbots are used to have conversations instead of providing direct contact with a live human. They are used to provide customer service, sales, and can even be used to play games (see [ELIZA](https://en.wikipedia.org/wiki/ELIZA) from 1966 for one of the earliest examples).
+
+### Voice Assistants 🎙️
+
+Conversational response models are used as part of voice assistants to provide appropriate responses to voice-based queries.
+
+## Inference
+
+You can infer with Conversational models with the 🤗 Transformers library using the `conversational` pipeline. This pipeline takes a conversation prompt or a list of conversations and generates responses for each prompt. The models this pipeline can use are models that have been fine-tuned on a multi-turn conversational task (see https://huggingface.co/models?filter=conversational for a list of updated Conversational models).
+
+```python
+from transformers import pipeline, Conversation
+converse = pipeline("conversational")
+
+conversation_1 = Conversation("Going to the movies tonight - any suggestions?")
+conversation_2 = Conversation("What's the last book you have read?")
+converse([conversation_1, conversation_2])
+
+## Output:
+## Conversation 1
+## user >> Going to the movies tonight - any suggestions?
+## bot >> The Big Lebowski
+## Conversation 2
+## user >> What's the last book you have read?
+## bot >> The Last Question
+```
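+
+A `Conversation` object keeps its history, so follow-up turns can be appended; a hypothetical continuation of `conversation_1` from the snippet above:
+
+```python
+# add a follow-up user turn to the same conversation and run the pipeline again
+conversation_1.add_user_input("Is it an action movie?")
+converse([conversation_1])
+```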
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer with conversational models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.conversational({
+	model: "facebook/blenderbot-400M-distill",
+	inputs: "Going to the movies tonight - any suggestions?",
+});
+```
+
+## Useful Resources
+
+- Learn how ChatGPT and InstructGPT work in this blog: [Illustrating Reinforcement Learning from Human Feedback (RLHF)](https://huggingface.co/blog/rlhf)
+- [Reinforcement Learning from Human Feedback From Zero to ChatGPT](https://www.youtube.com/watch?v=EAd4oQtEJOM)
+- [A guide on Dialog Agents](https://huggingface.co/blog/dialog-agents)
+
+This page was made possible thanks to the efforts of [Viraat Aryabumi](https://huggingface.co/viraat).
diff --git a/packages/tasks/src/conversational/data.ts b/packages/tasks/src/conversational/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..85c4057612b31883e21b208bedf235199055e721
--- /dev/null
+++ b/packages/tasks/src/conversational/data.ts
@@ -0,0 +1,66 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description:
+				"A dataset of 7k conversations explicitly designed to exhibit multiple conversation modes: displaying personality, having empathy, and demonstrating knowledge.",
+			id: "blended_skill_talk",
+		},
+		{
+			description:
+				"ConvAI is a dataset of human-to-bot conversations labeled for quality. This data can be used to train a metric for evaluating dialogue systems.",
+			id: "conv_ai_2",
+		},
+		{
+			description: "EmpatheticDialogues is a dataset of 25k conversations grounded in emotional situations.",
+			id: "empathetic_dialogues",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "Hey my name is Julien! How are you?",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Answer",
+				content: "Hi Julien! My name is Julia! I am well.",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"BLEU score is calculated by counting the number of shared single or subsequent tokens between the generated sequence and the reference. Runs of n consecutive tokens are called “n-grams”: a unigram is a single token, a bigram is a token pair, and so on. The score ranges from 0 to 1, where 1 means the generated sequence perfectly matched the reference and 0 means it did not match at all.",
+			id: "bleu",
+		},
+	],
+	models: [
+		{
+			description: "A faster and smaller model than the famous BERT model.",
+			id: "facebook/blenderbot-400M-distill",
+		},
+		{
+			description:
+				"DialoGPT is a large-scale pretrained dialogue response generation model for multiturn conversations.",
+			id: "microsoft/DialoGPT-large",
+		},
+	],
+	spaces: [
+		{
+			description: "A chatbot based on the Blender model.",
+			id: "EXFINITE/BlenderBot-UI",
+		},
+	],
+	summary:
+		"Conversational response modelling is the task of generating conversational text that is relevant, coherent and knowledgeable given a prompt. These models have applications in chatbots, and as a part of voice assistants.",
+	widgetModels: ["facebook/blenderbot-400M-distill"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/depth-estimation/about.md b/packages/tasks/src/depth-estimation/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..b83d60e24f1441129412ed1d4ebd562fec560453
--- /dev/null
+++ b/packages/tasks/src/depth-estimation/about.md
@@ -0,0 +1,36 @@
+## Use Cases
+Depth estimation models can be used to estimate the depth of different objects present in an image.
+
+### Estimation of Volumetric Information
+Depth estimation models are widely used to study the volumetric structure of objects present inside an image. This is an important use case in the domain of computer graphics.
+
+### 3D Representation
+
+Depth estimation models can also be used to develop a 3D representation from a 2D image.
+
+## Inference
+
+With the `transformers` library, you can use the `depth-estimation` pipeline to infer with depth estimation models. You can initialize the pipeline with a model id from the Hub. If you do not provide a model id, it will initialize with [Intel/dpt-large](https://huggingface.co/Intel/dpt-large) by default. When calling the pipeline, you just need to specify a path, an http link, or an image loaded in PIL. Additionally, you can find a comprehensive list of various depth estimation models at [this link](https://huggingface.co/models?pipeline_tag=depth-estimation).
+
+```python
+from transformers import pipeline
+
+estimator = pipeline(task="depth-estimation", model="Intel/dpt-large")
+result = estimator(images="http://images.cocodataset.org/val2017/000000039769.jpg")
+result
+
+# {'predicted_depth': tensor([[[ 6.3199,  6.3629,  6.4148,  ..., 10.4104, 10.5109, 10.3847],
+#          [ 6.3850,  6.3615,  6.4166,  ..., 10.4540, 10.4384, 10.4554],
+#          [ 6.3519,  6.3176,  6.3575,  ..., 10.4247, 10.4618, 10.4257],
+#          ...,
+#          [22.3772, 22.4624, 22.4227,  ..., 22.5207, 22.5593, 22.5293],
+#          [22.5073, 22.5148, 22.5114,  ..., 22.6604, 22.6344, 22.5871],
+#          [22.5176, 22.5275, 22.5218,  ..., 22.6282, 22.6216, 22.6108]]]),
#  'depth': <PIL.Image.Image image mode=L size=640x480>}

# You can visualize the result just by calling `result["depth"]`.
+```
+
+## Useful Resources
+
+- [Monocular depth estimation task guide](https://huggingface.co/docs/transformers/tasks/monocular_depth_estimation)
diff --git a/packages/tasks/src/depth-estimation/data.ts b/packages/tasks/src/depth-estimation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..1a9b0d2a183028679b0f606e84fb07e16f40f8a6
--- /dev/null
+++ b/packages/tasks/src/depth-estimation/data.ts
@@ -0,0 +1,52 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "NYU Depth V2 Dataset: Video dataset containing both RGB and depth sensor data.",
+			id: "sayakpaul/nyu_depth_v2",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "depth-estimation-input.jpg",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				filename: "depth-estimation-output.png",
+				type: "img",
+			},
+		],
+	},
+	metrics: [],
+	models: [
+		{
+			// TO DO: write description
+			description: "Strong Depth Estimation model trained on 1.4 million images.",
+			id: "Intel/dpt-large",
+		},
+		{
+			// TO DO: write description
+			description: "Strong Depth Estimation model trained on the KITTI dataset.",
+			id: "vinvino02/glpn-kitti",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that predicts the depth of an image and then reconstructs the 3D model as voxels.",
+			id: "radames/dpt-depth-estimation-3d-voxels",
+		},
+		{
+			description: "An application that can estimate the depth in a given image.",
+			id: "keras-io/Monocular-Depth-Estimation",
+		},
+	],
+	summary: "Depth estimation is the task of predicting the depth of the objects present in an image.",
+	widgetModels: [""],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/document-question-answering/about.md b/packages/tasks/src/document-question-answering/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..528c29ec917ace00387344b09671e9a90fcc6e06
--- /dev/null
+++ b/packages/tasks/src/document-question-answering/about.md
@@ -0,0 +1,53 @@
+## Use Cases
+
+Document Question Answering models can be used to answer natural language questions about documents. Typically, document QA models consider textual, layout and potentially visual information. This is useful when the question requires some understanding of the visual aspects of the document.
+Nevertheless, certain document QA models can work without document images. Hence the task is not limited to visually-rich documents and allows users to ask questions based on spreadsheets, text PDFs, etc!
+
+### Document Parsing
+
+One of the most popular use cases of document question answering models is the parsing of structured documents. For example, you can extract the name, address, and other information from a form. You can also use the model to extract information from a table, or even a resume.
+
+### Invoice Information Extraction
+
+Another very popular use case is invoice information extraction. For example, you can extract the invoice number, the invoice date, the total amount, the VAT number, and the invoice recipient.
+
+## Inference
+
+You can infer with Document QA models with the 🤗 Transformers library using the [`document-question-answering` pipeline](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.DocumentQuestionAnsweringPipeline). If no model checkpoint is given, the pipeline will be initialized with [`impira/layoutlm-document-qa`](https://huggingface.co/impira/layoutlm-document-qa).
 This pipeline takes question(s) and document(s) as input, and returns the answer.
+👉 Note that the question answering task solved here is extractive: the model extracts the answer from a context (the document).
+
+```python
+from transformers import pipeline
+from PIL import Image
+
+pipe = pipeline("document-question-answering", model="naver-clova-ix/donut-base-finetuned-docvqa")
+
+question = "What is the purchase amount?"
+image = Image.open("your-document.png")
+
+pipe(image=image, question=question)
+
+## [{'answer': '20,000$'}]
+```
+
+## Useful Resources
+
+Would you like to learn more about Document QA? Awesome! Here are some curated resources that you may find helpful!
+
+- [Document Visual Question Answering (DocVQA) challenge](https://rrc.cvc.uab.es/?ch=17)
+- [DocVQA: A Dataset for Document Visual Question Answering](https://arxiv.org/abs/2007.00398) (Dataset paper)
+- [ICDAR 2021 Competition on Document Visual Question Answering](https://lilianweng.github.io/lil-log/2020/10/29/open-domain-question-answering.html) (Conference paper)
+- [HuggingFace's Document Question Answering pipeline](https://huggingface.co/docs/transformers/en/main_classes/pipelines#transformers.DocumentQuestionAnsweringPipeline)
+- [Github repo: DocQuery - Document Query Engine Powered by Large Language Models](https://github.com/impira/docquery)
+
+### Notebooks
+
+- [Fine-tuning Donut on DocVQA dataset](https://github.com/NielsRogge/Transformers-Tutorials/tree/0ea77f29d01217587d7e32a848f3691d9c15d6ab/Donut/DocVQA)
+- [Fine-tuning LayoutLMv2 on DocVQA dataset](https://github.com/NielsRogge/Transformers-Tutorials/tree/1b4bad710c41017d07a8f63b46a12523bfd2e835/LayoutLMv2/DocVQA)
+- [Accelerating Document AI](https://huggingface.co/blog/document-ai)
+
+### Documentation
+
+- [Document question answering task guide](https://huggingface.co/docs/transformers/tasks/document_question_answering)
+
+The contents of this page are contributed by [Eliott Zemour](https://huggingface.co/eliolio) and reviewed by [Kwadwo Agyapon-Ntra](https://huggingface.co/KayO) and [Ankur Goyal](https://huggingface.co/ankrgyl).
diff --git a/packages/tasks/src/document-question-answering/data.ts b/packages/tasks/src/document-question-answering/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..275173fa873a8eea24e6ddb04534a1a6758d16d2
--- /dev/null
+++ b/packages/tasks/src/document-question-answering/data.ts
@@ -0,0 +1,70 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description:
+				"Dataset from the 2020 DocVQA challenge. The documents are taken from the UCSF Industry Documents Library.",
+			id: "eliolio/docvqa",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Question",
+				content: "What is the idea behind the consumer relations efficiency team?",
+				type: "text",
+			},
+			{
+				filename: "document-question-answering-input.png",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				label: "Answer",
+				content: "Balance cost efficiency with quality customer service",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The evaluation metric for the DocVQA challenge is the Average Normalized Levenshtein Similarity (ANLS). This metric is flexible to character recognition errors and compares the predicted answer with the ground truth answer.",
+			id: "anls",
+		},
+		{
+			description:
+				"Exact Match is a metric based on the strict character match of the predicted answer and the right answer.
 For answers predicted correctly, the Exact Match will be 1. Even if only one character is different, Exact Match will be 0.",
			id: "exact-match",
		},
	],
	models: [
		{
			description: "A LayoutLM model for the document QA task, fine-tuned on DocVQA and SQuAD2.0.",
			id: "impira/layoutlm-document-qa",
		},
		{
			description: "A special model for OCR-free Document QA task. Donut model fine-tuned on DocVQA.",
			id: "naver-clova-ix/donut-base-finetuned-docvqa",
		},
	],
	spaces: [
		{
			description: "A robust document question answering application.",
			id: "impira/docquery",
		},
		{
			description: "An application that can answer questions from invoices.",
			id: "impira/invoices",
		},
	],
	summary:
		"Document Question Answering (also known as Document Visual Question Answering) is the task of answering questions on document images. Document question answering models take a (document, question) pair as input and return an answer in natural language. Models usually rely on multi-modal features, combining text, position of words (bounding-boxes) and image.",
	widgetModels: ["impira/layoutlm-document-qa"],
	youtubeId: "",
};

export default taskData;
diff --git a/packages/tasks/src/feature-extraction/about.md b/packages/tasks/src/feature-extraction/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..60c7c7ed33c16ce43962edf2d3af6f0f963f6508
--- /dev/null
+++ b/packages/tasks/src/feature-extraction/about.md
@@ -0,0 +1,34 @@
+## About the Task
+
+Feature extraction is the task of building features intended to be informative from a given dataset,
+facilitating the subsequent learning and generalization steps in various domains of machine learning.
+
+## Use Cases
+
+Feature extraction can be used to do transfer learning in natural language processing, computer vision and audio models.
+
+## Inference
+
+#### Feature Extraction
+
+```python
+from transformers import pipeline
+
+checkpoint = "facebook/bart-base"
+feature_extractor = pipeline("feature-extraction", framework="pt", model=checkpoint)
+text = "Transformers is an awesome library!"
+
+features = feature_extractor(text, return_tensors="pt")[0]
+# One feature vector per token, each of dimension 768
+'''tensor([[ 2.5834,  2.7571,  0.9024,  ...,  1.5036, -0.0435, -0.8603],
+        [-1.2850, -1.0094, -2.0826,  ...,  1.5993, -0.9017,  0.6426],
+        [ 0.9082,  0.3896, -0.6843,  ...,  0.7061,  0.6517,  1.0550],
+        ...,
+        [ 0.6919, -1.1946,  0.2438,  ...,  1.3646, -1.8661, -0.1642],
+        [-0.1701, -2.0019, -0.4223,  ...,  0.3680, -1.9704, -0.0068],
+        [ 0.2520, -0.6869, -1.0582,  ...,  0.5198, -2.2106,  0.4547]])'''
+
+# Reduce along the sequence dimension to get a single 768-dimensional array
+features.numpy().mean(axis=0)
+```
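+
+The pooled vector can stand in as a crude sentence embedding. Continuing from the snippet above, a hypothetical follow-up comparing two sentences by cosine similarity:
+
+```python
+import numpy as np
+
+def embed(text):
+    # mean-pool the token features into one 768-dimensional vector
+    return feature_extractor(text, return_tensors="pt")[0].numpy().mean(axis=0)
+
+a = embed("Transformers is an awesome library!")
+b = embed("I really like this library.")
+similarity = np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
+```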
+
+## Useful resources
+
+- [Documentation for feature extractor of 🤗Transformers](https://huggingface.co/docs/transformers/main_classes/feature_extractor)
diff --git a/packages/tasks/src/feature-extraction/data.ts b/packages/tasks/src/feature-extraction/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..fe5f1785b92b078fe03f7c92dba5644120eeafe5
--- /dev/null
+++ b/packages/tasks/src/feature-extraction/data.ts
@@ -0,0 +1,54 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description:
+				"Wikipedia dataset containing cleaned articles in all languages. Can be used to train `feature-extraction` models.",
+			id: "wikipedia",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "India, officially the Republic of India, is a country in South Asia.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				table: [
+					["Dimension 1", "Dimension 2", "Dimension 3"],
+					["2.583383083343506", "2.757075071334839", "0.9023529887199402"],
+					["8.29393482208252", "1.1071064472198486", "2.03399395942688"],
+					["-0.7754912972450256", "-1.647324562072754", "-0.6113331913948059"],
+					["0.07087723910808563", "1.5942802429199219", "1.4610432386398315"],
+				],
+				type: "tabular",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "",
+		},
+	],
+	models: [
+		{
+			description: "A powerful feature extraction model for natural language processing tasks.",
+			id: "facebook/bart-base",
+		},
+		{
+			description: "A strong feature extraction model for coding tasks.",
+			id: "microsoft/codebert-base",
+		},
+	],
+	spaces: [],
+	summary:
+		"Feature extraction refers to the process of transforming raw data into numerical features that can be processed while preserving the information in the original dataset.",
+	widgetModels: ["facebook/bart-base"],
+};
+
+export default taskData;
diff --git a/packages/tasks/src/fill-mask/about.md b/packages/tasks/src/fill-mask/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..4fabd3cf6d06d8ba9e676eb1f637c5f688b456fb
--- /dev/null
+++ b/packages/tasks/src/fill-mask/about.md
@@ -0,0 +1,51 @@
+## Use Cases
+
+### Domain Adaptation 👩‍⚕️
+
+Masked language models do not require labelled data! They are trained by masking some of the words in sentences, and the model is expected to guess the masked word. This makes them very practical!
+
+For example, masked language modeling is used to train large models for domain-specific problems. If you have to work on a domain-specific task, such as retrieving information from medical research papers, you can train a masked language model using those papers. 📄
+
+The resulting model has a statistical understanding of the language used in medical research papers, and can be further trained in a process called fine-tuning to solve different tasks, such as [Text Classification](/tasks/text-classification) or [Question Answering](/tasks/question-answering), to build a medical research papers information extraction system. 👩‍⚕️ Pre-training on domain-specific data tends to yield better results (see [this paper](https://arxiv.org/abs/2007.15779) for an example).
+
+If you don't have the data to train a masked language model, you can also use an existing [domain-specific masked language model](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) from the Hub and fine-tune it with your smaller task dataset. That's the magic of Open Source and sharing your work! 🎉
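+
+As a sketch, you can also prompt the domain-specific checkpoint linked above directly (the example sentence is hypothetical):
+
+```python
+from transformers import pipeline
+
+fill = pipeline("fill-mask", model="microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext")
+# BERT-style checkpoints use [MASK] as their mask token
+fill("The patient was diagnosed with [MASK] disease.")
+```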
+
+## Inference with Fill-Mask Pipeline
+
+You can use the 🤗 Transformers library `fill-mask` pipeline to do inference with masked language models. If a model name is not provided, the pipeline will be initialized with [distilroberta-base](/distilroberta-base). You can provide masked text, and it will return a list of possible mask values ranked according to their score.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("fill-mask")
+classifier("Paris is the <mask> of France.")
+
+# [{'score': 0.7, 'sequence': 'Paris is the capital of France.'},
+#  {'score': 0.2, 'sequence': 'Paris is the birthplace of France.'},
+#  {'score': 0.1, 'sequence': 'Paris is the heart of France.'}]
+```
+
+## Useful Resources
+
+Would you like to learn more about the topic? Awesome! Here you can find some curated resources that can be helpful to you!
+
+- [Course Chapter on Fine-tuning a Masked Language Model](https://huggingface.co/course/chapter7/3?fw=pt)
+- [Workshop on Pretraining Language Models and CodeParrot](https://www.youtube.com/watch?v=ExUR7w6xe94)
+- [BERT 101: State Of The Art NLP Model Explained](https://huggingface.co/blog/bert-101)
+- [Nyströmformer: Approximating self-attention in linear time and memory via the Nyström method](https://huggingface.co/blog/nystromformer)
+
+### Notebooks
+
+- [Pre-training an MLM for JAX/Flax](https://github.com/huggingface/notebooks/blob/master/examples/masked_language_modeling_flax.ipynb)
+- [Masked language modeling in TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling-tf.ipynb)
+- [Masked language modeling in PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling)
+- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/language-modeling)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling)
+
+### Documentation
+
+- [Masked language modeling task guide](https://huggingface.co/docs/transformers/tasks/masked_language_modeling)
diff --git a/packages/tasks/src/fill-mask/data.ts b/packages/tasks/src/fill-mask/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..4e8204b159ff19257fe19877947236a7e29442bb
--- /dev/null
+++ b/packages/tasks/src/fill-mask/data.ts
@@ -0,0 +1,79 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A common dataset that is used to train models for many languages.",
+			id: "wikipedia",
+		},
+		{
+			description: "A large English dataset with text crawled from the web.",
+			id: "c4",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "The <mask> barked at me",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "wolf",
+						score: 0.487,
+					},
+					{
+						label: "dog",
+						score: 0.061,
+					},
+					{
+						label: "cat",
+						score: 0.058,
+					},
+					{
+						label: "fox",
+						score: 0.047,
+					},
+					{
+						label: "squirrel",
+						score: 0.025,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Cross Entropy is a metric that calculates the difference between two probability distributions. Each probability distribution is the distribution of predicted words.",
+			id: "cross_entropy",
+		},
+		{
+			description:
+				"Perplexity is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model.
 Lower perplexity indicates better performance.",
			id: "perplexity",
		},
	],
	models: [
		{
			description: "A faster and smaller model than the famous BERT model.",
			id: "distilbert-base-uncased",
		},
		{
			description: "A multilingual model trained on 100 languages.",
			id: "xlm-roberta-base",
		},
	],
	spaces: [],
	summary:
		"Masked language modeling is the task of masking some of the words in a sentence and predicting which words should replace those masks. These models are useful when we want to get a statistical understanding of the language the model was trained on.",
	widgetModels: ["distilroberta-base"],
	youtubeId: "mqElG5QJWUg",
};

export default taskData;
diff --git a/packages/tasks/src/image-classification/about.md b/packages/tasks/src/image-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..593f3b1ddd5d61ff155ebddeb0ce339adcff4e85
--- /dev/null
+++ b/packages/tasks/src/image-classification/about.md
@@ -0,0 +1,50 @@
+## Use Cases
+
+Image classification models can be used when we are not interested in specific instances of objects with location information or their shape.
+
+### Keyword Classification
+
+Image classification models are used widely in stock photography to assign each image a keyword.
+
+### Image Search
+
+Models trained in image classification can improve user experience by organizing and categorizing photo galleries on the phone or in the cloud, based on keywords or tags.
+
+## Inference
+
+With the `transformers` library, you can use the `image-classification` pipeline to infer with image classification models. You can initialize the pipeline with a model id from the Hub. If you do not provide a model id, it will initialize with [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) by default. When calling the pipeline, you just need to specify a path, an http link, or an image loaded in PIL. You can also provide a `top_k` parameter which determines how many results it should return.
+
+```python
+from transformers import pipeline
+clf = pipeline("image-classification")
+clf("path_to_a_cat_image")
+
+# [{'label': 'tabby cat', 'score': 0.731},
+#  ...
+# ]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to classify images using models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.imageClassification({
+	data: await (await fetch("https://picsum.photos/300/300")).blob(),
+	model: "microsoft/resnet-50",
+});
+```
+
+## Useful Resources
+
+- [Let's Play Pictionary with Machine Learning!](https://www.youtube.com/watch?v=LS9Y2wDVI0k)
+- [Fine-Tune ViT for Image Classification with 🤗Transformers](https://huggingface.co/blog/fine-tune-vit)
+- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8)
+- [Computer Vision Study Group: Swin Transformer](https://www.youtube.com/watch?v=Ngikt-K1Ecc)
+- [Computer Vision Study Group: Masked Autoencoders Paper Walkthrough](https://www.youtube.com/watch?v=Ngikt-K1Ecc)
+- [Image classification task guide](https://huggingface.co/docs/transformers/tasks/image_classification)
+
+### Creating your own image classifier in just a few minutes
+
+With [HuggingPics](https://github.com/nateraw/huggingpics), you can fine-tune Vision Transformers for anything using images found on the web.
This project downloads images of classes defined by you, trains a model, and pushes it to the Hub. You even get to try out the model directly with a working widget in the browser, ready to be shared with all your friends! diff --git a/packages/tasks/src/image-classification/data.ts b/packages/tasks/src/image-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..4dcbff4f17ba2f5811d7a1b3421916a5f3aa83aa --- /dev/null +++ b/packages/tasks/src/image-classification/data.ts @@ -0,0 +1,88 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "Benchmark dataset used for image classification with images that belong to 100 classes.", + id: "cifar100", + }, + { + // TODO write proper description + description: "Dataset consisting of images of garments.", + id: "fashion_mnist", + }, + ], + demo: { + inputs: [ + { + filename: "image-classification-input.jpeg", + type: "img", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Egyptian cat", + score: 0.514, + }, + { + label: "Tabby cat", + score: 0.193, + }, + { + label: "Tiger cat", + score: 0.068, + }, + ], + }, + ], + }, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: "", + id: "recall", + }, + { + description: "", + id: "precision", + }, + { + description: "", + id: "f1", + }, + ], + models: [ + { + description: "A strong image classification model.", + id: "google/vit-base-patch16-224", + }, + { + description: "A robust image classification model.", + id: "facebook/deit-base-distilled-patch16-224", + }, + { + description: "A strong image classification model.", + id: "facebook/convnext-large-224", + }, + ], + spaces: [ + { + // TO DO: write description + description: "An application that classifies what a given image is about.", + id: "nielsr/perceiver-image-classification", + }, + ], + summary: + "Image classification is the task of assigning a label or class to an entire image. Images are expected to have only one class for each image. Image classification models take an image as input and return a prediction about which class the image belongs to.", + widgetModels: ["google/vit-base-patch16-224"], + youtubeId: "tjAIM7BOYhw", +}; + +export default taskData; diff --git a/packages/tasks/src/image-segmentation/about.md b/packages/tasks/src/image-segmentation/about.md new file mode 100644 index 0000000000000000000000000000000000000000..3f26fb8caef4ac8668af9f1f4863c7deb4933e21 --- /dev/null +++ b/packages/tasks/src/image-segmentation/about.md @@ -0,0 +1,63 @@ +## Use Cases + +### Autonomous Driving + +Segmentation models are used to identify road patterns such as lanes and obstacles for safer driving. + +### Background Removal + +Image Segmentation models are used in cameras to erase the background of certain objects and apply filters to them. + +### Medical Imaging + +Image Segmentation models are used to distinguish organs or tissues, improving medical imaging workflows. Models are used to segment dental instances, analyze X-Ray scans or even segment cells for pathological diagnosis. This [dataset](https://github.com/v7labs/covid-19-xray-dataset) contains images of lungs of healthy patients and patients with COVID-19 segmented with masks. Another [segmentation dataset](https://ivdm3seg.weebly.com/data.html) contains segmented MRI data of the lower spine to analyze the effect of spaceflight simulation. 
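+
+As an illustration of the background-removal use case above, here is a minimal sketch that keeps only the pixels covered by a person mask returned by the `image-segmentation` pipeline. The checkpoint choice, the input file and the `"person"` label are illustrative assumptions, not a fixed recipe:
+
+```python
+import numpy as np
+from PIL import Image
+from transformers import pipeline
+
+segmenter = pipeline("image-segmentation", model="nvidia/segformer-b0-finetuned-ade-512-512")
+image = Image.open("portrait.jpg").convert("RGB")  # placeholder input image
+
+# each result is a dict with a "label", a "score" and a PIL "mask"
+results = segmenter(image)
+person = next(r for r in results if r["label"] == "person")
+
+# zero out every pixel that falls outside the person mask
+mask = np.array(person["mask"]) > 0
+cutout = np.array(image) * mask[..., None]
+Image.fromarray(cutout.astype(np.uint8)).save("cutout.png")
+```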
+
+## Task Variants
+
+### Semantic Segmentation
+
+Semantic Segmentation is the task of segmenting parts of an image that belong to the same class. Semantic Segmentation models make predictions for each pixel and return the probabilities of the classes for each pixel. These models are evaluated on Mean Intersection Over Union (Mean IoU).
+
+### Instance Segmentation
+
+Instance Segmentation is the variant of Image Segmentation where every distinct object is segmented, instead of one segment per class.
+
+### Panoptic Segmentation
+
+Panoptic Segmentation is the Image Segmentation task that segments the image both by instance and by class, assigning each pixel a different instance of the class.
+
+## Inference
+
+You can infer with Image Segmentation models using the `image-segmentation` pipeline. You need to install [timm](https://github.com/rwightman/pytorch-image-models) first.
+
+```python
+!pip install timm
+from transformers import pipeline
+
+model = pipeline("image-segmentation")
+model("cat.png")
+# [{'label': 'cat',
+#   'mask': mask_code,
+#   'score': 0.999}
+#  ...]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image segmentation models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.imageSegmentation({
+	data: await (await fetch("https://picsum.photos/300/300")).blob(),
+	model: "facebook/detr-resnet-50-panoptic",
+});
+```
+
+## Useful Resources
+
+Would you like to learn more about image segmentation? Great! Here you can find some curated resources that you may find helpful!
+
+- [Fine-Tune a Semantic Segmentation Model with a Custom Dataset](https://huggingface.co/blog/fine-tune-segformer)
+- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8)
+- [A Guide on Universal Image Segmentation with Mask2Former and OneFormer](https://huggingface.co/blog/mask2former)
+- [Zero-shot image segmentation with CLIPSeg](https://huggingface.co/blog/clipseg-zero-shot)
+- [Semantic segmentation task guide](https://huggingface.co/docs/transformers/tasks/semantic_segmentation)
diff --git a/packages/tasks/src/image-segmentation/data.ts b/packages/tasks/src/image-segmentation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c6bb835e79575d57b8658b6ae15bd54ec3a7a9b6
--- /dev/null
+++ b/packages/tasks/src/image-segmentation/data.ts
@@ -0,0 +1,99 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "Scene segmentation dataset.",
+			id: "scene_parse_150",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "image-segmentation-input.jpeg",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				filename: "image-segmentation-output.png",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Average Precision (AP) is the Area Under the PR Curve (AUC-PR). It is calculated for each semantic class separately.",
+			id: "Average Precision",
+		},
+		{
+			description: "Mean Average Precision (mAP) is the overall average of the AP values.",
+			id: "Mean Average Precision",
+		},
+		{
+			description:
+				"Intersection over Union (IoU) is the overlap of segmentation masks. Mean IoU is the average of the IoU of all semantic classes.",
+			id: "Mean Intersection over Union",
+		},
+		{
+			description: "APα is the Average Precision at a given IoU threshold α, for example, AP50 and AP75.",
+			id: "APα",
+		},
+	],
+	models: [
+		{
+			// TO DO: write description
+			description: "Solid panoptic segmentation model trained on the COCO 2017 benchmark dataset.",
+			id: "facebook/detr-resnet-50-panoptic",
+		},
+		{
+			description: "Semantic segmentation model trained on the ADE20k benchmark dataset.",
+			id: "microsoft/beit-large-finetuned-ade-640-640",
+		},
+		{
+			description: "Semantic segmentation model trained on the ADE20k benchmark dataset with 512x512 resolution.",
+			id: "nvidia/segformer-b0-finetuned-ade-512-512",
+		},
+		{
+			description: "Semantic segmentation model trained on the Cityscapes dataset.",
+			id: "facebook/mask2former-swin-large-cityscapes-semantic",
+		},
+		{
+			description: "Panoptic segmentation model trained on the COCO (common objects) dataset.",
+			id: "facebook/mask2former-swin-large-coco-panoptic",
+		},
+	],
+	spaces: [
+		{
+			description: "A semantic segmentation application that can predict unseen instances out of the box.",
+			id: "facebook/ov-seg",
+		},
+		{
+			description: "One of the strongest segmentation applications.",
+			id: "jbrinkma/segment-anything",
+		},
+		{
+			description: "A semantic segmentation application that predicts human silhouettes.",
+			id: "keras-io/Human-Part-Segmentation",
+		},
+		{
+			description: "An instance segmentation application to predict neuronal cell types from microscopy images.",
+			id: "rashmi/sartorius-cell-instance-segmentation",
+		},
+		{
+			description: "An application that segments videos.",
+			id: "ArtGAN/Segment-Anything-Video",
+		},
+		{
+			description: "A panoptic segmentation application built for outdoor environments.",
+			id: "segments/panoptic-segment-anything",
+		},
+	],
+	summary:
+		"Image Segmentation divides an image into segments where each pixel in the image is mapped to an object. This task has multiple variants such as instance segmentation, panoptic segmentation and semantic segmentation.",
+	widgetModels: ["facebook/detr-resnet-50-panoptic"],
+	youtubeId: "dKE8SIt9C-w",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/image-to-image/about.md b/packages/tasks/src/image-to-image/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..d133bafcee0a36643ef43b3bb041d198bafd6934
--- /dev/null
+++ b/packages/tasks/src/image-to-image/about.md
@@ -0,0 +1,79 @@
+## Use Cases
+
+### Style transfer
+
+One of the most popular use cases of image-to-image is style transfer. Style transfer models can convert a regular photograph into a painting in the style of a famous painter.
+
+## Task Variants
+
+### Image inpainting
+
+Image inpainting is widely used during photography editing to remove unwanted objects, such as poles, wires or sensor dust.
+
+### Image colorization
+
+Old, black and white images can be brought back to life using an image colorization model.
+
+### Super Resolution
+
+Super resolution models increase the resolution of an image, allowing for higher quality viewing and printing.
+
+## Inference
+
+You can use the image-to-image pipelines in the 🧨 Diffusers library to easily run image-to-image models. See an example for `StableDiffusionImg2ImgPipeline` below.
+
+```python
+import torch
+from PIL import Image
+from diffusers import StableDiffusionImg2ImgPipeline
+
+model_id_or_path = "runwayml/stable-diffusion-v1-5"
+pipe = StableDiffusionImg2ImgPipeline.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
+pipe = pipe.to("cuda")
+
+init_image = Image.open("mountains_image.jpeg").convert("RGB").resize((768, 512))
+prompt = "A fantasy landscape, trending on artstation"
+
+images = pipe(prompt=prompt, image=init_image, strength=0.75, guidance_scale=7.5).images
+images[0].save("fantasy_landscape.png")
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image-to-image models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.imageToImage({
+	data: await (await fetch("image")).blob(),
+	model: "timbrooks/instruct-pix2pix",
+	parameters: {
+		prompt: "Deblur this image",
+	},
+});
+```
+
+## ControlNet
+
+Controlling the outputs of diffusion models only with a text prompt is a challenging problem. ControlNet is a type of neural network that provides image-based control to diffusion models. These controls can be edges or landmarks in an image. A minimal sketch of running a ControlNet checkpoint can be found at the end of this page.
+
+Many ControlNet models were trained in our community event, the JAX Diffusers sprint. You can see the full list of the ControlNet models available [here](https://huggingface.co/spaces/jax-diffusers-event/leaderboard).
+
+## Most Used Model for the Task
+
+Pix2Pix is a popular model used for image-to-image translation tasks. It is based on a conditional GAN (generative adversarial network) where, instead of a noise vector, a 2D image is given as input. More information about Pix2Pix can be retrieved from this [link](https://phillipi.github.io/pix2pix/) where the associated paper and the GitHub repository can be found.
+
+The images below show some of the examples shared in the paper that can be obtained using Pix2Pix. There are various cases this model can be applied on. It is capable of relatively simpler things, e.g. converting a grayscale image to its colored version. But more importantly, it can generate realistic pictures from rough sketches (as can be seen in the purse example) or from painting-like images (as can be seen in the street and facade examples below).
+
+![Examples](https://huggingface.co/datasets/huggingfacejs/tasks/resolve/main/image-to-image/pix2pix_examples.jpg)
+
+## Useful Resources
+
+- [Train your ControlNet with diffusers 🧨](https://huggingface.co/blog/train-your-controlnet)
+- [Ultra fast ControlNet with 🧨 Diffusers](https://huggingface.co/blog/controlnet)
+
+## References
+
+[1] P. Isola, J. -Y. Zhu, T. Zhou and A. A. Efros, "Image-to-Image Translation with Conditional Adversarial Networks," 2017 IEEE Conference on Computer Vision and Pattern Recognition (CVPR), 2017, pp. 5967-5976, doi: 10.1109/CVPR.2017.632.
+
+This page was made possible thanks to the efforts of [Paul Gafton](https://github.com/Paul92) and [Osman Alenbey](https://huggingface.co/osman93).
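+
+As mentioned in the ControlNet section, here is a minimal sketch of running a canny-edge ControlNet checkpoint with 🧨 Diffusers. The checkpoint names are real, but the input file is a made-up placeholder and the exact arguments may vary across Diffusers versions:
+
+```python
+import torch
+from PIL import Image
+from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+
+# a canny-edge ControlNet conditioning a Stable Diffusion checkpoint
+controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
+pipe = StableDiffusionControlNetPipeline.from_pretrained(
+    "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
+)
+pipe = pipe.to("cuda")
+
+# "edges.png" is a placeholder: a precomputed canny edge map of the source image
+canny_image = Image.open("edges.png")
+image = pipe("a fantasy landscape at sunset", image=canny_image, num_inference_steps=20).images[0]
+image.save("controlnet_output.png")
+```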
diff --git a/packages/tasks/src/image-to-image/data.ts b/packages/tasks/src/image-to-image/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..9688dc249e923dc55661e30ece51c39e7e04510f --- /dev/null +++ b/packages/tasks/src/image-to-image/data.ts @@ -0,0 +1,101 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Synthetic dataset, for image relighting", + id: "VIDIT", + }, + { + description: "Multiple images of celebrities, used for facial expression translation", + id: "huggan/CelebA-faces", + }, + ], + demo: { + inputs: [ + { + filename: "image-to-image-input.jpeg", + type: "img", + }, + ], + outputs: [ + { + filename: "image-to-image-output.png", + type: "img", + }, + ], + }, + isPlaceholder: false, + metrics: [ + { + description: + "Peak Signal to Noise Ratio (PSNR) is an approximation of the human perception, considering the ratio of the absolute intensity with respect to the variations. Measured in dB, a high value indicates a high fidelity.", + id: "PSNR", + }, + { + description: + "Structural Similarity Index (SSIM) is a perceptual metric which compares the luminance, contrast and structure of two images. The values of SSIM range between -1 and 1, and higher values indicate closer resemblance to the original image.", + id: "SSIM", + }, + { + description: + "Inception Score (IS) is an analysis of the labels predicted by an image classification model when presented with a sample of the generated images.", + id: "IS", + }, + ], + models: [ + { + description: "A model that enhances images captured in low light conditions.", + id: "keras-io/low-light-image-enhancement", + }, + { + description: "A model that increases the resolution of an image.", + id: "keras-io/super-resolution", + }, + { + description: + "A model that creates a set of variations of the input image in the style of DALL-E using Stable Diffusion.", + id: "lambdalabs/sd-image-variations-diffusers", + }, + { + description: "A model that generates images based on segments in the input image and the text prompt.", + id: "mfidabel/controlnet-segment-anything", + }, + { + description: "A model that takes an image and an instruction to edit the image.", + id: "timbrooks/instruct-pix2pix", + }, + ], + spaces: [ + { + description: "Image enhancer application for low light.", + id: "keras-io/low-light-image-enhancement", + }, + { + description: "Style transfer application.", + id: "keras-io/neural-style-transfer", + }, + { + description: "An application that generates images based on segment control.", + id: "mfidabel/controlnet-segment-anything", + }, + { + description: "Image generation application that takes image control and text prompt.", + id: "hysts/ControlNet", + }, + { + description: "Colorize any image using this app.", + id: "ioclab/brightness-controlnet", + }, + { + description: "Edit images with instructions.", + id: "timbrooks/instruct-pix2pix", + }, + ], + summary: + "Image-to-image is the task of transforming a source image to match the characteristics of a target image or a target image domain. 
Any image manipulation and enhancement is possible with image-to-image models.",
+	widgetModels: ["lllyasviel/sd-controlnet-canny"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/image-to-text/about.md b/packages/tasks/src/image-to-text/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..a209ae22bc5073bc86f9da2a998b71b28c63ecf1
--- /dev/null
+++ b/packages/tasks/src/image-to-text/about.md
@@ -0,0 +1,65 @@
+## Use Cases
+
+### Image Captioning
+
+Image Captioning is the process of generating a textual description of an image.
+This can help visually impaired people understand what's happening in their surroundings.
+
+### Optical Character Recognition (OCR)
+
+OCR models convert the text present in an image, e.g. a scanned document, into machine-readable text.
+
+## Pix2Struct
+
+Pix2Struct is a state-of-the-art model built and released by Google AI. The model itself has to be trained on a downstream task to be used. These tasks include captioning UI components, captioning images that include text, visual question answering over infographics, charts, scientific diagrams and more. You can find these models among the recommended models of this page, and a minimal inference sketch at the end of this page.
+
+## Inference
+
+### Image Captioning
+
+You can use the 🤗 Transformers library's `image-to-text` pipeline to generate captions for a given image.
+
+```python
+from transformers import pipeline
+
+captioner = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
+captioner("https://huggingface.co/datasets/Narsil/image_dummy/resolve/main/parrots.png")
+## [{'generated_text': 'two birds are standing next to each other '}]
+```
+
+### OCR
+
+This code snippet uses Microsoft’s TrOCR, an encoder-decoder model consisting of an image Transformer encoder and a text Transformer decoder for state-of-the-art optical character recognition (OCR) on single-line text images.
+
+```python
+from PIL import Image
+from transformers import TrOCRProcessor, VisionEncoderDecoderModel
+
+processor = TrOCRProcessor.from_pretrained('microsoft/trocr-base-handwritten')
+model = VisionEncoderDecoderModel.from_pretrained('microsoft/trocr-base-handwritten')
+
+# the processor expects a PIL image, not a file path
+image = Image.open("image.jpeg").convert("RGB")
+pixel_values = processor(images=image, return_tensors="pt").pixel_values
+
+generated_ids = model.generate(pixel_values)
+generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer image-to-text models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.imageToText({
+	data: await (await fetch("https://picsum.photos/300/300")).blob(),
+	model: "Salesforce/blip-image-captioning-base",
+});
+```
+
+## Useful Resources
+
+- [Image Captioning](https://huggingface.co/docs/transformers/main/en/tasks/image_captioning)
+- [Image captioning use case](https://blog.google/outreach-initiatives/accessibility/get-image-descriptions/)
+- [Train Image Captioning model on your dataset](https://github.com/NielsRogge/Transformers-Tutorials/blob/master/GIT/Fine_tune_GIT_on_an_image_captioning_dataset.ipynb)
+- [Train OCR model on your dataset](https://github.com/NielsRogge/Transformers-Tutorials/tree/master/TrOCR)
+
+This page was made possible thanks to the efforts of [Sukesh Perla](https://huggingface.co/hitchhiker3010) and [Johannes Kolbe](https://huggingface.co/johko).
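+
+As referenced in the Pix2Struct section above, here is a minimal sketch of running one of its fine-tuned checkpoints to caption an image that contains text. The checkpoint name is from the recommended models; the input file is a made-up placeholder:
+
+```python
+from PIL import Image
+from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor
+
+processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
+model = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-textcaps-base")
+
+# "sign.jpg" is a placeholder for an image that contains some text
+image = Image.open("sign.jpg").convert("RGB")
+inputs = processor(images=image, return_tensors="pt")
+
+generated_ids = model.generate(**inputs, max_new_tokens=50)
+print(processor.decode(generated_ids[0], skip_special_tokens=True))
+```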
diff --git a/packages/tasks/src/image-to-text/data.ts b/packages/tasks/src/image-to-text/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..6a838ebead9cf02e8104151e88d419d7921581e9 --- /dev/null +++ b/packages/tasks/src/image-to-text/data.ts @@ -0,0 +1,86 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "Dataset from 12M image-text of Reddit", + id: "red_caps", + }, + { + // TODO write proper description + description: "Dataset from 3.3M images of Google", + id: "datasets/conceptual_captions", + }, + ], + demo: { + inputs: [ + { + filename: "savanna.jpg", + type: "img", + }, + ], + outputs: [ + { + label: "Detailed description", + content: "a herd of giraffes and zebras grazing in a field", + type: "text", + }, + ], + }, + metrics: [], + models: [ + { + description: "A robust image captioning model.", + id: "Salesforce/blip-image-captioning-large", + }, + { + description: "A strong image captioning model.", + id: "nlpconnect/vit-gpt2-image-captioning", + }, + { + description: "A strong optical character recognition model.", + id: "microsoft/trocr-base-printed", + }, + { + description: "A strong visual question answering model for scientific diagrams.", + id: "google/pix2struct-ai2d-base", + }, + { + description: "A strong captioning model for UI components.", + id: "google/pix2struct-widget-captioning-base", + }, + { + description: "A captioning model for images that contain text.", + id: "google/pix2struct-textcaps-base", + }, + ], + spaces: [ + { + description: "A robust image captioning application.", + id: "flax-community/image-captioning", + }, + { + description: "An application that transcribes handwritings into text.", + id: "nielsr/TrOCR-handwritten", + }, + { + description: "An application that can caption images and answer questions about a given image.", + id: "Salesforce/BLIP", + }, + { + description: "An application that can caption images and answer questions with a conversational agent.", + id: "Salesforce/BLIP2", + }, + { + description: "An image captioning application that demonstrates the effect of noise on captions.", + id: "johko/capdec-image-captioning", + }, + ], + summary: + "Image to text models output a text from a given image. Image captioning or optical character recognition can be considered as the most common applications of image to text.", + widgetModels: ["Salesforce/blip-image-captioning-base"], + youtubeId: "", +}; + +export default taskData; diff --git a/packages/tasks/src/index.ts b/packages/tasks/src/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..d45f5d5410ce84b54b7646164aadb1f18370051a --- /dev/null +++ b/packages/tasks/src/index.ts @@ -0,0 +1,13 @@ +export type { TaskData, TaskDemo, TaskDemoEntry, ExampleRepo } from "./Types"; +export { TASKS_DATA } from "./tasksData"; +export { + PIPELINE_DATA, + PIPELINE_TYPES, + type PipelineType, + type PipelineData, + type Modality, + MODALITIES, + MODALITY_LABELS, +} from "./pipelines"; +export { ModelLibrary } from "./modelLibraries"; +export type { ModelLibraryKey } from "./modelLibraries"; diff --git a/packages/tasks/src/modelLibraries.ts b/packages/tasks/src/modelLibraries.ts new file mode 100644 index 0000000000000000000000000000000000000000..6d76980f515c42d5f104289c97cab1535ad4419b --- /dev/null +++ b/packages/tasks/src/modelLibraries.ts @@ -0,0 +1,43 @@ +/** + * Add your new library here. 
+ * + * This is for modeling (= architectures) libraries, not for file formats (like ONNX, etc). + * File formats live in an enum inside the internal codebase. + */ +export enum ModelLibrary { + "adapter-transformers" = "Adapter Transformers", + "allennlp" = "allenNLP", + "asteroid" = "Asteroid", + "bertopic" = "BERTopic", + "diffusers" = "Diffusers", + "doctr" = "docTR", + "espnet" = "ESPnet", + "fairseq" = "Fairseq", + "flair" = "Flair", + "keras" = "Keras", + "k2" = "K2", + "nemo" = "NeMo", + "open_clip" = "OpenCLIP", + "paddlenlp" = "PaddleNLP", + "peft" = "PEFT", + "pyannote-audio" = "pyannote.audio", + "sample-factory" = "Sample Factory", + "sentence-transformers" = "Sentence Transformers", + "sklearn" = "Scikit-learn", + "spacy" = "spaCy", + "span-marker" = "SpanMarker", + "speechbrain" = "speechbrain", + "tensorflowtts" = "TensorFlowTTS", + "timm" = "Timm", + "fastai" = "fastai", + "transformers" = "Transformers", + "transformers.js" = "Transformers.js", + "stanza" = "Stanza", + "fasttext" = "fastText", + "stable-baselines3" = "Stable-Baselines3", + "ml-agents" = "ML-Agents", + "pythae" = "Pythae", + "mindspore" = "MindSpore", +} + +export type ModelLibraryKey = keyof typeof ModelLibrary; diff --git a/packages/tasks/src/object-detection/about.md b/packages/tasks/src/object-detection/about.md new file mode 100644 index 0000000000000000000000000000000000000000..4dda21224f937a27a5f56d40ee877ff03eaf1d09 --- /dev/null +++ b/packages/tasks/src/object-detection/about.md @@ -0,0 +1,37 @@ +## Use Cases + +### Autonomous Driving + +Object Detection is widely used in computer vision for autonomous driving. Self-driving cars use Object Detection models to detect pedestrians, bicycles, traffic lights and road signs to decide which step to take. + +### Object Tracking in Matches + +Object Detection models are widely used in sports where the ball or a player is tracked for monitoring and refereeing during matches. + +### Image Search + +Object Detection models are widely used in image search. Smartphones use Object Detection models to detect entities (such as specific places or objects) and allow the user to search for the entity on the Internet. + +### Object Counting + +Object Detection models are used to count instances of objects in a given image, this can include counting the objects in warehouses or stores, or counting the number of visitors in a store. They are also used to manage crowds at events to prevent disasters. + +## Inference + +You can infer with Object Detection models through the `object-detection` pipeline. When calling the pipeline you just need to specify a path or http link to an image. + +```python +model = pipeline("object-detection") + +model("path_to_cat_image") + +# [{'label': 'blanket', +# 'mask': mask_string, +# 'score': 0.917}, +#...] 
+``` + +# Useful Resources + +- [Walkthrough of Computer Vision Ecosystem in Hugging Face - CV Study Group](https://www.youtube.com/watch?v=oL-xmufhZM8) +- [Object detection task guide](https://huggingface.co/docs/transformers/tasks/object_detection) diff --git a/packages/tasks/src/object-detection/data.ts b/packages/tasks/src/object-detection/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..8ffe8ea1b070c7cd89427197093c3e9a18cefbee --- /dev/null +++ b/packages/tasks/src/object-detection/data.ts @@ -0,0 +1,76 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "Widely used benchmark dataset for multiple Vision tasks.", + id: "merve/coco2017", + }, + ], + demo: { + inputs: [ + { + filename: "object-detection-input.jpg", + type: "img", + }, + ], + outputs: [ + { + filename: "object-detection-output.jpg", + type: "img", + }, + ], + }, + metrics: [ + { + description: + "The Average Precision (AP) metric is the Area Under the PR Curve (AUC-PR). It is calculated for each class separately", + id: "Average Precision", + }, + { + description: "The Mean Average Precision (mAP) metric is the overall average of the AP values", + id: "Mean Average Precision", + }, + { + description: + "The APα metric is the Average Precision at the IoU threshold of a α value, for example, AP50 and AP75", + id: "APα", + }, + ], + models: [ + { + // TO DO: write description + description: "Solid object detection model trained on the benchmark dataset COCO 2017.", + id: "facebook/detr-resnet-50", + }, + { + description: "Strong object detection model trained on ImageNet-21k dataset.", + id: "microsoft/beit-base-patch16-224-pt22k-ft22k", + }, + ], + spaces: [ + { + description: "An object detection application that can detect unseen objects out of the box.", + id: "adirik/OWL-ViT", + }, + { + description: "An application that contains various object detection models to try from.", + id: "Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS", + }, + { + description: "An application that shows multiple cutting edge techniques for object detection and tracking.", + id: "kadirnar/torchyolo", + }, + { + description: "An object tracking, segmentation and inpainting application.", + id: "VIPLab/Track-Anything", + }, + ], + summary: + "Object Detection models allow users to identify objects of certain defined classes. Object detection models receive an image as input and output the images with bounding boxes and labels on detected objects.", + widgetModels: ["facebook/detr-resnet-50"], + youtubeId: "WdAeKSOpxhw", +}; + +export default taskData; diff --git a/packages/tasks/src/pipelines.ts b/packages/tasks/src/pipelines.ts new file mode 100644 index 0000000000000000000000000000000000000000..ae487d5e30bb88aa4119953e7230ba2ebb31e8c0 --- /dev/null +++ b/packages/tasks/src/pipelines.ts @@ -0,0 +1,619 @@ +export const MODALITIES = ["cv", "nlp", "audio", "tabular", "multimodal", "rl", "other"] as const; + +export type Modality = (typeof MODALITIES)[number]; + +export const MODALITY_LABELS = { + multimodal: "Multimodal", + nlp: "Natural Language Processing", + audio: "Audio", + cv: "Computer Vision", + rl: "Reinforcement Learning", + tabular: "Tabular", + other: "Other", +} satisfies Record; + +/** + * Public interface for a sub task. + * + * This can be used in a model card's `model-index` metadata. + * and is more granular classification that can grow significantly + * over time as new tasks are added. 
+ */ +export interface SubTask { + /** + * type of the task (e.g. audio-source-separation) + */ + type: string; + /** + * displayed name of the task (e.g. Audio Source Separation) + */ + name: string; +} + +/** + * Public interface for a PipelineData. + * + * This information corresponds to a pipeline type (aka task) + * in the Hub. + */ +export interface PipelineData { + /** + * displayed name of the task (e.g. Text Classification) + */ + name: string; + subtasks?: SubTask[]; + modality: Modality; + /** + * color for the tag icon. + */ + color: "blue" | "green" | "indigo" | "orange" | "red" | "yellow"; + /** + * whether to hide in /models filters + */ + hideInModels?: boolean; + /** + * whether to hide in /datasets filters + */ + hideInDatasets?: boolean; +} + +/// Coarse-grained taxonomy of tasks +/// +/// This type is used in multiple places in the Hugging Face +/// ecosystem: +/// - To determine which widget to show. +/// - To determine which endpoint of Inference API to use. +/// - As filters at the left of models and datasets page. +/// +/// Note that this is sensitive to order. +/// For each domain, the order should be of decreasing specificity. +/// This will impact the default pipeline tag of a model when not +/// specified. +export const PIPELINE_DATA = { + "text-classification": { + name: "Text Classification", + subtasks: [ + { + type: "acceptability-classification", + name: "Acceptability Classification", + }, + { + type: "entity-linking-classification", + name: "Entity Linking Classification", + }, + { + type: "fact-checking", + name: "Fact Checking", + }, + { + type: "intent-classification", + name: "Intent Classification", + }, + { + type: "language-identification", + name: "Language Identification", + }, + { + type: "multi-class-classification", + name: "Multi Class Classification", + }, + { + type: "multi-label-classification", + name: "Multi Label Classification", + }, + { + type: "multi-input-text-classification", + name: "Multi-input Text Classification", + }, + { + type: "natural-language-inference", + name: "Natural Language Inference", + }, + { + type: "semantic-similarity-classification", + name: "Semantic Similarity Classification", + }, + { + type: "sentiment-classification", + name: "Sentiment Classification", + }, + { + type: "topic-classification", + name: "Topic Classification", + }, + { + type: "semantic-similarity-scoring", + name: "Semantic Similarity Scoring", + }, + { + type: "sentiment-scoring", + name: "Sentiment Scoring", + }, + { + type: "sentiment-analysis", + name: "Sentiment Analysis", + }, + { + type: "hate-speech-detection", + name: "Hate Speech Detection", + }, + { + type: "text-scoring", + name: "Text Scoring", + }, + ], + modality: "nlp", + color: "orange", + }, + "token-classification": { + name: "Token Classification", + subtasks: [ + { + type: "named-entity-recognition", + name: "Named Entity Recognition", + }, + { + type: "part-of-speech", + name: "Part of Speech", + }, + { + type: "parsing", + name: "Parsing", + }, + { + type: "lemmatization", + name: "Lemmatization", + }, + { + type: "word-sense-disambiguation", + name: "Word Sense Disambiguation", + }, + { + type: "coreference-resolution", + name: "Coreference-resolution", + }, + ], + modality: "nlp", + color: "blue", + }, + "table-question-answering": { + name: "Table Question Answering", + modality: "nlp", + color: "green", + }, + "question-answering": { + name: "Question Answering", + subtasks: [ + { + type: "extractive-qa", + name: "Extractive QA", + }, + { + type: 
"open-domain-qa", + name: "Open Domain QA", + }, + { + type: "closed-domain-qa", + name: "Closed Domain QA", + }, + ], + modality: "nlp", + color: "blue", + }, + "zero-shot-classification": { + name: "Zero-Shot Classification", + modality: "nlp", + color: "yellow", + }, + translation: { + name: "Translation", + modality: "nlp", + color: "green", + }, + summarization: { + name: "Summarization", + subtasks: [ + { + type: "news-articles-summarization", + name: "News Articles Summarization", + }, + { + type: "news-articles-headline-generation", + name: "News Articles Headline Generation", + }, + ], + modality: "nlp", + color: "indigo", + }, + conversational: { + name: "Conversational", + subtasks: [ + { + type: "dialogue-generation", + name: "Dialogue Generation", + }, + ], + modality: "nlp", + color: "green", + }, + "feature-extraction": { + name: "Feature Extraction", + modality: "multimodal", + color: "red", + }, + "text-generation": { + name: "Text Generation", + subtasks: [ + { + type: "dialogue-modeling", + name: "Dialogue Modeling", + }, + { + type: "language-modeling", + name: "Language Modeling", + }, + ], + modality: "nlp", + color: "indigo", + }, + "text2text-generation": { + name: "Text2Text Generation", + subtasks: [ + { + type: "text-simplification", + name: "Text simplification", + }, + { + type: "explanation-generation", + name: "Explanation Generation", + }, + { + type: "abstractive-qa", + name: "Abstractive QA", + }, + { + type: "open-domain-abstractive-qa", + name: "Open Domain Abstractive QA", + }, + { + type: "closed-domain-qa", + name: "Closed Domain QA", + }, + { + type: "open-book-qa", + name: "Open Book QA", + }, + { + type: "closed-book-qa", + name: "Closed Book QA", + }, + ], + modality: "nlp", + color: "indigo", + }, + "fill-mask": { + name: "Fill-Mask", + subtasks: [ + { + type: "slot-filling", + name: "Slot Filling", + }, + { + type: "masked-language-modeling", + name: "Masked Language Modeling", + }, + ], + modality: "nlp", + color: "red", + }, + "sentence-similarity": { + name: "Sentence Similarity", + modality: "nlp", + color: "yellow", + }, + "text-to-speech": { + name: "Text-to-Speech", + modality: "audio", + color: "yellow", + }, + "text-to-audio": { + name: "Text-to-Audio", + modality: "audio", + color: "yellow", + }, + "automatic-speech-recognition": { + name: "Automatic Speech Recognition", + modality: "audio", + color: "yellow", + }, + "audio-to-audio": { + name: "Audio-to-Audio", + modality: "audio", + color: "blue", + }, + "audio-classification": { + name: "Audio Classification", + subtasks: [ + { + type: "keyword-spotting", + name: "Keyword Spotting", + }, + { + type: "speaker-identification", + name: "Speaker Identification", + }, + { + type: "audio-intent-classification", + name: "Audio Intent Classification", + }, + { + type: "audio-emotion-recognition", + name: "Audio Emotion Recognition", + }, + { + type: "audio-language-identification", + name: "Audio Language Identification", + }, + ], + modality: "audio", + color: "green", + }, + "voice-activity-detection": { + name: "Voice Activity Detection", + modality: "audio", + color: "red", + }, + "depth-estimation": { + name: "Depth Estimation", + modality: "cv", + color: "yellow", + }, + "image-classification": { + name: "Image Classification", + subtasks: [ + { + type: "multi-label-image-classification", + name: "Multi Label Image Classification", + }, + { + type: "multi-class-image-classification", + name: "Multi Class Image Classification", + }, + ], + modality: "cv", + color: "blue", + }, + 
"object-detection": { + name: "Object Detection", + subtasks: [ + { + type: "face-detection", + name: "Face Detection", + }, + { + type: "vehicle-detection", + name: "Vehicle Detection", + }, + ], + modality: "cv", + color: "yellow", + }, + "image-segmentation": { + name: "Image Segmentation", + subtasks: [ + { + type: "instance-segmentation", + name: "Instance Segmentation", + }, + { + type: "semantic-segmentation", + name: "Semantic Segmentation", + }, + { + type: "panoptic-segmentation", + name: "Panoptic Segmentation", + }, + ], + modality: "cv", + color: "green", + }, + "text-to-image": { + name: "Text-to-Image", + modality: "multimodal", + color: "yellow", + }, + "image-to-text": { + name: "Image-to-Text", + subtasks: [ + { + type: "image-captioning", + name: "Image Captioning", + }, + ], + modality: "multimodal", + color: "red", + }, + "image-to-image": { + name: "Image-to-Image", + modality: "cv", + color: "indigo", + }, + "unconditional-image-generation": { + name: "Unconditional Image Generation", + modality: "cv", + color: "green", + }, + "video-classification": { + name: "Video Classification", + modality: "cv", + color: "blue", + }, + "reinforcement-learning": { + name: "Reinforcement Learning", + modality: "rl", + color: "red", + }, + robotics: { + name: "Robotics", + modality: "rl", + subtasks: [ + { + type: "grasping", + name: "Grasping", + }, + { + type: "task-planning", + name: "Task Planning", + }, + ], + color: "blue", + }, + "tabular-classification": { + name: "Tabular Classification", + modality: "tabular", + subtasks: [ + { + type: "tabular-multi-class-classification", + name: "Tabular Multi Class Classification", + }, + { + type: "tabular-multi-label-classification", + name: "Tabular Multi Label Classification", + }, + ], + color: "blue", + }, + "tabular-regression": { + name: "Tabular Regression", + modality: "tabular", + subtasks: [ + { + type: "tabular-single-column-regression", + name: "Tabular Single Column Regression", + }, + ], + color: "blue", + }, + "tabular-to-text": { + name: "Tabular to Text", + modality: "tabular", + subtasks: [ + { + type: "rdf-to-text", + name: "RDF to text", + }, + ], + color: "blue", + hideInModels: true, + }, + "table-to-text": { + name: "Table to Text", + modality: "nlp", + color: "blue", + hideInModels: true, + }, + "multiple-choice": { + name: "Multiple Choice", + subtasks: [ + { + type: "multiple-choice-qa", + name: "Multiple Choice QA", + }, + { + type: "multiple-choice-coreference-resolution", + name: "Multiple Choice Coreference Resolution", + }, + ], + modality: "nlp", + color: "blue", + hideInModels: true, + }, + "text-retrieval": { + name: "Text Retrieval", + subtasks: [ + { + type: "document-retrieval", + name: "Document Retrieval", + }, + { + type: "utterance-retrieval", + name: "Utterance Retrieval", + }, + { + type: "entity-linking-retrieval", + name: "Entity Linking Retrieval", + }, + { + type: "fact-checking-retrieval", + name: "Fact Checking Retrieval", + }, + ], + modality: "nlp", + color: "indigo", + hideInModels: true, + }, + "time-series-forecasting": { + name: "Time Series Forecasting", + modality: "tabular", + subtasks: [ + { + type: "univariate-time-series-forecasting", + name: "Univariate Time Series Forecasting", + }, + { + type: "multivariate-time-series-forecasting", + name: "Multivariate Time Series Forecasting", + }, + ], + color: "blue", + hideInModels: true, + }, + "text-to-video": { + name: "Text-to-Video", + modality: "multimodal", + color: "green", + }, + "visual-question-answering": { + name: 
"Visual Question Answering", + subtasks: [ + { + type: "visual-question-answering", + name: "Visual Question Answering", + }, + ], + modality: "multimodal", + color: "red", + }, + "document-question-answering": { + name: "Document Question Answering", + subtasks: [ + { + type: "document-question-answering", + name: "Document Question Answering", + }, + ], + modality: "multimodal", + color: "blue", + hideInDatasets: true, + }, + "zero-shot-image-classification": { + name: "Zero-Shot Image Classification", + modality: "cv", + color: "yellow", + }, + "graph-ml": { + name: "Graph Machine Learning", + modality: "multimodal", + color: "green", + }, + other: { + name: "Other", + modality: "other", + color: "blue", + hideInModels: true, + hideInDatasets: true, + }, +} satisfies Record; + +export type PipelineType = keyof typeof PIPELINE_DATA; +export const PIPELINE_TYPES = Object.keys(PIPELINE_DATA) as PipelineType[]; diff --git a/packages/tasks/src/placeholder/about.md b/packages/tasks/src/placeholder/about.md new file mode 100644 index 0000000000000000000000000000000000000000..fdb45584410dcd07e530607d469140038ede6b25 --- /dev/null +++ b/packages/tasks/src/placeholder/about.md @@ -0,0 +1,15 @@ +## Use Cases + +You can contribute this area with common use cases of the task! + +## Task Variants + +This place can be filled with variants of this task if there's any. + +## Inference + +This section should have useful information about how to pull a model from Hugging Face Hub that is a part of a library specialized in a task and use it. + +## Useful Resources + +In this area, you can insert useful resources about how to train or use a model for this task. diff --git a/packages/tasks/src/placeholder/data.ts b/packages/tasks/src/placeholder/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..3660b52194e88b0ae1391b34b970d6b6c1e27cc4 --- /dev/null +++ b/packages/tasks/src/placeholder/data.ts @@ -0,0 +1,18 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [], + demo: { + inputs: [], + outputs: [], + }, + isPlaceholder: true, + metrics: [], + models: [], + spaces: [], + summary: "", + widgetModels: [], + youtubeId: undefined, +}; + +export default taskData; diff --git a/packages/tasks/src/question-answering/about.md b/packages/tasks/src/question-answering/about.md new file mode 100644 index 0000000000000000000000000000000000000000..d5934ee80c7ca32c53726ef5b9e715af54a0b5d6 --- /dev/null +++ b/packages/tasks/src/question-answering/about.md @@ -0,0 +1,56 @@ +## Use Cases + +### Frequently Asked Questions + +You can use Question Answering (QA) models to automate the response to frequently asked questions by using a knowledge base (documents) as context. Answers to customer questions can be drawn from those documents. + +⚡⚡ If you’d like to save inference time, you can first use [passage ranking models](/tasks/sentence-similarity) to see which document might contain the answer to the question and iterate over that document with the QA model instead. + +## Task Variants +There are different QA variants based on the inputs and outputs: + +- **Extractive QA:** The model **extracts** the answer from a context. The context here could be a provided text, a table or even HTML! This is usually solved with BERT-like models. +- **Open Generative QA:** The model **generates** free text directly based on the context. You can learn more about the Text Generation task in [its page](/tasks/text-generation). 
+- **Closed Generative QA:** In this case, no context is provided. The answer is completely generated by a model. + +The schema above illustrates extractive, open book QA. The model takes a context and the question and extracts the answer from the given context. + +You can also differentiate QA models depending on whether they are open-domain or closed-domain. Open-domain models are not restricted to a specific domain, while closed-domain models are restricted to a specific domain (e.g. legal, medical documents). + +## Inference + +You can infer with QA models with the 🤗 Transformers library using the `question-answering` pipeline. If no model checkpoint is given, the pipeline will be initialized with `distilbert-base-cased-distilled-squad`. This pipeline takes a question and a context from which the answer will be extracted and returned. + +```python +from transformers import pipeline + +qa_model = pipeline("question-answering") +question = "Where do I live?" +context = "My name is Merve and I live in İstanbul." +qa_model(question = question, context = context) +## {'answer': 'İstanbul', 'end': 39, 'score': 0.953, 'start': 31} +``` + +## Useful Resources + +Would you like to learn more about QA? Awesome! Here are some curated resources that you may find helpful! + +- [Course Chapter on Question Answering](https://huggingface.co/course/chapter7/7?fw=pt) +- [Question Answering Workshop](https://www.youtube.com/watch?v=Ihgk8kGLpIE&ab_channel=HuggingFace) +- [How to Build an Open-Domain Question Answering System?](https://lilianweng.github.io/lil-log/2020/10/29/open-domain-question-answering.html) +- [Blog Post: ELI5 A Model for Open Domain Long Form Question Answering](https://yjernite.github.io/lfqa.html) + +### Notebooks + +- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/question_answering.ipynb) +- [TensorFlow](https://github.com/huggingface/notebooks/blob/main/examples/question_answering-tf.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/question-answering) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/question-answering) +- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/question-answering) + +### Documentation + +- [Question answering task guide](https://huggingface.co/docs/transformers/tasks/question_answering) diff --git a/packages/tasks/src/question-answering/data.ts b/packages/tasks/src/question-answering/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..dee5ccf644cb53a148a6a6c0bc4bb398b3bf6c27 --- /dev/null +++ b/packages/tasks/src/question-answering/data.ts @@ -0,0 +1,71 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + // TODO write proper description + description: "A famous question answering dataset based on English articles from Wikipedia.", + id: "squad_v2", + }, + { + // TODO write proper description + description: "A dataset of aggregated anonymized actual queries issued to the Google search engine.", + id: "natural_questions", + }, + ], + demo: { + inputs: [ + { + label: "Question", + content: "Which name is also used to describe the Amazon rainforest in English?", + type: "text", + }, + { + label: "Context", + content: "The Amazon rainforest, also known in English as Amazonia or the Amazon Jungle", + type: "text", + }, + ], + outputs: [ + { + label: "Answer", + content: "Amazonia", + type: "text", + }, + ], + }, + 
metrics: [
+		{
+			description:
+				"Exact Match is a metric based on the strict character match of the predicted answer and the right answer. For answers predicted correctly, the Exact Match will be 1. Even if only one character is different, Exact Match will be 0.",
+			id: "exact-match",
+		},
+		{
+			description:
+				"The F1-Score metric is useful if we value both false positives and false negatives equally. The F1-Score is calculated on each word in the predicted sequence against the correct answer.",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description: "A robust baseline model for most question answering domains.",
+			id: "deepset/roberta-base-squad2",
+		},
+		{
+			description: "A special model that can answer questions from tables!",
+			id: "google/tapas-base-finetuned-wtq",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can answer a long question from Wikipedia.",
+			id: "deepset/wikipedia-assistant",
+		},
+	],
+	summary:
+		"Question Answering models can retrieve the answer to a question from a given text, which is useful for searching for an answer in a document. Some question answering models can generate answers without context!",
+	widgetModels: ["deepset/roberta-base-squad2"],
+	youtubeId: "ajPx5LwJD-I",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/reinforcement-learning/about.md b/packages/tasks/src/reinforcement-learning/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..13f79cfff65cea30b22a4c667b8f83964f7c00f5
--- /dev/null
+++ b/packages/tasks/src/reinforcement-learning/about.md
@@ -0,0 +1,167 @@
+## Use Cases
+
+### Gaming
+
+Reinforcement learning is known for its application to video games. Since games provide a safe environment for the agent to be trained in, in the sense that they are perfectly defined and controllable, they are perfect candidates for experimentation and help a lot in learning about the capabilities and limitations of various RL algorithms.
+
+There are many videos on the Internet where a game-playing reinforcement learning agent starts with a terrible gaming strategy due to random initialization of its settings, but over iterations, the agent gets better and better with each episode of the training. This [paper](https://arxiv.org/abs/1912.10944) mainly investigates the performance of RL in popular games such as Minecraft or Dota2. The agent's performance can exceed a human player's, although there are still some challenges mainly related to efficiency in constructing the gaming policy of the reinforcement learning agent.
+
+### Trading and Finance
+
+Reinforcement learning is the science of training computers to make decisions, and it thus has a novel use in trading and finance. All time-series models are helpful in predicting prices, volume and future sales of a product or a stock. Reinforcement-learning-based automated agents can decide to sell, buy or hold a stock. This shifts the impact of AI in this field to real-time decision making rather than just prediction of prices. The glossary given below clarifies some of the terms used when training a model to make these decisions.
+
+## Task Variants
+
+### Model Based RL
+
+Model-based reinforcement learning techniques intend to create a model of the environment, learn the state transition probabilities and the reward function, and use them to find the optimal action. Some typical examples of model-based reinforcement learning algorithms are dynamic programming, value iteration and policy iteration; a minimal value iteration sketch is shown below.
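+
+To make this concrete, here is a minimal, self-contained sketch of value iteration on a made-up two-state, two-action MDP. The transition probabilities and rewards are arbitrary illustrative numbers, not from any real environment:
+
+```python
+import numpy as np
+
+# Toy MDP: P[s, a, s'] are transition probabilities, R[s, a] are rewards (made-up numbers)
+P = np.array([[[0.9, 0.1], [0.2, 0.8]],
+              [[0.0, 1.0], [0.7, 0.3]]])
+R = np.array([[1.0, 0.0],
+              [0.5, 2.0]])
+gamma = 0.9  # discount factor
+
+V = np.zeros(2)
+for _ in range(100):
+    # Q[s, a] = R[s, a] + gamma * sum over s' of P[s, a, s'] * V[s']
+    Q = R + gamma * (P @ V)
+    V = Q.max(axis=1)  # Bellman optimality backup
+
+policy = Q.argmax(axis=1)  # greedy policy w.r.t. the converged values
+print(V, policy)
+```
+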
+### Model Free RL
+
+In model-free reinforcement learning, the agent decides on optimal actions based on its experience in the environment and the reward it collects from it. This is one of the most commonly used approaches and is beneficial in complex environments, where modeling the state transition probabilities and reward functions is difficult. Some examples of model-free reinforcement learning algorithms are SARSA, Q-Learning, actor-critic and proximal policy optimization (PPO).
+
+## Glossary
+
+**Agent:** The learner and the decision maker.
+
+**Environment:** The part of the world the agent interacts with, comprising everything outside the agent.
+
+Observations and states are the information our agent gets from the environment. In the case of a video game, it can be a frame (a screenshot). In the case of the trading agent, it can be the value of a certain stock.
+
+**State:** Complete description of the state of the environment with no hidden information.
+
+**Observation:** Partial description of the state, in a partially observed environment.
+
+**Action:** The decision taken by the agent.
+
+**Reward:** The numerical feedback signal that the agent receives from the environment based on the chosen action.
+
+**Return:** Cumulative Reward. In the simplest case, the return is the sum of the rewards.
+
+**Episode:** For some applications there is a natural notion of final time step. In this case, there is a starting point and an ending point (a terminal state). This creates an episode: a list of States, Actions, Rewards, and new States. For instance, think about Chess: an episode begins at the initial board position and ends when the game is over.
+
+**Policy:** The Policy is the brain of the Agent: it’s the function that tells us what action to take given the state. So it defines the agent’s behavior at a given time. Reinforcement learning methods specify how the agent’s policy is changed as a result of its experience.
+
+## Inference
+
+Inference in reinforcement learning differs from that in other modalities, where there is a model and test data. In reinforcement learning, once you have trained an agent in an environment, you try to run the trained agent for additional steps to get the average reward.
+
+A typical training cycle consists of gathering experience from the environment, training the agent, and running the agent on a test environment to obtain the average reward. Below are snippets showing how you can interact with the environment using the `gymnasium` library, train an agent using `stable-baselines3`, evaluate the agent on a test environment and infer actions from the trained agent.
+ +```python +# Here we are running 20 episodes of CartPole-v1 environment, taking random actions +import gymnasium as gym + +env = gym.make("CartPole-v1") +observation, info = env.reset() + +for _ in range(20): + action = env.action_space.sample() # samples random action from action sample space + + # the agent takes the action + observation, reward, terminated, truncated, info = env.step(action) + + +# if the agent reaches terminal state, we reset the environment +if terminated or truncated: + + print("Environment is reset") + observation = env.reset() + +env.close() +``` + +Below snippet shows how to train a PPO model on LunarLander-v2 environment using `stable-baselines3` library and saving the model + +```python +from stable_baselines3 import PPO + +# initialize the environment + +env = gym.make("LunarLander-v2") + +# initialize the model + +model = PPO(policy = "MlpPolicy", + env = env, + n_steps = 1024, + batch_size = 64, + n_epochs = 4, + verbose = 1) + +# train the model for 1000 time steps +model.learn(total_timesteps = 1000) + +# Saving the model in desired directory +model_name = "PPO-LunarLander-v2" +model.save(model_name) +``` + +Below code shows how to evaluate an agent trained using `stable-baselines3` + +```python +# Loading a saved model and evaluating the model for 10 episodes +from stable_baselines3.common.evaluation import evaluate_policy +from stable_baselines3 import PPO + + +env = gym.make("LunarLander-v2") +# Loading the saved model +model = PPO.load("PPO-LunarLander-v2",env=env) + +# Initializating the evaluation environment +eval_env = gym.make("LunarLander-v2") + +# Running the trained agent on eval_env for 10 time steps and getting the mean reward +mean_reward, std_reward = evaluate_policy(model, eval_env, n_eval_episodes = 10, + deterministic=True) + +print(f"mean_reward={mean_reward:.2f} +/- {std_reward}") +``` + +Below code snippet shows how to infer actions from an agent trained using `stable-baselines3` + +```python +from stable_baselines3.common.evaluation import evaluate_policy +from stable_baselines3 import PPO + +# Loading the saved model +model = PPO.load("PPO-LunarLander-v2",env=env) + +# Getting the environment from the trained agent +env = model.get_env() + +obs = env.reset() +for i in range(1000): + # getting action predictions from the trained agent + action, _states = model.predict(obs, deterministic=True) + + # taking the predicted action in the environment to observe next state and rewards + obs, rewards, dones, info = env.step(action) +``` + +For more information, you can check out the documentations of the respective libraries. + +[Gymnasium Documentation](https://gymnasium.farama.org/) +[Stable Baselines Documentation](https://stable-baselines3.readthedocs.io/en/master/) + +## Useful Resources + +Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful! 
+
+- [HuggingFace Deep Reinforcement Learning Class](https://github.com/huggingface/deep-rl-class)
+- [Introduction to Deep Reinforcement Learning](https://huggingface.co/blog/deep-rl-intro)
+- [Stable Baselines Integration with HuggingFace](https://huggingface.co/blog/sb3)
+- Learn how reinforcement learning is used in conversational agents in this blog: [Illustrating Reinforcement Learning from Human Feedback (RLHF)](https://huggingface.co/blog/rlhf)
+- [Reinforcement Learning from Human Feedback From Zero to ChatGPT](https://www.youtube.com/watch?v=EAd4oQtEJOM)
+- [Guide on Multi-Agent Competition Systems](https://huggingface.co/blog/aivsai)
+
+### Notebooks
+
+- [Train a Deep Reinforcement Learning lander agent to land correctly on the Moon 🌕 using Stable-Baselines3](https://github.com/huggingface/deep-rl-class/blob/main/notebooks/unit1/unit1.ipynb)
+- [Introduction to Unity MLAgents](https://github.com/huggingface/deep-rl-class/blob/main/notebooks/unit5/unit5.ipynb)
+- [Training Decision Transformers with 🤗 transformers](https://github.com/huggingface/blog/blob/main/notebooks/101_train-decision-transformers.ipynb)
+
+This page was made possible thanks to the efforts of [Ram Ananth](https://huggingface.co/RamAnanth1), [Emilio Lehoucq](https://huggingface.co/emiliol), [Sagar Mathpal](https://huggingface.co/sagarmathpal) and [Osman Alenbey](https://huggingface.co/osman93).
diff --git a/packages/tasks/src/reinforcement-learning/data.ts b/packages/tasks/src/reinforcement-learning/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..78731ec20366f3551e0366e3001c7b708c909487
--- /dev/null
+++ b/packages/tasks/src/reinforcement-learning/data.ts
@@ -0,0 +1,75 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A curation of widely used datasets for Data Driven Deep Reinforcement Learning (D4RL).",
+			id: "edbeeching/decision_transformer_gym_replay",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "State",
+				content: "Red traffic light, pedestrians are about to pass.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Action",
+				content: "Stop the car.",
+				type: "text",
+			},
+			{
+				label: "Next State",
+				content: "Yellow light, pedestrians have crossed.",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Accumulated reward across all time steps discounted by a factor that ranges between 0 and 1 and determines how much the agent optimizes for future relative to immediate rewards. Measures how good the policy ultimately found by a given algorithm is, considering uncertainty over the future.",
+			id: "Discounted Total Reward",
+		},
+		{
+			description:
+				"Average return obtained after running the policy for a certain number of evaluation episodes. As opposed to total reward, mean reward considers how much reward a given algorithm receives while learning.",
+			id: "Mean Reward",
+		},
+		{
+			description:
+				"Measures how good a given algorithm is after a predefined time. Some algorithms may be guaranteed to converge to optimal behavior across many time steps.
+        "Measures how good a given algorithm is after a predefined time. Some algorithms may be guaranteed to converge to optimal behavior across many time steps. However, an agent that reaches an acceptable level of optimality after a given time horizon may be preferable to one that ultimately reaches optimality but takes a long time.",
+      id: "Level of Performance After Some Time",
+    },
+  ],
+  models: [
+    {
+      description: "A Reinforcement Learning model trained on expert data from the Gym Hopper environment.",
+      id: "edbeeching/decision-transformer-gym-hopper-expert",
+    },
+    {
+      description: "A PPO agent playing seals/CartPole-v0 using the stable-baselines3 library and the RL Zoo.",
+      id: "HumanCompatibleAI/ppo-seals-CartPole-v0",
+    },
+  ],
+  spaces: [
+    {
+      description: "An application for a cute puppy agent learning to catch a stick.",
+      id: "ThomasSimonini/Huggy",
+    },
+    {
+      description: "An application to play Snowball Fight with a reinforcement learning agent.",
+      id: "ThomasSimonini/SnowballFight",
+    },
+  ],
+  summary:
+    "Reinforcement learning is the computational approach of learning from actions by interacting with an environment through trial and error and receiving rewards (negative or positive) as feedback.",
+  widgetModels: [],
+  youtubeId: "q0BiUn5LiBc",
+};
+
+export default taskData;

diff --git a/packages/tasks/src/sentence-similarity/about.md b/packages/tasks/src/sentence-similarity/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..ee536235d610cd144c1681ef4618c47ecce2a15c
--- /dev/null
+++ b/packages/tasks/src/sentence-similarity/about.md
@@ -0,0 +1,97 @@
## Use Cases 🔍

### Information Retrieval

You can extract information from documents using Sentence Similarity models. The first step is to rank documents using Passage Ranking models. You can then take the top-ranked document and search it with Sentence Similarity models by selecting the sentence that has the most similarity to the input query.

## The Sentence Transformers library

The [Sentence Transformers](https://www.sbert.net/) library is very powerful for calculating embeddings of sentences, paragraphs, and entire documents. An embedding is just a vector representation of a text and is useful for finding how similar two texts are.

You can find and use [hundreds of Sentence Transformers](https://huggingface.co/models?library=sentence-transformers&sort=downloads) models from the Hub by directly using the library, playing with the widgets in the browser or using the Inference API.

## Task Variants

### Passage Ranking

Passage Ranking is the task of ranking documents based on their relevance to a given query. The task is evaluated on Mean Reciprocal Rank. These models take one query and multiple documents and return the documents ranked by their relevance to the query. 📄

You can infer with Passage Ranking models using the [Inference API](https://huggingface.co/inference-api). The inputs to a Passage Ranking model are a query, for which we look for relevance, and the documents we want to search. The model will return scores according to the relevance of these documents to the query.
```python
import requests

API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/msmarco-distilbert-base-tas-b"
api_token = "YOUR_API_TOKEN"  # your Hugging Face API token
headers = {"Authorization": f"Bearer {api_token}"}

def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

data = query(
    {
        "inputs": {
            "source_sentence": "That is a happy person",
            "sentences": [
                "That is a happy dog",
                "That is a very happy person",
                "Today is a sunny day"
            ]
        }
    })
## [0.853, 0.981, 0.655]
```

### Semantic Textual Similarity

Semantic Textual Similarity is the task of evaluating how similar two texts are in terms of meaning. These models take a source sentence and a list of sentences in which we will look for similarities and will return a list of similarity scores. The benchmark dataset is the [Semantic Textual Similarity Benchmark](http://ixa2.si.ehu.eus/stswiki/index.php/STSbenchmark). The task is evaluated on the Pearson correlation coefficient.

```python
import requests

API_URL = "https://api-inference.huggingface.co/models/sentence-transformers/all-MiniLM-L6-v2"
api_token = "YOUR_API_TOKEN"  # your Hugging Face API token
headers = {"Authorization": f"Bearer {api_token}"}

def query(payload):
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

data = query(
    {
        "inputs": {
            "source_sentence": "I'm very happy",
            "sentences": ["I'm filled with happiness", "I'm happy"]
        }
    })

## [0.605, 0.894]
```

You can also compute similarity scores locally with the Sentence Transformers library.

```python
# pip install -U sentence-transformers

from sentence_transformers import SentenceTransformer, util

sentences = ["I'm happy", "I'm full of happiness"]

model = SentenceTransformer('sentence-transformers/all-MiniLM-L6-v2')

# Compute an embedding for each sentence
embedding_1 = model.encode(sentences[0], convert_to_tensor=True)
embedding_2 = model.encode(sentences[1], convert_to_tensor=True)

util.pytorch_cos_sim(embedding_1, embedding_2)
## tensor([[0.6003]])
```

## Useful Resources

Would you like to learn more about Sentence Transformers and Sentence Similarity? Awesome! Here you can find some curated resources that you may find helpful!
+ +- [Sentence Transformers Documentation](https://www.sbert.net/) +- [Sentence Transformers in the Hub](https://huggingface.co/blog/sentence-transformers-in-the-hub) +- [Building a Playlist Generator with Sentence Transformers](https://huggingface.co/blog/playlist-generator) +- [Getting Started With Embeddings](https://huggingface.co/blog/getting-started-with-embeddings) diff --git a/packages/tasks/src/sentence-similarity/data.ts b/packages/tasks/src/sentence-similarity/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..0f71b39468e209b3faf3690c8d0a1f827d9212c9 --- /dev/null +++ b/packages/tasks/src/sentence-similarity/data.ts @@ -0,0 +1,101 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "Bing queries with relevant passages from various web sources.", + id: "ms_marco", + }, + ], + demo: { + inputs: [ + { + label: "Source sentence", + content: "Machine learning is so easy.", + type: "text", + }, + { + label: "Sentences to compare to", + content: "Deep learning is so straightforward.", + type: "text", + }, + { + label: "", + content: "This is so difficult, like rocket science.", + type: "text", + }, + { + label: "", + content: "I can't believe how much I struggled with this.", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "Deep learning is so straightforward.", + score: 0.623, + }, + { + label: "This is so difficult, like rocket science.", + score: 0.413, + }, + { + label: "I can't believe how much I struggled with this.", + score: 0.256, + }, + ], + }, + ], + }, + metrics: [ + { + description: + "Reciprocal Rank is a measure used to rank the relevancy of documents given a set of documents. Reciprocal Rank is the reciprocal of the rank of the document retrieved, meaning, if the rank is 3, the Reciprocal Rank is 0.33. If the rank is 1, the Reciprocal Rank is 1", + id: "Mean Reciprocal Rank", + }, + { + description: + "The similarity of the embeddings is evaluated mainly on cosine similarity. It is calculated as the cosine of the angle between two vectors. It is particularly useful when your texts are not the same length", + id: "Cosine Similarity", + }, + ], + models: [ + { + description: + "This model works well for sentences and paragraphs and can be used for clustering/grouping and semantic searches.", + id: "sentence-transformers/all-mpnet-base-v2", + }, + { + description: "A multilingual model trained for FAQ retrieval.", + id: "clips/mfaq", + }, + ], + spaces: [ + { + description: "An application that leverages sentence similarity to answer questions from YouTube videos.", + id: "Gradio-Blocks/Ask_Questions_To_YouTube_Videos", + }, + { + description: + "An application that retrieves relevant PubMed abstracts for a given online article which can be used as further references.", + id: "Gradio-Blocks/pubmed-abstract-retriever", + }, + { + description: "An application that leverages sentence similarity to summarize text.", + id: "nickmuchi/article-text-summarizer", + }, + { + description: "A guide that explains how Sentence Transformers can be used for semantic search.", + id: "sentence-transformers/Sentence_Transformers_for_semantic_search", + }, + ], + summary: + "Sentence Similarity is the task of determining how similar two texts are. Sentence similarity models convert input texts into vectors (embeddings) that capture semantic information and calculate how close (similar) they are between them. 
This task is particularly useful for information retrieval and clustering/grouping.", + widgetModels: ["sentence-transformers/all-MiniLM-L6-v2"], + youtubeId: "VCZq5AkbNEU", +}; + +export default taskData; diff --git a/packages/tasks/src/summarization/about.md b/packages/tasks/src/summarization/about.md new file mode 100644 index 0000000000000000000000000000000000000000..ec82c946f383457d311c5721ee75c96fa8047c87 --- /dev/null +++ b/packages/tasks/src/summarization/about.md @@ -0,0 +1,58 @@

## Use Cases

### Research Paper Summarization 🧐

Research papers can be summarized to allow researchers to spend less time selecting which articles to read. There are several approaches you can take for a task like this:

1. Use an existing extractive summarization model on the Hub to do inference.
2. Pick an existing language model trained for academic papers. This model can then be trained in a process called fine-tuning so it can solve the summarization task.
3. Use a sequence-to-sequence model like [T5](https://huggingface.co/docs/transformers/model_doc/t5) for abstractive text summarization.

## Inference

You can use the 🤗 Transformers library `summarization` pipeline to infer with existing Summarization models. If no model name is provided, the pipeline will be initialized with [sshleifer/distilbart-cnn-12-6](https://huggingface.co/sshleifer/distilbart-cnn-12-6).

```python
from transformers import pipeline

summarizer = pipeline("summarization")
summarizer("Paris is the capital and most populous city of France, with an estimated population of 2,175,601 residents as of 2018, in an area of more than 105 square kilometres (41 square miles). The City of Paris is the centre and seat of government of the region and province of Île-de-France, or Paris Region, which has an estimated population of 12,174,880, or about 18 percent of the population of France as of 2017.")
## [{ "summary_text": " Paris is the capital and most populous city of France..." }]
```

You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer summarization models on Hugging Face Hub.

```javascript
import { HfInference } from "@huggingface/inference";

const inference = new HfInference(HF_ACCESS_TOKEN);
const inputs =
	"Paris is the capital and most populous city of France, with an estimated population of 2,175,601 residents as of 2018, in an area of more than 105 square kilometres (41 square miles). The City of Paris is the centre and seat of government of the region and province of Île-de-France, or Paris Region, which has an estimated population of 12,174,880, or about 18 percent of the population of France as of 2017.";

await inference.summarization({
	model: "sshleifer/distilbart-cnn-12-6",
	inputs,
});
```

## Useful Resources

Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!
+ +- [Course Chapter on Summarization](https://huggingface.co/course/chapter7/5?fw=pt) +- [Distributed Training: Train BART/T5 for Summarization using 🤗 Transformers and Amazon SageMaker](https://huggingface.co/blog/sagemaker-distributed-training-seq2seq) + +### Notebooks + +- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/summarization.ipynb) +- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/summarization-tf.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/summarization) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/summarization) +- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/summarization) + +### Documentation + +- [Summarization task guide](https://huggingface.co/docs/transformers/tasks/summarization) diff --git a/packages/tasks/src/summarization/data.ts b/packages/tasks/src/summarization/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..d0afc85282a6ec9732b9ae56487a4355ca7872c9 --- /dev/null +++ b/packages/tasks/src/summarization/data.ts @@ -0,0 +1,75 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: + "News articles in five different languages along with their summaries. Widely used for benchmarking multilingual summarization models.", + id: "mlsum", + }, + { + description: "English conversations and their summaries. Useful for benchmarking conversational agents.", + id: "samsum", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: + "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. It was the first structure to reach a height of 300 metres. Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.", + type: "text", + }, + ], + outputs: [ + { + label: "Output", + content: + "The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building. It was the first structure to reach a height of 300 metres.", + type: "text", + }, + ], + }, + metrics: [ + { + description: + "The generated sequence is compared against its summary, and the overlap of tokens are counted. ROUGE-N refers to overlap of N subsequent tokens, ROUGE-1 refers to overlap of single tokens and ROUGE-2 is the overlap of two subsequent tokens.", + id: "rouge", + }, + ], + models: [ + { + description: + "A strong summarization model trained on English news articles. 
Excels at generating factual summaries.", + id: "facebook/bart-large-cnn", + }, + { + description: "A summarization model trained on medical articles.", + id: "google/bigbird-pegasus-large-pubmed", + }, + ], + spaces: [ + { + description: "An application that can summarize long paragraphs.", + id: "pszemraj/summarize-long-text", + }, + { + description: "A much needed summarization application for terms and conditions.", + id: "ml6team/distilbart-tos-summarizer-tosdr", + }, + { + description: "An application that summarizes long documents.", + id: "pszemraj/document-summarization", + }, + { + description: "An application that can detect errors in abstractive summarization.", + id: "ml6team/post-processing-summarization", + }, + ], + summary: + "Summarization is the task of producing a shorter version of a document while preserving its important information. Some models can extract text from the original input, while other models can generate entirely new text.", + widgetModels: ["sshleifer/distilbart-cnn-12-6"], + youtubeId: "yHnr5Dk2zCI", +}; + +export default taskData; diff --git a/packages/tasks/src/table-question-answering/about.md b/packages/tasks/src/table-question-answering/about.md new file mode 100644 index 0000000000000000000000000000000000000000..684c85c22e3f196b1801ba6fcd0868b1f7ccb77b --- /dev/null +++ b/packages/tasks/src/table-question-answering/about.md @@ -0,0 +1,43 @@

## Use Cases

### SQL execution

You can use Table Question Answering models to simulate SQL execution by inputting a table.
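For instance, TAPEX was pre-trained to mimic a SQL executor, so it can run a simplified SQL query directly against a table. A minimal sketch with 🤗 Transformers (the table and query below are illustrative):

```python
from transformers import TapexTokenizer, BartForConditionalGeneration
import pandas as pd

tokenizer = TapexTokenizer.from_pretrained("microsoft/tapex-base")
model = BartForConditionalGeneration.from_pretrained("microsoft/tapex-base")

# a small example table; cells are cast to strings for the tokenizer
data = {"year": [1896, 1900, 1904], "city": ["athens", "paris", "st. louis"]}
table = pd.DataFrame.from_dict(data).astype(str)

# encode the table together with the SQL query and generate the answer
encoding = tokenizer(table=table, query="select city where year = 1900", return_tensors="pt")
outputs = model.generate(**encoding)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
## e.g. [' paris']
```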
### Table Question Answering

Table Question Answering models are capable of answering questions based on a table.

## Task Variants

This place can be filled with variants of this task if there are any.

## Inference

You can infer with TableQA models using the 🤗 Transformers library.

```python
from transformers import pipeline
import pandas as pd

# prepare table + question
data = {"Actors": ["Brad Pitt", "Leonardo Di Caprio", "George Clooney"], "Number of movies": ["87", "53", "69"]}
table = pd.DataFrame.from_dict(data)
question = "how many movies does Leonardo Di Caprio have?"

# pipeline model
# Note: you must install torch-scatter first.
tqa = pipeline(task="table-question-answering", model="google/tapas-large-finetuned-wtq")

# result
print(tqa(table=table, query=question)['cells'][0])
## 53
```

## Useful Resources

In this area, you can insert useful resources about how to train or use a model for this task.

This task page is complete thanks to the efforts of [Hao Kim Tieu](https://huggingface.co/haotieu). 🦸

diff --git a/packages/tasks/src/table-question-answering/data.ts b/packages/tasks/src/table-question-answering/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6ad9fd0afbe82d4ef0c2454c4fa29b865e62461c
--- /dev/null
+++ b/packages/tasks/src/table-question-answering/data.ts
@@ -0,0 +1,59 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+  datasets: [
+    {
+      description:
+        "The WikiTableQuestions dataset is a large-scale dataset for the task of question answering on semi-structured tables.",
+      id: "wikitablequestions",
+    },
+    {
+      description:
+        "WikiSQL is a dataset of 80,654 hand-annotated examples of questions and SQL queries distributed across 24,241 tables from Wikipedia.",
+      id: "wikisql",
+    },
+  ],
+  demo: {
+    inputs: [
+      {
+        table: [
+          ["Rank", "Name", "No. of reigns", "Combined days"],
+          ["1", "Lou Thesz", "3", "3749"],
+          ["2", "Ric Flair", "8", "3103"],
+          ["3", "Harley Race", "7", "1799"],
+        ],
+        type: "tabular",
+      },
+      { label: "Question", content: "What is the number of reigns for Harley Race?", type: "text" },
+    ],
+    outputs: [{ label: "Result", content: "7", type: "text" }],
+  },
+  metrics: [
+    {
+      description: "Checks whether the predicted answer(s) is the same as the ground-truth answer(s).",
+      id: "Denotation Accuracy",
+    },
+  ],
+  models: [
+    {
+      description:
+        "A table question answering model that is capable of neural SQL execution, i.e., it employs TAPEX to execute a SQL query on a given table.",
+      id: "microsoft/tapex-base",
+    },
+    {
+      description: "A robust table question answering model.",
+      id: "google/tapas-base-finetuned-wtq",
+    },
+  ],
+  spaces: [
+    {
+      description: "An application that answers questions based on table CSV files.",
+      id: "katanaml/table-query",
+    },
+  ],
+  summary: "Table Question Answering (Table QA) is the task of answering a question about the information in a given table.",
+  widgetModels: ["google/tapas-base-finetuned-wtq"],
+};
+
+export default taskData;

diff --git a/packages/tasks/src/tabular-classification/about.md b/packages/tasks/src/tabular-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..9af38bceec2a4c49842dac8926950359485619b4
--- /dev/null
+++ b/packages/tasks/src/tabular-classification/about.md
@@ -0,0 +1,65 @@
## About the Task

Tabular classification is the task of assigning a label or class given a limited number of attributes. For example, the input can be data related to a customer (balance of the customer, the time being a customer, or more) and the output can be whether the customer will churn from the service or not.
There are three types of categorical variables:

- Binary variables: Variables that can take two values, like yes or no, open or closed. The task of predicting binary variables is called binary classification.
- Ordinal variables: Variables with a ranking relationship, e.g., good, insignificant, and bad product reviews. The task of predicting ordinal variables is called ordinal classification.
- Nominal variables: Variables with no ranking relationship among them, e.g., predicting an animal from their weight and height, where categories are cat, dog, or bird. The task of predicting nominal variables is called multinomial classification.
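As a minimal sketch, a binary classifier for a churn-style problem can be trained with scikit-learn (the column names and values here are hypothetical):

```python
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split

# hypothetical customer data with a binary "churn" target
df = pd.DataFrame({
    "balance": [1200.0, 50.5, 810.0, 0.0, 430.0, 95.0],
    "tenure_months": [24, 3, 12, 1, 36, 2],
    "churn": [0, 1, 0, 1, 0, 1],
})

X_train, X_test, y_train, y_test = train_test_split(
    df[["balance", "tenure_months"]], df["churn"], test_size=0.33, random_state=0
)

model = RandomForestClassifier(random_state=0)
model.fit(X_train, y_train)
print(model.predict(X_test))
```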
## Use Cases

### Fraud Detection

Tabular classification models can be used to detect fraudulent credit card transactions, where the features could be the amount of the transaction and the account balance, and the target to predict could be whether the transaction is fraudulent or not. This is an example of binary classification.

### Churn Prediction

Tabular classification models can be used to predict customer churn in telecommunication. An example dataset for the task is hosted [here](https://huggingface.co/datasets/scikit-learn/churn-prediction).

## Model Hosting and Inference

You can use [skops](https://skops.readthedocs.io/) for model hosting and inference on the Hugging Face Hub. This library is built to improve production workflows of various libraries that are used to train tabular models, including [sklearn](https://scikit-learn.org/stable/) and [xgboost](https://xgboost.readthedocs.io/en/stable/). Using `skops` you can:

- Easily use the Inference API,
- Build neat UIs with one line of code,
- Programmatically create model cards,
- Securely serialize your scikit-learn model. (See limitations of using pickle [here](https://huggingface.co/docs/hub/security-pickle).)

You can push your model as follows:

```python
from skops import hub_utils

# initialize a repository with a trained model
local_repo = "/path_to_new_repo"
hub_utils.init(model, dst=local_repo)
# push to Hub!
hub_utils.push("username/my-awesome-model", source=local_repo)
```

Once the model is pushed, you can infer easily.

```python
import skops.hub_utils as hub_utils
import pandas as pd

data = pd.DataFrame(your_data)
# Load the model from the Hub and get its output on the data
res = hub_utils.get_model_output("username/my-awesome-model", data)
```

You can launch a UI for your model with only one line of code!

```python
import gradio as gr

gr.Interface.load("huggingface/username/my-awesome-model").launch()
```

## Useful Resources

- Check out the [scikit-learn organization](https://huggingface.co/scikit-learn) to learn more about different algorithms used for this task.
- [Skops documentation](https://skops.readthedocs.io/en/latest/)
- [Skops announcement blog](https://huggingface.co/blog/skops)
- [Notebook: Persisting your scikit-learn model using skops](https://www.kaggle.com/code/unofficialmerve/persisting-your-scikit-learn-model-using-skops)
- Check out [interactive sklearn examples](https://huggingface.co/sklearn-docs) built with ❤️ using Gradio.

### Training your own model in just a few seconds

We have built a [baseline trainer](https://huggingface.co/spaces/scikit-learn/baseline-trainer) application to which you can drag and drop your dataset. It will train a baseline and push it to your Hugging Face Hub profile with a model card containing information about the model.
diff --git a/packages/tasks/src/tabular-classification/data.ts b/packages/tasks/src/tabular-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..5cc3f4b9a98066d9df0c62c8d284f70dfa883db8
--- /dev/null
+++ b/packages/tasks/src/tabular-classification/data.ts
@@ -0,0 +1,68 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+  datasets: [
+    {
+      description: "A comprehensive curation of datasets covering all benchmarks.",
+      id: "inria-soda/tabular-benchmark",
+    },
+  ],
+  demo: {
+    inputs: [
+      {
+        table: [
+          ["Glucose", "Blood Pressure", "Skin Thickness", "Insulin", "BMI"],
+          ["148", "72", "35", "0", "33.6"],
+          ["150", "50", "30", "0", "35.1"],
+          ["141", "60", "29", "1", "39.2"],
+        ],
+        type: "tabular",
+      },
+    ],
+    outputs: [
+      {
+        table: [["Diabetes"], ["1"], ["1"], ["0"]],
+        type: "tabular",
+      },
+    ],
+  },
+  metrics: [
+    {
+      description: "",
+      id: "accuracy",
+    },
+    {
+      description: "",
+      id: "recall",
+    },
+    {
+      description: "",
+      id: "precision",
+    },
+    {
+      description: "",
+      id: "f1",
+    },
+  ],
+  models: [
+    {
+      description: "Breast cancer prediction model based on decision trees.",
+      id: "scikit-learn/cancer-prediction-trees",
+    },
+  ],
+  spaces: [
+    {
+      description: "An application that can predict defective products on a production line.",
+      id: "scikit-learn/tabular-playground",
+    },
+    {
+      description: "An application that compares various tabular classification techniques on different datasets.",
+      id: "scikit-learn/classification",
+    },
+  ],
+  summary: "Tabular classification is the task of classifying a target category (a group) based on a set of attributes.",
+  widgetModels: ["scikit-learn/tabular-playground"],
+  youtubeId: "",
+};
+
+export default taskData;

diff --git a/packages/tasks/src/tabular-regression/about.md b/packages/tasks/src/tabular-regression/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..eb8861779047fe6073dbbf5baeca7655adddb66f
--- /dev/null
+++ b/packages/tasks/src/tabular-regression/about.md
@@ -0,0 +1,87 @@
## About the Task

Tabular regression is the task of predicting a numerical value given a set of attributes/features. _Tabular_ means that the data is stored in a table (like an Excel sheet), and each sample is contained in its own row. The features used to predict our target can be both numerical and categorical. However, including categorical features often requires additional preprocessing/feature engineering (a few models do accept categorical features directly, like [CatBoost](https://catboost.ai/)). An example of tabular regression would be predicting the weight of a fish given its species and length.

## Use Cases

### Sales Prediction: a Use Case for Predicting a Continuous Target Variable

Here the objective is to predict a continuous variable based on a set of input variables. For example, predicting the `sales` of an ice cream shop based on the weather `temperature` and the `duration of hours` the shop was open. Here we can build a regression model with `temperature` and `duration of hours` as input variables and `sales` as the target variable.

### Missing Value Imputation for Other Tabular Tasks

In real-world applications, due to human error or other reasons, some of the input values can be missing or there might not be any recorded data. Considering the example above, say the shopkeeper's watch was broken and they forgot to record the `hours` for which the shop was open. This will lead to a missing value in their dataset. In this case, the missing values could be replaced with zero, or with the average hours for which the shop is kept open. Another approach we can try is to use the `temperature` and `sales` variables to predict the `hours` variable.
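As a minimal sketch of the simpler approach, missing values can be filled in with scikit-learn's `SimpleImputer` (the data here is hypothetical):

```python
import numpy as np
import pandas as pd
from sklearn.impute import SimpleImputer

# hypothetical shop data with a missing "hours" entry
df = pd.DataFrame({
    "temperature": [25.0, 30.0, 28.0],
    "hours": [8.0, np.nan, 9.0],
    "sales": [120.0, 200.0, 150.0],
})

# replace the missing value with the column mean
imputer = SimpleImputer(strategy="mean")
df[["hours"]] = imputer.fit_transform(df[["hours"]])
print(df)
```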
## Model Training

A simple regression model can be created using `sklearn` as follows:

```python
import pandas as pd
from sklearn.linear_model import LinearRegression

# load the tabular data (the path and column names are illustrative)
data = pd.read_csv("data.csv")

# set the input features
X = data[["Feature 1", "Feature 2", "Feature 3"]]
# set the target variable
y = data["Target Variable"]
# initialize the model
model = LinearRegression()
# fit the model
model.fit(X, y)
```

## Model Hosting and Inference

You can use [skops](https://skops.readthedocs.io/) for model hosting and inference on the Hugging Face Hub. This library is built to improve production workflows of various libraries that are used to train tabular models, including [sklearn](https://scikit-learn.org/stable/) and [xgboost](https://xgboost.readthedocs.io/en/stable/). Using `skops` you can:

- Easily use the Inference API,
- Build neat UIs with one line of code,
- Programmatically create model cards,
- Securely serialize your models. (See limitations of using pickle [here](https://huggingface.co/docs/hub/security-pickle).)

You can push your model as follows:

```python
from skops import hub_utils

# initialize a repository with a trained model
local_repo = "/path_to_new_repo"
hub_utils.init(model, dst=local_repo)
# push to Hub!
hub_utils.push("username/my-awesome-model", source=local_repo)
```

Once the model is pushed, you can infer easily.

```python
import skops.hub_utils as hub_utils
import pandas as pd

data = pd.DataFrame(your_data)
# Load the model from the Hub and get its output on the data
res = hub_utils.get_model_output("username/my-awesome-model", data)
```

You can launch a UI for your model with only one line of code!

```python
import gradio as gr

gr.Interface.load("huggingface/username/my-awesome-model").launch()
```

## Useful Resources

- [Skops documentation](https://skops.readthedocs.io/en/stable/index.html)
- Check out [interactive sklearn examples](https://huggingface.co/sklearn-docs) built with ❤️ using Gradio.
- [Notebook: Persisting your scikit-learn model using skops](https://www.kaggle.com/code/unofficialmerve/persisting-your-scikit-learn-model-using-skops)
- For starting with tabular regression:
  - Doing [Exploratory Data Analysis](https://neptune.ai/blog/exploratory-data-analysis-for-tabular-data) for tabular data.
  - The data considered here consists of details of Olympic athletes and medal results from Athens 1896 to Rio 2016.
  - Here you can learn more about how to explore and analyze the data and visualize it in order to get a better understanding of the dataset.
  - Building your [first ML model](https://www.kaggle.com/code/dansbecker/your-first-machine-learning-model).
- Intermediate level tutorials on tabular regression:
  - [A Short Chronology of Deep Learning for Tabular Data](https://sebastianraschka.com/blog/2022/deep-learning-for-tabular-data.html) by Sebastian Raschka.

### Training your own model in just a few seconds

We have built a [baseline trainer](https://huggingface.co/spaces/scikit-learn/baseline-trainer) application to which you can drag and drop your dataset. It will train a baseline and push it to your Hugging Face Hub profile with a model card containing information about the model.

This page was made possible thanks to the efforts of [Brenden Connors](https://huggingface.co/brendenc) and [Ayush Bihani](https://huggingface.co/hsuyab).
diff --git a/packages/tasks/src/tabular-regression/data.ts b/packages/tasks/src/tabular-regression/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..c5b78753861505113a4a5eab8ab31939dbcf4458
--- /dev/null
+++ b/packages/tasks/src/tabular-regression/data.ts
@@ -0,0 +1,57 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+  datasets: [
+    {
+      description: "A comprehensive curation of datasets covering all benchmarks.",
+      id: "inria-soda/tabular-benchmark",
+    },
+  ],
+  demo: {
+    inputs: [
+      {
+        table: [
+          ["Car Name", "Horsepower", "Weight"],
+          ["ford torino", "140", "3,449"],
+          ["amc hornet", "97", "2,774"],
+          ["toyota corolla", "65", "1,773"],
+        ],
+        type: "tabular",
+      },
+    ],
+    outputs: [
+      {
+        table: [["MPG (miles per gallon)"], ["17"], ["18"], ["31"]],
+        type: "tabular",
+      },
+    ],
+  },
+  metrics: [
+    {
+      description: "",
+      id: "mse",
+    },
+    {
+      description:
+        "Coefficient of determination (or R-squared) is a measure of how well the model fits the data. Higher R-squared is considered a better fit.",
+      id: "r-squared",
+    },
+  ],
+  models: [
+    {
+      description: "Fish weight prediction based on length measurements and species.",
+      id: "scikit-learn/Fish-Weight",
+    },
+  ],
+  spaces: [
+    {
+      description: "An application that can predict the weight of a fish based on a set of attributes.",
+      id: "scikit-learn/fish-weight-prediction",
+    },
+  ],
+  summary: "Tabular regression is the task of predicting a numerical value given a set of attributes.",
+  widgetModels: ["scikit-learn/Fish-Weight"],
+  youtubeId: "",
+};
+
+export default taskData;

diff --git a/packages/tasks/src/tasksData.ts b/packages/tasks/src/tasksData.ts
new file mode 100644
index 0000000000000000000000000000000000000000..db2609971100e91d25ba68a075e3ce48f88c787f
--- /dev/null
+++ b/packages/tasks/src/tasksData.ts
@@ -0,0 +1,101 @@
+import { type PipelineType, PIPELINE_DATA } from "./pipelines";
+import type { TaskDataCustom, TaskData } from "./Types";
+
+import audioClassification from "./audio-classification/data";
+import audioToAudio from "./audio-to-audio/data";
+import automaticSpeechRecognition from "./automatic-speech-recognition/data";
+import conversational from "./conversational/data";
+import documentQuestionAnswering from "./document-question-answering/data";
+import featureExtraction from "./feature-extraction/data";
+import fillMask from "./fill-mask/data";
+import imageClassification from "./image-classification/data";
+import imageToImage from "./image-to-image/data";
+import imageToText from "./image-to-text/data";
+import imageSegmentation from "./image-segmentation/data";
+import objectDetection from "./object-detection/data";
+import depthEstimation from "./depth-estimation/data";
+import placeholder from "./placeholder/data";
+import reinforcementLearning from "./reinforcement-learning/data";
+import questionAnswering from "./question-answering/data";
+import sentenceSimilarity from "./sentence-similarity/data";
+import summarization from "./summarization/data";
+import tableQuestionAnswering from "./table-question-answering/data";
+import tabularClassification from "./tabular-classification/data";
+import tabularRegression from "./tabular-regression/data";
+import textToImage from "./text-to-image/data";
+import textToSpeech from "./text-to-speech/data";
+import tokenClassification from "./token-classification/data";
+import translation from "./translation/data";
+import textClassification from "./text-classification/data";
+import textGeneration from "./text-generation/data";
"./text-generation/data"; +import textToVideo from "./text-to-video/data"; +import unconditionalImageGeneration from "./unconditional-image-generation/data"; +import videoClassification from "./video-classification/data"; +import visualQuestionAnswering from "./visual-question-answering/data"; +import zeroShotClassification from "./zero-shot-classification/data"; +import zeroShotImageClassification from "./zero-shot-image-classification/data"; +import { TASKS_MODEL_LIBRARIES } from "./const"; + +// To make comparisons easier, task order is the same as in const.ts +// Tasks set to undefined won't have an associated task page. +// Tasks that call getData() without the second argument will +// have a "placeholder" page. +export const TASKS_DATA: Record = { + "audio-classification": getData("audio-classification", audioClassification), + "audio-to-audio": getData("audio-to-audio", audioToAudio), + "automatic-speech-recognition": getData("automatic-speech-recognition", automaticSpeechRecognition), + conversational: getData("conversational", conversational), + "depth-estimation": getData("depth-estimation", depthEstimation), + "document-question-answering": getData("document-question-answering", documentQuestionAnswering), + "feature-extraction": getData("feature-extraction", featureExtraction), + "fill-mask": getData("fill-mask", fillMask), + "graph-ml": undefined, + "image-classification": getData("image-classification", imageClassification), + "image-segmentation": getData("image-segmentation", imageSegmentation), + "image-to-image": getData("image-to-image", imageToImage), + "image-to-text": getData("image-to-text", imageToText), + "multiple-choice": undefined, + "object-detection": getData("object-detection", objectDetection), + "video-classification": getData("video-classification", videoClassification), + other: undefined, + "question-answering": getData("question-answering", questionAnswering), + "reinforcement-learning": getData("reinforcement-learning", reinforcementLearning), + robotics: undefined, + "sentence-similarity": getData("sentence-similarity", sentenceSimilarity), + summarization: getData("summarization", summarization), + "table-question-answering": getData("table-question-answering", tableQuestionAnswering), + "table-to-text": undefined, + "tabular-classification": getData("tabular-classification", tabularClassification), + "tabular-regression": getData("tabular-regression", tabularRegression), + "tabular-to-text": undefined, + "text-classification": getData("text-classification", textClassification), + "text-generation": getData("text-generation", textGeneration), + "text-retrieval": undefined, + "text-to-image": getData("text-to-image", textToImage), + "text-to-speech": getData("text-to-speech", textToSpeech), + "text-to-audio": undefined, + "text-to-video": getData("text-to-video", textToVideo), + "text2text-generation": undefined, + "time-series-forecasting": undefined, + "token-classification": getData("token-classification", tokenClassification), + translation: getData("translation", translation), + "unconditional-image-generation": getData("unconditional-image-generation", unconditionalImageGeneration), + "visual-question-answering": getData("visual-question-answering", visualQuestionAnswering), + "voice-activity-detection": undefined, + "zero-shot-classification": getData("zero-shot-classification", zeroShotClassification), + "zero-shot-image-classification": getData("zero-shot-image-classification", zeroShotImageClassification), +} as const; + +/** + * Return the 
+
+/**
+ * Return the whole TaskData object for a certain task.
+ * If the partialTaskData argument is left undefined,
+ * the default placeholder data will be used.
+ */
+function getData(type: PipelineType, partialTaskData: TaskDataCustom = placeholder): TaskData {
+  return {
+    ...partialTaskData,
+    id: type,
+    label: PIPELINE_DATA[type].name,
+    libraries: TASKS_MODEL_LIBRARIES[type],
+  };
+}

diff --git a/packages/tasks/src/text-classification/about.md b/packages/tasks/src/text-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..448eb7138209974bbb5d282fb573f1c8197e38f3
--- /dev/null
+++ b/packages/tasks/src/text-classification/about.md
@@ -0,0 +1,172 @@
## Use Cases

### Sentiment Analysis on Customer Reviews

You can track the sentiments of your customers from the product reviews using sentiment analysis models. This can help understand churn and retention by grouping reviews by sentiment, to later analyze the text and make strategic decisions based on this knowledge.

## Task Variants

### Natural Language Inference (NLI)

In NLI the model determines the relationship between two given texts. Concretely, the model takes a premise and a hypothesis and returns a class that can either be:

- **entailment**, which means the hypothesis is true.
- **contradiction**, which means the hypothesis is false.
- **neutral**, which means there's no relation between the hypothesis and the premise.

The benchmark dataset for this task is GLUE (General Language Understanding Evaluation). NLI models have different variants, such as Multi-Genre NLI, Question NLI and Winograd NLI.

### Multi-Genre NLI (MNLI)

MNLI is used for general NLI. Here are some examples:

```
Example 1:
    Premise: A man inspects the uniform of a figure in some East Asian country.
    Hypothesis: The man is sleeping.
    Label: Contradiction

Example 2:
    Premise: Soccer game with multiple males playing.
    Hypothesis: Some men are playing a sport.
    Label: Entailment
```

#### Inference

You can use the 🤗 Transformers library `text-classification` pipeline to infer with NLI models.

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="roberta-large-mnli")
classifier("A soccer game with multiple males playing. Some men are playing a sport.")
## [{'label': 'ENTAILMENT', 'score': 0.98}]
```

### Question Natural Language Inference (QNLI)

QNLI is the task of determining if the answer to a certain question can be found in a given document. If the answer can be found, the label is “entailment”. If the answer cannot be found, the label is “not entailment”.

```
Question: What percentage of marine life died during the extinction?
Sentence: It is also known as the “Great Dying” because it is considered the largest mass extinction in the Earth’s history.
Label: not entailment

Question: Who was the London Weekend Television’s Managing Director?
Sentence: The managing director of London Weekend Television (LWT), Greg Dyke, met with the representatives of the "big five" football clubs in England in 1990.
Label: entailment
```

#### Inference

You can use the 🤗 Transformers library `text-classification` pipeline to infer with QNLI models. The model returns the label and the confidence.
```python
from transformers import pipeline

classifier = pipeline("text-classification", model="cross-encoder/qnli-electra-base")
classifier("Where is the capital of France?, Paris is the capital of France.")
## [{'label': 'entailment', 'score': 0.997}]
```

### Sentiment Analysis

In Sentiment Analysis, the classes can be polarities like positive, negative, neutral, or sentiments such as happiness or anger.

#### Inference

You can use the 🤗 Transformers library with the `sentiment-analysis` pipeline to infer with Sentiment Analysis models. The model returns the label with the score.

```python
from transformers import pipeline

classifier = pipeline("sentiment-analysis")
classifier("I loved Star Wars so much!")
## [{'label': 'POSITIVE', 'score': 0.99}]
```

### Quora Question Pairs

Quora Question Pairs models assess whether two provided questions are paraphrases of each other. The model takes two questions and returns a binary value, with 0 being mapped to “not paraphrase” and 1 to “paraphrase”. The benchmark dataset is [Quora Question Pairs](https://huggingface.co/datasets/glue/viewer/qqp/test) inside the [GLUE benchmark](https://huggingface.co/datasets/glue). The dataset consists of question pairs and their labels.

```
Question1: “How can I increase the speed of my internet connection while using a VPN?”
Question2: “How can Internet speed be increased by hacking through DNS?”
Label: Not paraphrase

Question1: “What can make Physics easy to learn?”
Question2: “How can you make physics easy to learn?”
Label: Paraphrase
```

#### Inference

You can use the 🤗 Transformers library `text-classification` pipeline to infer with QQP models.

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="textattack/bert-base-uncased-QQP")
classifier("Which city is the capital of France?, Where is the capital of France?")
## [{'label': 'paraphrase', 'score': 0.998}]
```

You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text classification models on Hugging Face Hub.

```javascript
import { HfInference } from "@huggingface/inference";

const inference = new HfInference(HF_ACCESS_TOKEN);
await inference.textClassification({
	model: "distilbert-base-uncased-finetuned-sst-2-english",
	inputs: "I love this movie!",
});
```

### Grammatical Correctness

Linguistic Acceptability is the task of assessing the grammatical acceptability of a sentence. The classes in this task are “acceptable” and “unacceptable”. The benchmark dataset used for this task is [Corpus of Linguistic Acceptability (CoLA)](https://huggingface.co/datasets/glue/viewer/cola/test). The dataset consists of texts and their labels.

```
Example: Books were sent to each other by the students.
Label: Unacceptable

Example: She voted for herself.
Label: Acceptable
```

#### Inference

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="textattack/distilbert-base-uncased-CoLA")
classifier("I will walk to home when I went through the bus.")
## [{'label': 'unacceptable', 'score': 0.95}]
```

## Useful Resources

Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!
+ +- [Course Chapter on Fine-tuning a Text Classification Model](https://huggingface.co/course/chapter3/1?fw=pt) +- [Getting Started with Sentiment Analysis using Python](https://huggingface.co/blog/sentiment-analysis-python) +- [Sentiment Analysis on Encrypted Data with Homomorphic Encryption](https://huggingface.co/blog/sentiment-analysis-fhe) +- [Leveraging Hugging Face for complex text classification use cases](https://huggingface.co/blog/classification-use-cases) + +### Notebooks + +- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/text_classification.ipynb) +- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/text_classification-tf.ipynb) +- [Flax](https://github.com/huggingface/notebooks/blob/master/examples/text_classification_flax.ipynb) + +### Scripts for training + +- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-classification) +- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/text-classification) +- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/text-classification) + +### Documentation + +- [Text classification task guide](https://huggingface.co/docs/transformers/tasks/sequence_classification) diff --git a/packages/tasks/src/text-classification/data.ts b/packages/tasks/src/text-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..7893753caae5de3036692e32b8135a47f139924b --- /dev/null +++ b/packages/tasks/src/text-classification/data.ts @@ -0,0 +1,91 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A widely used dataset used to benchmark multiple variants of text classification.", + id: "glue", + }, + { + description: "A text classification dataset used to benchmark natural language inference models", + id: "snli", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "I love Hugging Face!", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "POSITIVE", + score: 0.9, + }, + { + label: "NEUTRAL", + score: 0.1, + }, + { + label: "NEGATIVE", + score: 0.0, + }, + ], + }, + ], + }, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: "", + id: "recall", + }, + { + description: "", + id: "precision", + }, + { + description: + "The F1 metric is the harmonic mean of the precision and recall. It can be calculated as: F1 = 2 * (precision * recall) / (precision + recall)", + id: "f1", + }, + ], + models: [ + { + description: "A robust model trained for sentiment analysis.", + id: "distilbert-base-uncased-finetuned-sst-2-english", + }, + { + description: "Multi-genre natural language inference model.", + id: "roberta-large-mnli", + }, + ], + spaces: [ + { + description: "An application that can classify financial sentiment.", + id: "IoannisTr/Tech_Stocks_Trading_Assistant", + }, + { + description: "A dashboard that contains various text classification tasks.", + id: "miesnerjacob/Multi-task-NLP", + }, + { + description: "An application that analyzes user reviews in healthcare.", + id: "spacy/healthsea-demo", + }, + ], + summary: + "Text Classification is the task of assigning a label or class to a given text. 
Some use cases are sentiment analysis, natural language inference, and assessing grammatical correctness.", + widgetModels: ["distilbert-base-uncased-finetuned-sst-2-english"], + youtubeId: "leNG9fN9FQU", +}; + +export default taskData; diff --git a/packages/tasks/src/text-generation/about.md b/packages/tasks/src/text-generation/about.md new file mode 100644 index 0000000000000000000000000000000000000000..c8ed8120f65903a669af705beddcdb00ea814422 --- /dev/null +++ b/packages/tasks/src/text-generation/about.md @@ -0,0 +1,129 @@

This task covers guides on both [text-generation](https://huggingface.co/models?pipeline_tag=text-generation&sort=downloads) and [text-to-text generation](https://huggingface.co/models?pipeline_tag=text2text-generation&sort=downloads) models. Popular large language models that are used for chats or following instructions are also covered in this task. You can find the list of selected open-source large language models [here](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard), ranked by their performance scores.

## Use Cases

### Instruction Models

A model trained for text generation can be later adapted to follow instructions. One of the most used open-source models for instruction is OpenAssistant, which you can try [at Hugging Chat](https://huggingface.co/chat).

### Code Generation

A Text Generation model, also known as a causal language model, can be trained on code from scratch to help programmers with their repetitive coding tasks. One of the most popular open-source models for code generation is StarCoder, which can generate code in 80+ languages. You can try it [here](https://huggingface.co/spaces/bigcode/bigcode-playground).

### Stories Generation

A story generation model can receive an input like "Once upon a time" and proceed to create a story-like text based on those first words. You can try [this application](https://huggingface.co/spaces/mosaicml/mpt-7b-storywriter) which contains a model trained on story generation, by MosaicML.

If your generative model training data is different from your use case, you can train a causal language model from scratch. Learn how to do it in the free transformers [course](https://huggingface.co/course/chapter7/6?fw=pt)!

## Task Variants

### Completion Generation Models

A popular variant of Text Generation models predicts the next word given a number of preceding words. Word by word, a longer text is formed, which makes it possible to, for example:

- Given an incomplete sentence, complete it.
- Continue a story given the first sentences.
- Provided a code description, generate the code.

The most popular models for this task are GPT-based models or the [Llama series](https://huggingface.co/meta-llama/Llama-2-7b-chat-hf). These models are trained on data that has no labels, so you just need plain text to train your own model. You can train text generation models to generate a wide variety of documents, from code to stories.

### Text-to-Text Generation Models

These models are trained to learn the mapping between a pair of texts (e.g. translation from one language to another). The most popular variants of these models are [FLAN-T5](https://huggingface.co/google/flan-t5-xxl) and [BART](https://huggingface.co/docs/transformers/model_doc/bart). Text-to-Text models are trained with multi-tasking capabilities and can accomplish a wide range of tasks, including summarization, translation, and text classification.
## Inference

You can use the 🤗 Transformers library `text-generation` pipeline to do inference with Text Generation models. It takes an incomplete text and returns multiple outputs with which the text can be completed.

```python
from transformers import pipeline

generator = pipeline('text-generation', model='gpt2')
generator("Hello, I'm a language model", max_length=30, num_return_sequences=3)
## [{'generated_text': "Hello, I'm a language modeler. So while writing this, when I went out to meet my wife or come home she told me that my"},
##  {'generated_text': "Hello, I'm a language modeler. I write and maintain software in Python. I love to code, and that includes coding things that require writing"}, ...
```

[Text-to-Text generation models](https://huggingface.co/models?pipeline_tag=text2text-generation&sort=downloads) have a separate pipeline called `text2text-generation`. This pipeline takes an input containing the sentence including the task and returns the output of the accomplished task.

```python
from transformers import pipeline

text2text_generator = pipeline("text2text-generation")
text2text_generator("question: What is 42 ? context: 42 is the answer to life, the universe and everything")
## [{'generated_text': 'the answer to life, the universe and everything'}]

text2text_generator("translate from English to French: I'm very happy")
## [{'generated_text': 'Je suis très heureux'}]
```

You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text generation models on Hugging Face Hub.

```javascript
import { HfInference } from "@huggingface/inference";

const inference = new HfInference(HF_ACCESS_TOKEN);
await inference.textGeneration({
	model: "gpt2",
	inputs: "Hello, I'm a language model",
});
```

## Text Generation Inference

[Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is an open-source toolkit for serving LLMs tackling challenges such as response time. TGI powers inference solutions like [Inference Endpoints](https://huggingface.co/inference-endpoints) and [Hugging Chat](https://huggingface.co/chat/), as well as multiple community projects. You can use it to deploy any supported open-source large language model of your choice.
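Once a TGI server is running, you can send it generation requests over HTTP. A minimal sketch, assuming a server is listening locally on port 8080 (the host, port, and parameters are illustrative):

```python
import requests

# query a running TGI server's /generate endpoint
response = requests.post(
    "http://localhost:8080/generate",
    json={"inputs": "What is deep learning?", "parameters": {"max_new_tokens": 20}},
)
print(response.json())
## e.g. {'generated_text': '...'}
```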
## ChatUI Spaces

Hugging Face Spaces includes templates to easily deploy your own instance of a specific application. [ChatUI](https://github.com/huggingface/chat-ui) is an open-source interface that serves a conversational UI for large language models and can be deployed on Spaces with a few clicks. TGI powers these Spaces under the hood for faster inference. Thanks to the template, you can deploy your own instance based on a large language model with only a few clicks and customize it. Learn more about it [here](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui) and create your large language model instance [here](https://huggingface.co/new-space?template=huggingchat/chat-ui-template).

![ChatUI](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/blog/os_llms/docker_chat.png)

## Useful Resources

Would you like to learn more about the topic? Awesome! Here you can find some curated resources that you may find helpful!

### Tools within Hugging Face Ecosystem

- You can use [PEFT](https://github.com/huggingface/peft) to adapt large language models in an efficient way.
- [ChatUI](https://github.com/huggingface/chat-ui) is the open-source interface to chat with large language models.
- [text-generation-inference](https://github.com/huggingface/text-generation-inference)
- [HuggingChat](https://huggingface.co/chat/) is a chat interface powered by Hugging Face to chat with powerful models like Llama 2 70B.

### Documentation

- [PEFT documentation](https://huggingface.co/docs/peft/index)
- [ChatUI Docker Spaces](https://huggingface.co/docs/hub/spaces-sdks-docker-chatui)
- [Causal language modeling task guide](https://huggingface.co/docs/transformers/tasks/language_modeling)
- [Text generation strategies](https://huggingface.co/docs/transformers/generation_strategies)

### Courses and Blogs

- [Course Chapter on Training a causal language model from scratch](https://huggingface.co/course/chapter7/6?fw=pt)
- [T0 Discussion with Victor Sanh](https://www.youtube.com/watch?v=Oy49SCW_Xpw&ab_channel=HuggingFace)
- [Hugging Face Course Workshops: Pretraining Language Models & CodeParrot](https://www.youtube.com/watch?v=ExUR7w6xe94&ab_channel=HuggingFace)
- [Training CodeParrot 🦜 from Scratch](https://huggingface.co/blog/codeparrot)
- [How to generate text: using different decoding methods for language generation with Transformers](https://huggingface.co/blog/how-to-generate)
- [Guiding Text Generation with Constrained Beam Search in 🤗 Transformers](https://huggingface.co/blog/constrained-beam-search)
- [Code generation with Hugging Face](https://huggingface.co/spaces/codeparrot/code-generation-models)
- [🌸 Introducing The World's Largest Open Multilingual Language Model: BLOOM 🌸](https://huggingface.co/blog/bloom)
- [The Technology Behind BLOOM Training](https://huggingface.co/blog/bloom-megatron-deepspeed)
- [Faster Text Generation with TensorFlow and XLA](https://huggingface.co/blog/tf-xla-generate)
- [Assisted Generation: a new direction toward low-latency text generation](https://huggingface.co/blog/assisted-generation)
- [Introducing RWKV - An RNN with the advantages of a transformer](https://huggingface.co/blog/rwkv)
- [Creating a Coding Assistant with StarCoder](https://huggingface.co/blog/starchat-alpha)
- [StarCoder: A State-of-the-Art LLM for Code](https://huggingface.co/blog/starcoder)
- [Open-Source Text Generation & LLM Ecosystem at Hugging Face](https://huggingface.co/blog/os-llms)
- [Llama 2 is at Hugging Face](https://huggingface.co/blog/llama2)

### Notebooks

- [Training a CLM in Flax](https://github.com/huggingface/notebooks/blob/master/examples/causal_language_modeling_flax.ipynb)
- [Training a CLM in TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch-tf.ipynb)
- [Training a CLM in PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/language_modeling_from_scratch.ipynb)

### Scripts for training

- [Training a CLM in PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/language-modeling)
- [Training a CLM in TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/language-modeling)
- [Text Generation in PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/text-generation)

diff --git a/packages/tasks/src/text-generation/data.ts b/packages/tasks/src/text-generation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..15b83ef0a8ed9a07365e86d67a0c606f07ec97cd
--- /dev/null
+++ b/packages/tasks/src/text-generation/data.ts
@@ -0,0 +1,126 @@
-0,0 +1,126 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A large multilingual dataset of text crawled from the web.", + id: "mc4", + }, + { + description: + "Diverse open-source data consisting of 22 smaller high-quality datasets. It was used to train GPT-Neo.", + id: "the_pile", + }, + { + description: "A crowd-sourced instruction dataset to develop an AI assistant.", + id: "OpenAssistant/oasst1", + }, + { + description: "A crowd-sourced instruction dataset created by Databricks employees.", + id: "databricks/databricks-dolly-15k", + }, + ], + demo: { + inputs: [ + { + label: "Input", + content: "Once upon a time,", + type: "text", + }, + ], + outputs: [ + { + label: "Output", + content: + "Once upon a time, we knew that our ancestors were on the verge of extinction. The great explorers and poets of the Old World, from Alexander the Great to Chaucer, are dead and gone. A good many of our ancient explorers and poets have", + type: "text", + }, + ], + }, + metrics: [ + { + description: + "Cross Entropy is a metric that calculates the difference between two probability distributions. Each probability distribution is the distribution of predicted words", + id: "Cross Entropy", + }, + { + description: + "The Perplexity metric is the exponential of the cross-entropy loss. It evaluates the probabilities assigned to the next word by the model. Lower perplexity indicates better performance", + id: "Perplexity", + }, + ], + models: [ + { + description: "A large language model trained for text generation.", + id: "bigscience/bloom-560m", + }, + { + description: "A large code generation model that can generate code in 80+ languages.", + id: "bigcode/starcoder", + }, + { + description: "A model trained to follow instructions, uses Pythia-12b as base model.", + id: "databricks/dolly-v2-12b", + }, + { + description: "A model trained to follow instructions curated by community, uses Pythia-12b as base model.", + id: "OpenAssistant/oasst-sft-4-pythia-12b-epoch-3.5", + }, + { + description: "A large language model trained to generate text in English.", + id: "stabilityai/stablelm-tuned-alpha-7b", + }, + { + description: "A model trained to follow instructions, based on mosaicml/mpt-7b.", + id: "mosaicml/mpt-7b-instruct", + }, + { + description: "A large language model trained to generate text in English.", + id: "EleutherAI/pythia-12b", + }, + { + description: "A large text-to-text model trained to follow instructions.", + id: "google/flan-ul2", + }, + { + description: "A large and powerful text generation model.", + id: "tiiuae/falcon-40b", + }, + { + description: "State-of-the-art open-source large language model.", + id: "meta-llama/Llama-2-70b-hf", + }, + ], + spaces: [ + { + description: "A robust text generation model that can perform various tasks through natural language prompting.", + id: "huggingface/bloom_demo", + }, + { + description: "An text generation based application that can write code for 80+ languages.", + id: "bigcode/bigcode-playground", + }, + { + description: "An text generation based application for conversations.", + id: "h2oai/h2ogpt-chatbot", + }, + { + description: "An text generation application that combines OpenAI and Hugging Face models.", + id: "microsoft/HuggingGPT", + }, + { + description: "An text generation application that uses StableLM-tuned-alpha-7b.", + id: "stabilityai/stablelm-tuned-alpha-chat", + }, + { + description: "An UI that uses StableLM-tuned-alpha-7b.", + id: 
"togethercomputer/OpenChatKit", + }, + ], + summary: + "Generating text is the task of producing new text. These models can, for example, fill in incomplete text or paraphrase.", + widgetModels: ["tiiuae/falcon-7b-instruct"], + youtubeId: "Vpjb1lu0MDk", +}; + +export default taskData; diff --git a/packages/tasks/src/text-to-image/about.md b/packages/tasks/src/text-to-image/about.md new file mode 100644 index 0000000000000000000000000000000000000000..e7c79fb43a2483720638c168e9f9cadb9bd9aa5d --- /dev/null +++ b/packages/tasks/src/text-to-image/about.md @@ -0,0 +1,66 @@ +## Use Cases + +### Data Generation + +Businesses can generate data for their their use cases by inputting text and getting image outputs. + +### Immersive Conversational Chatbots + +Chatbots can be made more immersive if they provide contextual images based on the input provided by the user. + +### Creative Ideas for Fashion Industry + +Different patterns can be generated to obtain unique pieces of fashion. Text-to-image models make creations easier for designers to conceptualize their design before actually implementing it. + +### Architecture Industry + +Architects can utilise the models to construct an environment based out on the requirements of the floor plan. This can also include the furniture that has to be placed in that environment. + +## Task Variants + +You can contribute variants of this task [here](https://github.com/huggingface/hub-docs/blob/main/tasks/src/text-to-image/about.md). + +## Inference + +You can use diffusers pipelines to infer with `text-to-image` models. + +```python +from diffusers import StableDiffusionPipeline, EulerDiscreteScheduler + +model_id = "stabilityai/stable-diffusion-2" +scheduler = EulerDiscreteScheduler.from_pretrained(model_id, subfolder="scheduler") +pipe = StableDiffusionPipeline.from_pretrained(model_id, scheduler=scheduler, torch_dtype=torch.float16) +pipe = pipe.to("cuda") + +prompt = "a photo of an astronaut riding a horse on mars" +image = pipe(prompt).images[0] +``` + +You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text-to-image models on Hugging Face Hub. 
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.textToImage({
+	model: "stabilityai/stable-diffusion-2",
+	inputs: "award winning high resolution photo of a giant tortoise/((ladybird)) hybrid, [trending on artstation]",
+	parameters: {
+		negative_prompt: "blurry",
+	},
+});
+```
+
+## Useful Resources
+
+- [Hugging Face Diffusion Models Course](https://github.com/huggingface/diffusion-models-class)
+- [Getting Started with Diffusers](https://huggingface.co/docs/diffusers/index)
+- [Text-to-Image Generation](https://huggingface.co/docs/diffusers/using-diffusers/conditional_image_generation)
+- [MinImagen - Build Your Own Imagen Text-to-Image Model](https://www.assemblyai.com/blog/minimagen-build-your-own-imagen-text-to-image-model/)
+- [Using LoRA for Efficient Stable Diffusion Fine-Tuning](https://huggingface.co/blog/lora)
+- [Using Stable Diffusion with Core ML on Apple Silicon](https://huggingface.co/blog/diffusers-coreml)
+- [A guide on Vector Quantized Diffusion](https://huggingface.co/blog/vq-diffusion)
+- [🧨 Stable Diffusion in JAX/Flax](https://huggingface.co/blog/stable_diffusion_jax)
+- [Running IF with 🧨 diffusers on a Free Tier Google Colab](https://huggingface.co/blog/if)
+
+This page was made possible thanks to the efforts of [Ishan Dutta](https://huggingface.co/ishandutta), [Enrique Elias Ubaldo](https://huggingface.co/herrius) and [Oğuz Akif](https://huggingface.co/oguzakif).
diff --git a/packages/tasks/src/text-to-image/data.ts b/packages/tasks/src/text-to-image/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..0a6c41ac13abf5149fef865f32fc4afee8f041a1
--- /dev/null
+++ b/packages/tasks/src/text-to-image/data.ts
@@ -0,0 +1,94 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "RedCaps is a large-scale dataset of 12M image-text pairs collected from Reddit.",
+			id: "red_caps",
+		},
+		{
+			description: "Conceptual Captions is a dataset consisting of ~3.3M images annotated with captions.",
+			id: "conceptual_captions",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "A city above clouds, pastel colors, Victorian style",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				filename: "image.jpeg",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The Inception Score (IS) measure assesses diversity and meaningfulness. It uses a generated image sample to predict its label. A higher score signifies more diverse and meaningful images.",
+			id: "IS",
+		},
+		{
+			description:
+				"The Fréchet Inception Distance (FID) calculates the distance between the distributions of synthetic and real samples. A lower FID score indicates better similarity between the distributions of real and generated images.",
+			id: "FID",
+		},
+		{
+			description:
+				"R-precision assesses how the generated image aligns with the provided text description. It uses the generated images as queries to retrieve relevant text descriptions. The top 'r' relevant descriptions are selected and used to calculate R-precision as r/R, where 'R' is the number of ground truth descriptions associated with the generated images. A higher R-precision value indicates a better model.",
+			id: "R-Precision",
+		},
+	],
+	models: [
+		{
+			description:
+				"A latent text-to-image diffusion model capable of generating photo-realistic images given any text input.",
+			id: "CompVis/stable-diffusion-v1-4",
+		},
+		{
+			description:
+				"A model that can be used to generate images based on text prompts. The DALL·E Mega model is the largest version of DALLE Mini.",
+			id: "dalle-mini/dalle-mega",
+		},
+		{
+			description: "A text-to-image model that can generate coherent text inside the image.",
+			id: "DeepFloyd/IF-I-XL-v1.0",
+		},
+		{
+			description: "A powerful text-to-image model.",
+			id: "kakaobrain/karlo-v1-alpha",
+		},
+	],
+	spaces: [
+		{
+			description: "A powerful text-to-image application.",
+			id: "stabilityai/stable-diffusion",
+		},
+		{
+			description: "A text-to-image application that can generate coherent text inside the image.",
+			id: "DeepFloyd/IF",
+		},
+		{
+			description: "A powerful text-to-image application that can generate images.",
+			id: "kakaobrain/karlo",
+		},
+		{
+			description: "A powerful text-to-image application that generates 3D representations.",
+			id: "hysts/Shap-E",
+		},
+		{
+			description: "A strong application for `text-to-image`, `image-to-image` and image inpainting.",
+			id: "ArtGAN/Stable-Diffusion-ControlNet-WebUI",
+		},
+	],
+	summary:
+		"Generates images from input text. These models can be used to generate and modify images based on text prompts.",
+	widgetModels: ["CompVis/stable-diffusion-v1-4"],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/text-to-speech/about.md b/packages/tasks/src/text-to-speech/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..5b2fc6530b4164418e68feb8ac62f0d6bc70d949
--- /dev/null
+++ b/packages/tasks/src/text-to-speech/about.md
@@ -0,0 +1,62 @@
+## Use Cases
+
+Text-to-Speech (TTS) models can be used in any speech-enabled application that requires converting text to speech imitating human voice.
+
+### Voice Assistants
+
+TTS models are used to create voice assistants on smart devices. These models are a better alternative compared to concatenative methods, where the assistant is built by recording sounds and mapping them, since the outputs in TTS models contain elements in natural speech such as emphasis.
+
+### Announcement Systems
+
+TTS models are widely used in airport and public transportation announcement systems to convert the announcement of a given text into speech.
+
+## Inference API
+
+The Hub contains over [1500 TTS models](https://huggingface.co/models?pipeline_tag=text-to-speech&sort=downloads) that you can use right away by trying out the widgets directly in the browser or calling the models as a service using the Inference API. Here is a simple code snippet to get you started:
+
+```python
+import json
+import requests
+
+headers = {"Authorization": f"Bearer {API_TOKEN}"}
+API_URL = "https://api-inference.huggingface.co/models/microsoft/speecht5_tts"
+
+def query(payload):
+    response = requests.post(API_URL, headers=headers, json=payload)
+    return response.content  # raw audio bytes
+
+output = query({"inputs": "This is a test"})
+```
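+
+The returned bytes are a ready-to-play audio file (the exact container format, e.g. FLAC or WAV, depends on the model and API); as a minimal sketch, you can write them to disk and open the file with any audio player:
+
+```python
+# `output` holds the raw audio bytes returned by the Inference API call above.
+with open("speech_sample.flac", "wb") as f:
+    f.write(output)
+```
+
+You can also use libraries such as [espnet](https://huggingface.co/models?library=espnet&pipeline_tag=text-to-speech&sort=downloads) or [transformers](https://huggingface.co/models?pipeline_tag=text-to-speech&library=transformers&sort=trending) if you want to handle the inference directly.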
+
+## Direct Inference
+
+Now, you can also use the Text-to-Speech pipeline in Transformers to synthesise high-quality speech.
+
+```python
+from transformers import pipeline
+
+synthesizer = pipeline("text-to-speech", "suno/bark")
+
+synthesizer("Look I am generating speech in three lines of code!")
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer text-to-speech models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.textToSpeech({
+	model: "facebook/mms-tts",
+	inputs: "text to generate speech from",
+});
+```
+
+## Useful Resources
+
+- [ML for Audio Study Group - Text to Speech Deep Dive](https://www.youtube.com/watch?v=aLBedWj-5CQ)
+- [An introduction to SpeechT5, a multi-purpose speech recognition and synthesis model](https://huggingface.co/blog/speecht5)
+- [A guide on Fine-tuning Whisper For Multilingual ASR with 🤗 Transformers](https://huggingface.co/blog/fine-tune-whisper)
+- [Speech Synthesis, Recognition, and More With SpeechT5](https://huggingface.co/blog/speecht5)
diff --git a/packages/tasks/src/text-to-speech/data.ts b/packages/tasks/src/text-to-speech/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..031aa96d3074df0987e50d24211c1046e2149c4f
--- /dev/null
+++ b/packages/tasks/src/text-to-speech/data.ts
@@ -0,0 +1,69 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "Thousands of short audio clips of a single speaker.",
+			id: "lj_speech",
+		},
+		{
+			description: "Multi-speaker English dataset.",
+			id: "LibriTTS",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "I love audio models on the Hub!",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				filename: "audio.wav",
+				type: "audio",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "The Mel Cepstral Distortion (MCD) metric is used to assess the quality of generated speech.",
+			id: "mel cepstral distortion",
+		},
+	],
+	models: [
+		{
+			description: "A powerful TTS model.",
+			id: "suno/bark",
+		},
+		{
+			description: "A massively multi-lingual TTS model.",
+			id: "facebook/mms-tts",
+		},
+		{
+			description: "An end-to-end speech synthesis model.",
+			id: "microsoft/speecht5_tts",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that generates highly realistic, multilingual speech.",
+			id: "suno/bark",
+		},
+		{
+			description: "An application that contains multiple speech synthesis models for various languages and accents.",
+			id: "coqui/CoquiTTS",
+		},
+		{
+			description: "An application that synthesizes speech for various speaker types.",
+			id: "Matthijs/speecht5-tts-demo",
+		},
+	],
+	summary:
+		"Text-to-Speech (TTS) is the task of generating natural sounding speech given text input. TTS models can be extended to have a single model that generates speech for multiple speakers and multiple languages.",
+	widgetModels: ["microsoft/speecht5_tts"],
+	youtubeId: "NW62DpzJ274",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/text-to-video/about.md b/packages/tasks/src/text-to-video/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..898d638c264aa8219cdc3a71d1a4562de0d084b8
--- /dev/null
+++ b/packages/tasks/src/text-to-video/about.md
@@ -0,0 +1,41 @@
+## Use Cases
+
+### Script-based Video Generation
+
+Text-to-video models can be used to create short-form video content from a provided text script. These models can be used to create engaging and informative marketing videos. For example, a company could use a text-to-video model to create a video that explains how their product works.
+
+### Content Format Conversion
+
+Text-to-video models can be used to generate videos from long-form text, including blog posts, articles, and text files. Text-to-video models can be used to create educational videos that are more engaging and interactive. An example of this is creating a video that explains a complex concept from an article.
+
+### Voice-overs and Speech
+
+Text-to-video models can be used to create an AI newscaster to deliver daily news, or for a film-maker to create a short film or a music video.
+
+## Task Variants
+
+Text-to-video models have different variants based on inputs and outputs.
+
+### Text-to-video Editing
+
+One text-to-video task is generating text-based video style and local attribute editing. Text-to-video editing models can make it easier to perform tasks like cropping, stabilization, color correction, resizing and audio editing consistently.
+
+### Text-to-video Search
+
+Text-to-video search is the task of retrieving videos that are relevant to a given text query. This can be challenging, as videos are a complex medium that can contain a lot of information. By using semantic analysis to extract the meaning of the text query, visual analysis to extract features from the videos, such as the objects and actions that are present in the video, and temporal analysis to categorize relationships between the objects and actions in the video, we can determine which videos are most likely to be relevant to the text query.
+
+### Text-driven Video Prediction
+
+Text-driven video prediction is the task of generating a video sequence from a text description. The text description can be anything from a simple sentence to a detailed story. The goal of this task is to generate a video that is both visually realistic and semantically consistent with the text description.
+
+### Video Translation
+
+Text-to-video translation models can translate videos from one language to another, or allow you to query a multilingual text-video model with non-English sentences. This can be useful for people who want to watch videos in a language they don't understand, especially when multi-lingual captions are available for training.
+
+## Inference
+
+Contribute an inference snippet for text-to-video here!
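+
+In the meantime, here is a minimal sketch using 🧨 diffusers and the [damo-vilab/text-to-video-ms-1.7b](https://huggingface.co/damo-vilab/text-to-video-ms-1.7b) checkpoint referenced on this page; treat it as an illustrative starting point rather than a canonical snippet, since arguments can vary between models.
+
+```python
+import torch
+from diffusers import DiffusionPipeline
+from diffusers.utils import export_to_video
+
+# Load a text-to-video diffusion pipeline in half precision.
+pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
+pipe = pipe.to("cuda")
+
+# Generate a short clip and export the frames to an .mp4 file.
+prompt = "Darth Vader is surfing on the waves."
+video_frames = pipe(prompt, num_inference_steps=25).frames
+video_path = export_to_video(video_frames)
+```
+
+## Useful Resources
+
+In this area, you can insert useful resources about how to train or use a model for this task.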
+
+- [Text-to-Video: The Task, Challenges and the Current State](https://huggingface.co/blog/text-to-video)
diff --git a/packages/tasks/src/text-to-video/data.ts b/packages/tasks/src/text-to-video/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..17fba749c0eaba78b38ebdc112f55fc60336965e
--- /dev/null
+++ b/packages/tasks/src/text-to-video/data.ts
@@ -0,0 +1,102 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "Microsoft Research Video to Text is a large-scale dataset for open domain video captioning.",
+			id: "iejMac/CLIP-MSR-VTT",
+		},
+		{
+			description: "UCF101 Human Actions dataset consists of 13,320 video clips from YouTube, with 101 classes.",
+			id: "quchenyuan/UCF101-ZIP",
+		},
+		{
+			description: "A high-quality dataset for human action recognition in YouTube videos.",
+			id: "nateraw/kinetics",
+		},
+		{
+			description: "A dataset of video clips of humans performing pre-defined basic actions with everyday objects.",
+			id: "HuggingFaceM4/something_something_v2",
+		},
+		{
+			description:
+				"This dataset consists of text-video pairs and contains noisy samples with irrelevant video descriptions.",
+			id: "HuggingFaceM4/webvid",
+		},
+		{
+			description: "A dataset of short Flickr videos for the temporal localization of events with descriptions.",
+			id: "iejMac/CLIP-DiDeMo",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "Darth Vader is surfing on the waves.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				filename: "text-to-video-output.gif",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"Inception Score uses an image classification model that predicts class labels and evaluates how distinct and diverse the images are. A higher score indicates better video generation.",
+			id: "is",
+		},
+		{
+			description:
+				"Frechet Inception Distance uses an image classification model to obtain image embeddings. The metric compares mean and standard deviation of the embeddings of real and generated images. A smaller score indicates better video generation.",
+			id: "fid",
+		},
+		{
+			description:
+				"Frechet Video Distance uses a model that captures coherence for changes in frames and the quality of each frame. A smaller score indicates better video generation.",
+			id: "fvd",
+		},
+		{
+			description:
+				"CLIPSIM measures similarity between video frames and text using an image-text similarity model. A higher score indicates better video generation.",
+			id: "clipsim",
+		},
+	],
+	models: [
+		{
+			description: "A strong model for video generation.",
+			id: "PAIR/text2video-zero-controlnet-canny-arcane",
+		},
+		{
+			description: "A robust model for text-to-video generation.",
+			id: "damo-vilab/text-to-video-ms-1.7b",
+		},
+		{
+			description: "A text-to-video generation model with high quality and smooth outputs.",
+			id: "cerspense/zeroscope_v2_576w",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that generates video from text.",
+			id: "fffiloni/zeroscope",
+		},
+		{
+			description: "An application that generates video from image and text.",
+			id: "TempoFunk/makeavid-sd-jax",
+		},
+		{
+			description: "An application that generates videos from text and provides multi-model support.",
+			id: "ArtGAN/Video-Diffusion-WebUI",
+		},
+	],
+	summary:
+		"Text-to-video models can be used in any application that requires generating a consistent sequence of images from text.",
+	widgetModels: [],
+	youtubeId: undefined,
+};
+
+export default taskData;
diff --git a/packages/tasks/src/token-classification/about.md b/packages/tasks/src/token-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..9b0701385b5793f32bdafd890c476a4efb99b509
--- /dev/null
+++ b/packages/tasks/src/token-classification/about.md
@@ -0,0 +1,76 @@
+## Use Cases
+
+### Information Extraction from Invoices
+
+You can extract entities of interest from invoices automatically using Named Entity Recognition (NER) models. Invoices can be read with Optical Character Recognition models, and the output can be used to do inference with NER models. In this way, important information such as the date, company name, and other named entities can be extracted.
+
+## Task Variants
+
+### Named Entity Recognition (NER)
+
+NER is the task of recognizing named entities in a text. These entities can be the names of people, locations, or organizations. The task is formulated as labeling each token with a class for each named entity and a class named "O" for tokens that do not contain any entities. The input for this task is text and the output is the annotated text with named entities.
+
+#### Inference
+
+You can use the 🤗 Transformers library `ner` pipeline to infer with NER models.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("ner")
+classifier("Hello I'm Omar and I live in Zürich.")
+```
+
+### Part-of-Speech (PoS) Tagging
+
+In PoS tagging, the model recognizes parts of speech, such as nouns, pronouns, adjectives, or verbs, in a given text. The task is formulated as labeling each word with a part of speech.
+
+#### Inference
+
+You can use the 🤗 Transformers library `token-classification` pipeline with a PoS tagging model of your choice. The model will return a list of PoS tags for each token.
+
+```python
+from transformers import pipeline
+
+classifier = pipeline("token-classification", model = "vblagoje/bert-english-uncased-finetuned-pos")
+classifier("Hello I'm Omar and I live in Zürich.")
+```
+
+This is not limited to transformers! You can also use other libraries such as Stanza, spaCy, and Flair to do inference! Here is an example using a canonical [spaCy](https://hf.co/blog/spacy) model.
+
+```python
+!pip install https://huggingface.co/spacy/en_core_web_sm/resolve/main/en_core_web_sm-any-py3-none-any.whl
+
+import en_core_web_sm
+
+nlp = en_core_web_sm.load()
+doc = nlp("I'm Omar and I live in Zürich.")
+for token in doc:
+    print(token.text, token.pos_, token.dep_, token.ent_type_)
+
+## I PRON nsubj
+## 'm AUX ROOT
+## Omar PROPN attr PERSON
+### ...
+```
+
+## Useful Resources
+
+Would you like to learn more about token classification? Great! Here you can find some curated resources that you may find helpful!
+
+- [Course Chapter on Token Classification](https://huggingface.co/course/chapter7/2?fw=pt)
+- [Blog post: Welcome spaCy to the Hugging Face Hub](https://huggingface.co/blog/spacy)
+
+### Notebooks
+
+- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/token_classification.ipynb)
+- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/token_classification-tf.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/token-classification)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/token-classification)
+- [Flax](https://github.com/huggingface/transformers/tree/main/examples/flax/token-classification)
+
+### Documentation
+
+- [Token classification task guide](https://huggingface.co/docs/transformers/tasks/token_classification)
diff --git a/packages/tasks/src/token-classification/data.ts b/packages/tasks/src/token-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..cb3e211df92f7366d1209c194c0ec481cd0380c1
--- /dev/null
+++ b/packages/tasks/src/token-classification/data.ts
@@ -0,0 +1,84 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A widely used dataset useful to benchmark named entity recognition models.",
+			id: "conll2003",
+		},
+		{
+			description:
+				"A multilingual dataset of Wikipedia articles annotated for named entity recognition in over 150 different languages.",
+			id: "wikiann",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "My name is Omar and I live in Zürich.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				text: "My name is Omar and I live in Zürich.",
+				tokens: [
+					{
+						type: "PERSON",
+						start: 11,
+						end: 15,
+					},
+					{
+						type: "GPE",
+						start: 30,
+						end: 36,
+					},
+				],
+				type: "text-with-tokens",
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "accuracy",
+		},
+		{
+			description: "",
+			id: "recall",
+		},
+		{
+			description: "",
+			id: "precision",
+		},
+		{
+			description: "",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			description:
+				"A robust model that performs well at identifying people, locations, organizations and names of miscellaneous entities.",
+			id: "dslim/bert-base-NER",
+		},
+		{
+			description: "Flair models are typically the state of the art in named entity recognition tasks.",
+			id: "flair/ner-english",
+		},
+	],
+	spaces: [
+		{
+			description:
+				"An application that recognizes entities, extracts noun chunks and recognizes various linguistic features of each token.",
+			id: "spacy/gradio_pipeline_visualizer",
+		},
+	],
+	summary:
+		"Token classification is a natural language understanding task in which a label is assigned to some tokens in a text. Some popular token classification subtasks are Named Entity Recognition (NER) and Part-of-Speech (PoS) tagging. NER models could be trained to identify specific entities in a text, such as dates, individuals and places; and PoS tagging would identify, for example, which words in a text are verbs, nouns, and punctuation marks.",
+	widgetModels: ["dslim/bert-base-NER"],
+	youtubeId: "wVHdVlPScxA",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/translation/about.md b/packages/tasks/src/translation/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..39755a8db4de8934ee7ec3881b942346e02100c5
--- /dev/null
+++ b/packages/tasks/src/translation/about.md
@@ -0,0 +1,65 @@
+## Use Cases
+
+You can find over a thousand Translation models on the Hub, but sometimes you might not find a model for the language pair you are interested in. When this happens, you can use a pretrained multilingual Translation model like [mBART](https://huggingface.co/facebook/mbart-large-cc25) and further train it on your own data in a process called fine-tuning.
+
+### Multilingual conversational agents
+
+Translation models can be used to build conversational agents across different languages. This can be done in two ways.
+
+- **Translate the dataset to a new language.** You can translate a dataset of intents (inputs) and responses to the target language. You can then train a new intent classification model with this new dataset. This allows you to proofread responses in the target language and have better control of the chatbot's outputs.
+- **Translate the input and output of the agent.** You can use a Translation model to translate the user inputs so that the chatbot can process them. You can then translate the output of the chatbot into the language of the user. This approach might be less reliable as the chatbot will generate responses that were not defined before.
+
+## Inference
+
+You can use the 🤗 Transformers library with the `translation_xx_to_yy` pattern where xx is the source language code and yy is the target language code. The default model for the pipeline is [t5-base](https://huggingface.co/t5-base), which under the hood adds a task prefix indicating the task itself, e.g. “translate English to French: ”.
+
+```python
+from transformers import pipeline
+en_fr_translator = pipeline("translation_en_to_fr")
+en_fr_translator("How old are you?")
+## [{'translation_text': ' quel âge êtes-vous?'}]
+```
+
+If you’d like to use a specific model checkpoint that is from one specific language to another, you can also directly use the `translation` pipeline.
+
+```python
+from transformers import pipeline
+
+model_checkpoint = "Helsinki-NLP/opus-mt-en-fr"
+translator = pipeline("translation", model=model_checkpoint)
+translator("How are you?")
+# [{'translation_text': 'Comment allez-vous ?'}]
+```
+
+You can use [huggingface.js](https://github.com/huggingface/huggingface.js) to infer translation models on Hugging Face Hub.
+
+```javascript
+import { HfInference } from "@huggingface/inference";
+
+const inference = new HfInference(HF_ACCESS_TOKEN);
+await inference.translation({
+	model: "t5-base",
+	inputs: "My name is Wolfgang and I live in Berlin",
+});
+```
+
+## Useful Resources
+
+Would you like to learn more about Translation? Great! Here you can find some curated resources that you may find helpful!
+
+- [Course Chapter on Translation](https://huggingface.co/course/chapter7/4?fw=pt)
+
+### Notebooks
+
+- [PyTorch](https://github.com/huggingface/notebooks/blob/master/examples/translation.ipynb)
+- [TensorFlow](https://github.com/huggingface/notebooks/blob/master/examples/translation-tf.ipynb)
+
+### Scripts for training
+
+- [PyTorch](https://github.com/huggingface/transformers/tree/main/examples/pytorch/translation)
+- [TensorFlow](https://github.com/huggingface/transformers/tree/main/examples/tensorflow/translation)
+
+### Documentation
+
+- [Translation task guide](https://huggingface.co/docs/transformers/tasks/translation)
diff --git a/packages/tasks/src/translation/data.ts b/packages/tasks/src/translation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..7973a77bd0142c603b5147dcdbf4f97501f8c9fa
--- /dev/null
+++ b/packages/tasks/src/translation/data.ts
@@ -0,0 +1,68 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description: "A dataset of copyright-free books translated into 16 different languages.",
+			id: "opus_books",
+		},
+		{
+			description:
+				"An example of translation between programming languages. This dataset consists of functions in Java and C#.",
+			id: "code_x_glue_cc_code_to_code_trans",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Input",
+				content: "My name is Omar and I live in Zürich.",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				label: "Output",
+				content: "Mein Name ist Omar und ich wohne in Zürich.",
+				type: "text",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"BLEU score is calculated by counting the number of shared single or subsequent tokens between the generated sequence and the reference. Subsequent n tokens are called “n-grams”. Unigram refers to a single token while bi-gram refers to token pairs and n-grams refer to n subsequent tokens. The score ranges from 0 to 1, where 1 means the translation perfectly matched the reference and 0 means it did not match at all.",
+			id: "bleu",
+		},
+		{
+			description: "",
+			id: "sacrebleu",
+		},
+	],
+	models: [
+		{
+			description: "A model that translates from English to French.",
+			id: "Helsinki-NLP/opus-mt-en-fr",
+		},
+		{
+			description:
+				"A general-purpose Transformer that can be used to translate from English to German, French, or Romanian.",
+			id: "t5-base",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can translate between 100 languages.",
+			id: "Iker/Translate-100-languages",
+		},
+		{
+			description: "An application that can translate between English, Spanish and Hindi.",
+			id: "EuroPython2022/Translate-with-Bloom",
+		},
+	],
+	summary: "Translation is the task of converting text from one language to another.",
+	widgetModels: ["t5-small"],
+	youtubeId: "1JvfrvZgi6c",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/unconditional-image-generation/about.md b/packages/tasks/src/unconditional-image-generation/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..e5a9585528ae0afd0bc779d0d8628ceca167376e
--- /dev/null
+++ b/packages/tasks/src/unconditional-image-generation/about.md
@@ -0,0 +1,50 @@
+## About the Task
+
+Unconditional image generation is the task of generating new images without any specific input. The main goal of this is to create novel, original images that are not based on existing images.
+This can be used for a variety of applications, such as creating new artistic images, improving image recognition algorithms, or generating photorealistic images for virtual reality environments.
+
+Unconditional image generation models usually start with a _seed_ that generates a _random noise vector_. The model will then use this vector to create an output image similar to the images used for training the model.
+
+An example of unconditional image generation would be generating the image of a face on a model trained with the [CelebA dataset](https://huggingface.co/datasets/huggan/CelebA-HQ) or [generating a butterfly](https://huggingface.co/spaces/huggan/butterfly-gan) on a model trained with the [Smithsonian Butterflies dataset](https://huggingface.co/datasets/ceyda/smithsonian_butterflies).
+
+[Generative adversarial networks](https://en.wikipedia.org/wiki/Generative_adversarial_network) and [Diffusion](https://huggingface.co/docs/diffusers/index) are common architectures for this task.
+
+## Use Cases
+
+Unconditional image generation can be used for a variety of applications.
+
+### Artistic Expression
+
+Unconditional image generation can be used to create novel, original artwork that is not based on any existing images. This can be used to explore new creative possibilities and produce unique, imaginative images.
+
+### Data Augmentation
+
+Unconditional image generation models can be used to generate new images to improve the performance of image recognition algorithms. This makes algorithms more robust and able to handle a broader range of images.
+
+### Virtual Reality
+
+Unconditional image generation models can be used to create photorealistic images that can be used in virtual reality environments. This makes the VR experience more immersive and realistic.
+
+### Medical Imaging
+
+Unconditional image generation models can generate new medical images, such as CT or MRI scans, that can be used to train and evaluate medical imaging algorithms. This can improve the accuracy and reliability of these algorithms.
+
+### Industrial Design
+
+Unconditional image generation models can generate new designs for products, such as clothing or furniture, that are not based on any existing designs. This way, designers can explore new creative possibilities and produce unique, innovative designs.
+
+## Model Hosting and Inference
+
+This section should have useful information about Model Hosting and Inference.
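+
+In the meantime, here is a minimal sketch using 🧨 diffusers and one of the DDPM checkpoints referenced on this page; this is an illustrative example rather than the only way to host or serve such a model.
+
+```python
+from diffusers import DDPMPipeline
+
+# Load a pretrained unconditional diffusion model (here, a DDPM trained on CelebA-HQ faces).
+pipeline = DDPMPipeline.from_pretrained("google/ddpm-celebahq-256")
+
+# Sampling takes no prompt or other input: generation starts from random noise.
+image = pipeline().images[0]
+image.save("generated_face.png")
+```
+
+## Useful Resources
+
+- [Hugging Face Diffusion Models Course](https://github.com/huggingface/diffusion-models-class)
+- [Getting Started with Diffusers](https://huggingface.co/docs/diffusers/index)
+- [Unconditional Image Generation Training](https://huggingface.co/docs/diffusers/training/unconditional_training)
+
+### Training your own model in just a few seconds
+
+In this area, you can insert useful information about training the model.
+
+This page was made possible thanks to the efforts of [Someet Sahoo](https://huggingface.co/Someet24) and [Juan Carlos Piñeros](https://huggingface.co/juancopi81).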
diff --git a/packages/tasks/src/unconditional-image-generation/data.ts b/packages/tasks/src/unconditional-image-generation/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..f9eeac7e45900294fa268234f92a8f7b7f5ff494
--- /dev/null
+++ b/packages/tasks/src/unconditional-image-generation/data.ts
@@ -0,0 +1,72 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			description:
+				"The CIFAR-100 dataset consists of 60000 32x32 colour images in 100 classes, with 600 images per class.",
+			id: "cifar100",
+		},
+		{
+			description: "Multiple images of celebrities, used for facial expression translation.",
+			id: "CelebA",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				label: "Seed",
+				content: "42",
+				type: "text",
+			},
+			{
+				label: "Number of images to generate:",
+				content: "4",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				filename: "unconditional-image-generation-output.jpeg",
+				type: "img",
+			},
+		],
+	},
+	metrics: [
+		{
+			description:
+				"The inception score (IS) evaluates the quality of generated images. It measures the diversity of the generated images (the model predictions are evenly distributed across all possible labels) and their 'distinction' or 'sharpness' (the model confidently predicts a single label for each image).",
+			id: "Inception score (IS)",
+		},
+		{
+			description:
+				"The Fréchet Inception Distance (FID) evaluates the quality of images created by a generative model by calculating the distance between feature vectors for real and generated images.",
+			id: "Fréchet Inception Distance (FID)",
+		},
+	],
+	models: [
+		{
+			description:
+				"High-quality image generation model trained on the CIFAR-10 dataset. It synthesizes images of the ten classes presented in the dataset using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.",
+			id: "google/ddpm-cifar10-32",
+		},
+		{
+			description:
+				"High-quality image generation model trained on the 256x256 CelebA-HQ dataset. It synthesizes images of faces using diffusion probabilistic models, a class of latent variable models inspired by considerations from nonequilibrium thermodynamics.",
+			id: "google/ddpm-celebahq-256",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that can generate realistic faces.",
+			id: "CompVis/celeba-latent-diffusion",
+		},
+	],
+	summary:
+		"Unconditional image generation is the task of generating images with no condition in any context (like a prompt text or another image). Once trained, the model will create images that resemble its training data distribution.",
+	widgetModels: [""],
+	// TODO: Add related video
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/video-classification/about.md b/packages/tasks/src/video-classification/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..fc87585af4f9eb3092e169cb2a61e32772a5034c
--- /dev/null
+++ b/packages/tasks/src/video-classification/about.md
@@ -0,0 +1,57 @@
+## Use Cases
+
+Video classification models can be used to categorize what a video is all about.
+
+### Activity Recognition
+
+Video classification models are used to perform activity recognition, which is useful for fitness applications. Activity recognition is also helpful for vision-impaired individuals, especially when they're commuting.
+
+### Video Search
+
+Models trained in video classification can improve user experience by organizing and categorizing video galleries on the phone or in the cloud, based on multiple keywords or tags.
+
+## Inference
+
+Below you can find code for inferring with a pre-trained video classification model.
+
+```python
+import torch
+from transformers import VideoMAEFeatureExtractor, VideoMAEForVideoClassification
+from pytorchvideo.transforms import UniformTemporalSubsample
+from pytorchvideo.data.encoded_video import EncodedVideo
+
+# Load the feature extractor and a VideoMAE model fine-tuned on Kinetics 400.
+model_ckpt = "MCG-NJU/videomae-base-finetuned-kinetics"
+feature_extractor = VideoMAEFeatureExtractor.from_pretrained(model_ckpt)
+model = VideoMAEForVideoClassification.from_pretrained(model_ckpt)
+
+# Load the video.
+video = EncodedVideo.from_path("path_to_video.mp4")
+video_data = video.get_clip(start_sec=0, end_sec=4.0)["video"]
+
+# Sub-sample a fixed set of frames and convert them to a NumPy array.
+num_frames = 16
+subsampler = UniformTemporalSubsample(num_frames)
+subsampled_frames = subsampler(video_data)
+video_data_np = subsampled_frames.numpy().transpose(1, 2, 3, 0)
+
+# Preprocess the video frames.
+inputs = feature_extractor(list(video_data_np), return_tensors="pt")
+
+# Run inference.
+with torch.no_grad():
+    outputs = model(**inputs)
+    logits = outputs.logits
+
+# Model predicts one of the 400 Kinetics 400 classes.
+predicted_label = logits.argmax(-1).item()
+print(model.config.id2label[predicted_label])
+# `eating spaghetti` (if you chose this video:
+# https://hf.co/datasets/nielsr/video-demo/resolve/main/eating_spaghetti.mp4)
+```
+
+## Useful Resources
+
+- [Developing a simple video classification model](https://keras.io/examples/vision/video_classification)
+- [Video classification with Transformers](https://keras.io/examples/vision/video_transformers)
+- [Building a video archive](https://www.youtube.com/watch?v=_IeS1m8r6SY)
+- [Video classification task guide](https://huggingface.co/docs/transformers/tasks/video_classification)
+
+### Creating your own video classifier in minutes
+
+- [Fine-tuning tutorial notebook (PyTorch)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/examples/video_classification.ipynb)
diff --git a/packages/tasks/src/video-classification/data.ts b/packages/tasks/src/video-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..2f6e4d93551ddea48d5c64b831d23ac79fbe9e5a
--- /dev/null
+++ b/packages/tasks/src/video-classification/data.ts
@@ -0,0 +1,84 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description: "Benchmark dataset used for video classification with videos that belong to 400 classes.",
+			id: "kinetics400",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "video-classification-input.gif",
+				type: "img",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "Playing Guitar",
+						score: 0.514,
+					},
+					{
+						label: "Playing Tennis",
+						score: 0.193,
+					},
+					{
+						label: "Cooking",
+						score: 0.068,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "",
+			id: "accuracy",
+		},
+		{
+			description: "",
+			id: "recall",
+		},
+		{
+			description: "",
+			id: "precision",
+		},
+		{
+			description: "",
+			id: "f1",
+		},
+	],
+	models: [
+		{
+			// TO DO: write description
+			description: "Strong Video Classification model trained on the Kinetics 400 dataset.",
+			id: "MCG-NJU/videomae-base-finetuned-kinetics",
+		},
+		{
+			// TO DO: write description
+			description: "Strong Video Classification model trained on the Kinetics 400 dataset.",
+			id: "microsoft/xclip-base-patch32",
+		},
+	],
+	spaces: [
+		{
+			description: "An application that classifies video at different timestamps.",
+			id: "nateraw/lavila",
+		},
+		{
+			description: "An application that classifies video.",
+			id: "fcakyon/video-classification",
+		},
+	],
+	summary:
+		"Video classification is the task of assigning a label or class to an entire video. Videos are expected to have only one class for each video. Video classification models take a video as input and return a prediction about which class the video belongs to.",
+	widgetModels: [],
+	youtubeId: "",
+};
+
+export default taskData;
diff --git a/packages/tasks/src/visual-question-answering/about.md b/packages/tasks/src/visual-question-answering/about.md
new file mode 100644
index 0000000000000000000000000000000000000000..7f96e1679b8a5b46042f5c6e2eb533e80749160f
--- /dev/null
+++ b/packages/tasks/src/visual-question-answering/about.md
@@ -0,0 +1,48 @@
+## Use Cases
+
+### Aiding Visually Impaired People
+
+VQA models can be used to reduce visual barriers for visually impaired individuals by allowing them to get information about images from the web and the real world.
+
+### Education
+
+VQA models can be used to improve experiences at museums by allowing observers to directly ask the questions they are interested in.
+
+### Improved Image Retrieval
+
+Visual question answering models can be used to retrieve images with specific characteristics. For example, the user can ask "Is there a dog?" to find all images with dogs from a set of images.
+
+### Video Search
+
+Specific snippets/timestamps of a video can be retrieved based on search queries. For example, the user can ask "At which part of the video does the guitar appear?" and get a specific timestamp range from the whole video.
+
+## Task Variants
+
+### Video Question Answering
+
+Video Question Answering aims to answer questions asked about the content of a video.
+
+## Inference
+
+You can infer with Visual Question Answering models using the `vqa` (or `visual-question-answering`) pipeline. This pipeline requires [the Python Imaging Library (PIL)](https://pillow.readthedocs.io/en/stable/) to process images. You can install it with `pip install pillow`.
+
+```python
+from PIL import Image
+from transformers import pipeline
+
+vqa_pipeline = pipeline("visual-question-answering")
+
+image = Image.open("elephant.jpeg")
+question = "Is there an elephant?"
+
+vqa_pipeline(image, question, top_k=1)
+#[{'score': 0.9998154044151306, 'answer': 'yes'}]
+```
+
+## Useful Resources
+
+- [An introduction to Visual Question Answering - AllenAI](https://blog.allenai.org/vanilla-vqa-adcaaaa94336)
+- [Multi Modal Framework (MMF) - Meta Research](https://mmf.sh/docs/getting_started/video_overview/)
+
+The contents of this page were contributed by [Bharat Raghunathan](https://huggingface.co/bharat-raghunathan) and [Jose Londono Botero](https://huggingface.co/jlondonobo).
diff --git a/packages/tasks/src/visual-question-answering/data.ts b/packages/tasks/src/visual-question-answering/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..83a7e69496945aea9867eadd25d89af4fb9f79c9 --- /dev/null +++ b/packages/tasks/src/visual-question-answering/data.ts @@ -0,0 +1,93 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A widely used dataset containing questions (with answers) about images.", + id: "Graphcore/vqa", + }, + { + description: "A dataset to benchmark visual reasoning based on text in images.", + id: "textvqa", + }, + ], + demo: { + inputs: [ + { + filename: "elephant.jpeg", + type: "img", + }, + { + label: "Question", + content: "What is in this image?", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "elephant", + score: 0.97, + }, + { + label: "elephants", + score: 0.06, + }, + { + label: "animal", + score: 0.003, + }, + ], + }, + ], + }, + isPlaceholder: false, + metrics: [ + { + description: "", + id: "accuracy", + }, + { + description: + "Measures how much a predicted answer differs from the ground truth based on the difference in their semantic meaning.", + id: "wu-palmer similarity", + }, + ], + models: [ + { + description: "A visual question answering model trained to convert charts and plots to text.", + id: "google/deplot", + }, + { + description: + "A visual question answering model trained for mathematical reasoning and chart derendering from images.", + id: "google/matcha-base ", + }, + { + description: "A strong visual question answering that answers questions from book covers.", + id: "google/pix2struct-ocrvqa-large", + }, + ], + spaces: [ + { + description: "An application that can answer questions based on images.", + id: "nielsr/vilt-vqa", + }, + { + description: "An application that can caption images and answer questions about a given image. ", + id: "Salesforce/BLIP", + }, + { + description: "An application that can caption images and answer questions about a given image. ", + id: "vumichien/Img2Prompt", + }, + ], + summary: + "Visual Question Answering is the task of answering open-ended questions based on an image. They output natural language responses to natural language questions.", + widgetModels: ["dandelin/vilt-b32-finetuned-vqa"], + youtubeId: "", +}; + +export default taskData; diff --git a/packages/tasks/src/zero-shot-classification/about.md b/packages/tasks/src/zero-shot-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..9b7ff3c48c931d3355c76aed20b891fe8f57c54b --- /dev/null +++ b/packages/tasks/src/zero-shot-classification/about.md @@ -0,0 +1,40 @@ +## About the Task + +Zero Shot Classification is the task of predicting a class that wasn't seen by the model during training. This method, which leverages a pre-trained language model, can be thought of as an instance of [transfer learning](https://www.youtube.com/watch?v=BqqfQnyjmgg) which generally refers to using a model trained for one task in a different application than what it was originally trained for. This is particularly useful for situations where the amount of labeled data is small. + +In zero shot classification, we provide the model with a prompt and a sequence of text that describes what we want our model to do, in natural language. Zero-shot classification excludes any examples of the desired task being completed. 
This differs from single or few-shot classification, as these tasks include a single or a few examples of the selected task. + +Zero, single and few-shot classification seem to be an emergent feature of large language models. This feature seems to come about around model sizes of +100M parameters. The effectiveness of a model at a zero, single or few-shot task seems to scale with model size, meaning that larger models (models with more trainable parameters or layers) generally do better at this task. + +Here is an example of a zero-shot prompt for classifying the sentiment of a sequence of text: + +``` +Classify the following input text into one of the following three categories: [positive, negative, neutral] + +Input Text: Hugging Face is awesome for making all of these +state of the art models available! +Sentiment: positive + +``` + +One great example of this task with a nice off-the-shelf model is available at the widget of this page, where the user can input a sequence of text and candidate labels to the model. This is a _word level_ example of zero shot classification, more elaborate and lengthy generations are available with larger models. Testing these models out and getting a feel for prompt engineering is the best way to learn how to use them. + +## Inference + +You can use the 🤗 Transformers library zero-shot-classification pipeline to infer with zero shot text classification models. + +```python +from transformers import pipeline + +pipe = pipeline(model="facebook/bart-large-mnli") +pipe("I have a problem with my iphone that needs to be resolved asap!", + candidate_labels=["urgent", "not urgent", "phone", "tablet", "computer"], +) +# output +>>> {'sequence': 'I have a problem with my iphone that needs to be resolved asap!!', 'labels': ['urgent', 'phone', 'computer', 'not urgent', 'tablet'], 'scores': [0.504, 0.479, 0.013, 0.003, 0.002]} +``` + +## Useful Resources + +- [Zero Shot Learning](https://joeddav.github.io/blog/2020/05/29/ZSL.html) +- [Hugging Face on Transfer Learning](https://huggingface.co/course/en/chapter1/4?fw=pt#transfer-learning) diff --git a/packages/tasks/src/zero-shot-classification/data.ts b/packages/tasks/src/zero-shot-classification/data.ts new file mode 100644 index 0000000000000000000000000000000000000000..c949fd0a197936ad37dcb61850703d40771d84e8 --- /dev/null +++ b/packages/tasks/src/zero-shot-classification/data.ts @@ -0,0 +1,66 @@ +import type { TaskDataCustom } from "../Types"; + +const taskData: TaskDataCustom = { + datasets: [ + { + description: "A widely used dataset used to benchmark multiple variants of text classification.", + id: "glue", + }, + { + description: + "The Multi-Genre Natural Language Inference (MultiNLI) corpus is a crowd-sourced collection of 433k sentence pairs annotated with textual entailment information.", + id: "MultiNLI", + }, + { + description: + "FEVER is a publicly available dataset for fact extraction and verification against textual sources.", + id: "FEVER", + }, + ], + demo: { + inputs: [ + { + label: "Text Input", + content: "Dune is the best movie ever.", + type: "text", + }, + { + label: "Candidate Labels", + content: "CINEMA, ART, MUSIC", + type: "text", + }, + ], + outputs: [ + { + type: "chart", + data: [ + { + label: "CINEMA", + score: 0.9, + }, + { + label: "ART", + score: 0.1, + }, + { + label: "MUSIC", + score: 0.0, + }, + ], + }, + ], + }, + metrics: [], + models: [ + { + description: "Powerful zero-shot text classification model", + id: "facebook/bart-large-mnli", + }, + ], + spaces: [], + summary: + 
"Zero-shot text classification is a task in natural language processing where a model is trained on a set of labeled examples but is then able to classify new examples from previously unseen classes.", + widgetModels: ["facebook/bart-large-mnli"], +}; + +export default taskData; diff --git a/packages/tasks/src/zero-shot-image-classification/about.md b/packages/tasks/src/zero-shot-image-classification/about.md new file mode 100644 index 0000000000000000000000000000000000000000..0c4b283280ac5dadcc270740ad6fcff03f18f5ba --- /dev/null +++ b/packages/tasks/src/zero-shot-image-classification/about.md @@ -0,0 +1,76 @@ +## About the Task + +Zero-shot image classification is a computer vision task to classify images into one of several classes, without any prior training or knowledge of the classes. + +Zero shot image classification works by transferring knowledge learnt during training of one model, to classify novel classes that was not present in the training data. So this is a variation of [transfer learning](https://www.youtube.com/watch?v=BqqfQnyjmgg). For instance, a model trained to differentiate cars from airplanes can be used to classify images of ships. + +The data in this learning paradigm consists of + +- Seen data - images and their corresponding labels +- Unseen data - only labels and no images +- Auxiliary information - additional information given to the model during training connecting the unseen and seen data. This can be in the form of textual description or word embeddings. + +## Use Cases + +### Image Retrieval + +Zero-shot learning resolves several challenges in image retrieval systems. For example, with the rapid growth of categories on the web, it is challenging to index images based on unseen categories. With zero-shot learning we can associate unseen categories to images by exploiting attributes to model the relationships among visual features and labels. + +### Action Recognition + +Action recognition is the task of identifying when a person in an image/video is performing a given action from a set of actions. If all the possible actions are not known beforehand, conventional deep learning models fail. With zero-shot learning, for a given domain of a set of actions, we can create a mapping connecting low-level features and a semantic description of auxiliary data to classify unknown classes of actions. + +## Task Variants + +You can contribute variants of this task [here](https://github.com/huggingface/hub-docs/blob/main/tasks/src/zero-shot-image-classification/about.md). + +## Inference + +The model can be loaded with the zero-shot-image-classification pipeline like so: + +```python +from transformers import pipeline +# More models in the model hub. +model_name = "openai/clip-vit-large-patch14-336" +classifier = pipeline("zero-shot-image-classification", model = model_name) +``` + +You can then use this pipeline to classify images into any of the class names you specify. You can specify more than two class labels too. + +```python +image_to_classify = "path_to_cat_and_dog_image.jpeg" +labels_for_classification = ["cat and dog", + "lion and cheetah", + "rabbit and lion"] +scores = classifier(image_to_classify, + candidate_labels = labels_for_classification) +``` + +The classifier would return a list of dictionaries after the inference which is stored in the variable `scores` in the code snippet above. 
+
+## Useful Resources
+
+You can contribute useful resources about this task [here](https://github.com/huggingface/hub-docs/blob/main/tasks/src/zero-shot-image-classification/about.md).
+
+Check out the [zero-shot image classification task guide](https://huggingface.co/docs/transformers/tasks/zero_shot_image_classification).
+
+This page was made possible thanks to the efforts of [Shamima Hossain](https://huggingface.co/Shamima), [Haider Zaidi](https://huggingface.co/chefhaider) and [Paarth Bhatnagar](https://huggingface.co/Paarth).
diff --git a/packages/tasks/src/zero-shot-image-classification/data.ts b/packages/tasks/src/zero-shot-image-classification/data.ts
new file mode 100644
index 0000000000000000000000000000000000000000..be8da73d4c94f7a4b715fb0f8f2ec2851869cb49
--- /dev/null
+++ b/packages/tasks/src/zero-shot-image-classification/data.ts
@@ -0,0 +1,77 @@
+import type { TaskDataCustom } from "../Types";
+
+const taskData: TaskDataCustom = {
+	datasets: [
+		{
+			// TODO write proper description
+			description: "",
+			id: "",
+		},
+	],
+	demo: {
+		inputs: [
+			{
+				filename: "image-classification-input.jpeg",
+				type: "img",
+			},
+			{
+				label: "Classes",
+				content: "cat, dog, bird",
+				type: "text",
+			},
+		],
+		outputs: [
+			{
+				type: "chart",
+				data: [
+					{
+						label: "Cat",
+						score: 0.664,
+					},
+					{
+						label: "Dog",
+						score: 0.329,
+					},
+					{
+						label: "Bird",
+						score: 0.008,
+					},
+				],
+			},
+		],
+	},
+	metrics: [
+		{
+			description: "Computes the number of times the correct label appears in the top K predicted labels",
+			id: "top-K accuracy",
+		},
+	],
+	models: [
+		{
+			description: "Robust image classification model trained on publicly available image-caption data.",
+			id: "openai/clip-vit-base-patch16",
+		},
+		{
+			description:
+				"Robust image classification model trained on publicly available image-caption data, with additional training on higher-resolution images for better performance.",
+			id: "openai/clip-vit-large-patch14-336",
+		},
+		{
+			description: "Strong image classification model for the biomedical domain.",
+			id: "microsoft/BiomedCLIP-PubMedBERT_256-vit_base_patch16_224",
+		},
+	],
+	spaces: [
+		{
+			description:
+				"An application that leverages zero-shot image classification to find the best captions to generate an image. 
", + id: "pharma/CLIP-Interrogator", + }, + ], + summary: + "Zero shot image classification is the task of classifying previously unseen classes during training of a model.", + widgetModels: ["openai/clip-vit-large-patch14-336"], + youtubeId: "", +}; + +export default taskData; diff --git a/packages/tasks/tsconfig.json b/packages/tasks/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..37823efde76049a185b3c599a3d9709fe765af1d --- /dev/null +++ b/packages/tasks/tsconfig.json @@ -0,0 +1,18 @@ +{ + "compilerOptions": { + "allowSyntheticDefaultImports": true, + "lib": ["ES2022", "DOM"], + "module": "CommonJS", + "moduleResolution": "node", + "target": "ES2022", + "forceConsistentCasingInFileNames": true, + "strict": true, + "noImplicitAny": true, + "strictNullChecks": true, + "skipLibCheck": true, + "noImplicitOverride": true, + "outDir": "./dist" + }, + "include": ["src"], + "exclude": ["dist"] +} diff --git a/packages/widgets/.eslintignore b/packages/widgets/.eslintignore new file mode 100644 index 0000000000000000000000000000000000000000..03cc13658b964ab78efc70b29922fced12af0f9f --- /dev/null +++ b/packages/widgets/.eslintignore @@ -0,0 +1,14 @@ +.DS_Store +node_modules +/dist +/build +/.svelte-kit +/package +.env +.env.* +!.env.example + +# Ignore files for PNPM, NPM and YARN +pnpm-lock.yaml +package-lock.json +yarn.lock diff --git a/packages/widgets/.gitignore b/packages/widgets/.gitignore new file mode 100644 index 0000000000000000000000000000000000000000..ac7211b4033ca0cd15c21967204484e016ea6739 --- /dev/null +++ b/packages/widgets/.gitignore @@ -0,0 +1,11 @@ +.DS_Store +node_modules +/build +/dist +/.svelte-kit +/package +.env +.env.* +!.env.example +vite.config.js.timestamp-* +vite.config.ts.timestamp-* diff --git a/packages/widgets/.prettierignore b/packages/widgets/.prettierignore new file mode 100644 index 0000000000000000000000000000000000000000..03cc13658b964ab78efc70b29922fced12af0f9f --- /dev/null +++ b/packages/widgets/.prettierignore @@ -0,0 +1,14 @@ +.DS_Store +node_modules +/dist +/build +/.svelte-kit +/package +.env +.env.* +!.env.example + +# Ignore files for PNPM, NPM and YARN +pnpm-lock.yaml +package-lock.json +yarn.lock diff --git a/packages/widgets/README.md b/packages/widgets/README.md new file mode 100644 index 0000000000000000000000000000000000000000..64d097d3ae688b6d4aaf833be3df5936100cd641 --- /dev/null +++ b/packages/widgets/README.md @@ -0,0 +1,18 @@ +# Huggingface Widgets + +Open-source version of the inference widgets from huggingface.co + +> Built with Svelte and SvelteKit + +**Demo page:** https://huggingface.co/spaces/huggingfacejs/inference-widgets + +You can also run the demo locally: + +```console +pnpm install +pnpm dev +``` + +## Publishing + +Because `@huggingface/widgets` depends on `@huggingface/tasks`, you need to publish `@huggingface/tasks` first, and then `@huggingface/widgets`. 
diff --git a/packages/widgets/package.json b/packages/widgets/package.json new file mode 100644 index 0000000000000000000000000000000000000000..b8e394020e990df805b798ac8579ddaab30b5ef2 --- /dev/null +++ b/packages/widgets/package.json @@ -0,0 +1,59 @@ +{ + "name": "@huggingface/widgets", + "packageManager": "pnpm@8.10.5", + "version": "0.0.4", + "publishConfig": { + "access": "public" + }, + "scripts": { + "dev": "vite dev", + "build": "vite build && npm run package", + "preview": "vite preview", + "package": "svelte-kit sync && svelte-package && publint", + "prepublishOnly": "npm run package", + "check": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json", + "check:watch": "svelte-kit sync && svelte-check --tsconfig ./tsconfig.json --watch", + "lint": "eslint --quiet --fix --ext .cjs,.ts .", + "lint:check": "eslint --ext .cjs,.ts .", + "format": "prettier --write .", + "format:check": "prettier --check ." + }, + "exports": { + ".": { + "types": "./dist/index.d.ts", + "svelte": "./dist/index.js" + } + }, + "files": [ + "dist", + "src", + "!dist/**/*.test.*", + "!dist/**/*.spec.*", + "static/audioProcessor.js" + ], + "dependencies": { + "@huggingface/tasks": "workspace:^" + }, + "peerDependencies": { + "svelte": "^3.59.2" + }, + "devDependencies": { + "@sveltejs/adapter-auto": "^2.0.0", + "@sveltejs/kit": "^1.27.4", + "@sveltejs/package": "^2.0.0", + "@tailwindcss/forms": "^0.5.7", + "autoprefixer": "^10.4.16", + "eslint": "^8.28.0", + "postcss": "^8.4.31", + "publint": "^0.1.9", + "svelte": "^3.59.2", + "svelte-check": "^3.6.0", + "tailwindcss": "^3.3.5", + "tslib": "^2.4.1", + "typescript": "^5.0.0", + "vite": "^4.0.0" + }, + "svelte": "./dist/index.js", + "types": "./dist/index.d.ts", + "type": "module" +} diff --git a/packages/widgets/pnpm-lock.yaml b/packages/widgets/pnpm-lock.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4703ba0b3d6ea38e79755934691d35e664890b38 --- /dev/null +++ b/packages/widgets/pnpm-lock.yaml @@ -0,0 +1,1973 @@ +lockfileVersion: '6.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +dependencies: + '@huggingface/tasks': + specifier: workspace:^ + version: link:../tasks + +devDependencies: + '@sveltejs/adapter-auto': + specifier: ^2.0.0 + version: 2.0.0(@sveltejs/kit@1.27.4) + '@sveltejs/kit': + specifier: ^1.27.4 + version: 1.27.4(svelte@3.59.2)(vite@4.5.0) + '@sveltejs/package': + specifier: ^2.0.0 + version: 2.0.0(svelte@3.59.2)(typescript@5.0.4) + '@tailwindcss/forms': + specifier: ^0.5.7 + version: 0.5.7(tailwindcss@3.3.5) + autoprefixer: + specifier: ^10.4.16 + version: 10.4.16(postcss@8.4.31) + eslint: + specifier: ^8.28.0 + version: 8.28.0 + postcss: + specifier: ^8.4.31 + version: 8.4.31 + publint: + specifier: ^0.1.9 + version: 0.1.9 + svelte: + specifier: ^3.59.2 + version: 3.59.2 + svelte-check: + specifier: ^3.6.0 + version: 3.6.0(postcss@8.4.31)(svelte@3.59.2) + tailwindcss: + specifier: ^3.3.5 + version: 3.3.5 + tslib: + specifier: ^2.4.1 + version: 2.4.1 + typescript: + specifier: ^5.0.0 + version: 5.0.4 + vite: + specifier: ^4.0.0 + version: 4.5.0 + +packages: + + /@aashutoshrathi/word-wrap@1.2.6: + resolution: {integrity: sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==} + engines: {node: '>=0.10.0'} + dev: true + + /@alloc/quick-lru@5.2.0: + resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + dev: true + + /@esbuild/android-arm64@0.18.20: + 
resolution: {integrity: sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/android-arm@0.18.20: + resolution: {integrity: sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/android-x64@0.18.20: + resolution: {integrity: sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/darwin-arm64@0.18.20: + resolution: {integrity: sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@esbuild/darwin-x64@0.18.20: + resolution: {integrity: sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@esbuild/freebsd-arm64@0.18.20: + resolution: {integrity: sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/freebsd-x64@0.18.20: + resolution: {integrity: sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-arm64@0.18.20: + resolution: {integrity: sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-arm@0.18.20: + resolution: {integrity: sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-ia32@0.18.20: + resolution: {integrity: sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-loong64@0.18.20: + resolution: {integrity: sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-mips64el@0.18.20: + resolution: {integrity: sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-ppc64@0.18.20: + resolution: {integrity: sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-riscv64@0.18.20: + resolution: {integrity: 
sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-s390x@0.18.20: + resolution: {integrity: sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-x64@0.18.20: + resolution: {integrity: sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/netbsd-x64@0.18.20: + resolution: {integrity: sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/openbsd-x64@0.18.20: + resolution: {integrity: sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/sunos-x64@0.18.20: + resolution: {integrity: sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-arm64@0.18.20: + resolution: {integrity: sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-ia32@0.18.20: + resolution: {integrity: sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-x64@0.18.20: + resolution: {integrity: sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@eslint/eslintrc@1.4.1: + resolution: {integrity: sha512-XXrH9Uarn0stsyldqDYq8r++mROmWRI1xKMXa640Bb//SY1+ECYX6VzT6Lcx5frD0V30XieqJ0oX9I2Xj5aoMA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + ajv: 6.12.6 + debug: 4.3.4 + espree: 9.6.1 + globals: 13.23.0 + ignore: 5.3.0 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@fastify/busboy@2.1.0: + resolution: {integrity: sha512-+KpH+QxZU7O4675t3mnkQKcZZg56u+K/Ct2K+N2AZYNVK8kyeo/bI18tI8aPm3tvNNRyTWfj6s5tnGNlcbQRsA==} + engines: {node: '>=14'} + dev: true + + /@humanwhocodes/config-array@0.11.13: + resolution: {integrity: sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==} + engines: {node: '>=10.10.0'} + dependencies: + '@humanwhocodes/object-schema': 2.0.1 + debug: 4.3.4 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@humanwhocodes/module-importer@1.0.1: + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + dev: true + + /@humanwhocodes/object-schema@2.0.1: + resolution: 
{integrity: sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==} + dev: true + + /@jridgewell/gen-mapping@0.3.3: + resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/set-array': 1.1.2 + '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/trace-mapping': 0.3.20 + dev: true + + /@jridgewell/resolve-uri@3.1.1: + resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} + engines: {node: '>=6.0.0'} + dev: true + + /@jridgewell/set-array@1.1.2: + resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==} + engines: {node: '>=6.0.0'} + dev: true + + /@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + dev: true + + /@jridgewell/trace-mapping@0.3.20: + resolution: {integrity: sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==} + dependencies: + '@jridgewell/resolve-uri': 3.1.1 + '@jridgewell/sourcemap-codec': 1.4.15 + dev: true + + /@nodelib/fs.scandir@2.1.5: + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + dev: true + + /@nodelib/fs.stat@2.0.5: + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + dev: true + + /@nodelib/fs.walk@1.2.8: + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.15.0 + dev: true + + /@polka/url@1.0.0-next.23: + resolution: {integrity: sha512-C16M+IYz0rgRhWZdCmK+h58JMv8vijAA61gmz2rspCSwKwzBebpdcsiUmwrtJRdphuY30i6BSLEOP8ppbNLyLg==} + dev: true + + /@sveltejs/adapter-auto@2.0.0(@sveltejs/kit@1.27.4): + resolution: {integrity: sha512-b+gkHFZgD771kgV3aO4avHFd7y1zhmMYy9i6xOK7m/rwmwaRO8gnF5zBc0Rgca80B2PMU1bKNxyBTHA14OzUAQ==} + peerDependencies: + '@sveltejs/kit': ^1.0.0 + dependencies: + '@sveltejs/kit': 1.27.4(svelte@3.59.2)(vite@4.5.0) + import-meta-resolve: 2.2.2 + dev: true + + /@sveltejs/kit@1.27.4(svelte@3.59.2)(vite@4.5.0): + resolution: {integrity: sha512-Vxl8Jf0C1+/8i/slsxFOnwJntCBDLueO/O6GJ0390KUnyW3Zs+4ZiIinD+cEcYnJPQQ9CRzVSr9Bn6DbmTn4Dw==} + engines: {node: ^16.14 || >=18} + hasBin: true + requiresBuild: true + peerDependencies: + svelte: ^3.54.0 || ^4.0.0-next.0 || ^5.0.0-next.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte': 2.5.2(svelte@3.59.2)(vite@4.5.0) + '@types/cookie': 0.5.4 + cookie: 0.5.0 + devalue: 4.3.2 + esm-env: 1.0.0 + kleur: 4.1.5 + magic-string: 0.30.5 + mrmime: 1.0.1 + sade: 1.8.1 + set-cookie-parser: 2.6.0 + sirv: 2.0.3 + svelte: 3.59.2 + tiny-glob: 0.2.9 + undici: 5.26.5 + vite: 4.5.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@sveltejs/package@2.0.0(svelte@3.59.2)(typescript@5.0.4): + resolution: {integrity: sha512-sANz/dJibOHOe83hl8pFWUSypqefdYwPp6SUr0SmJxTNQFB5dDECEqwAwoy28DWCQFYl7DU+C1hKkTXyuKOdug==} + engines: {node: ^16.14 || >=18} + hasBin: true + peerDependencies: + svelte: ^3.44.0 + dependencies: + chokidar: 3.5.3 + kleur: 4.1.5 + sade: 
1.8.1 + svelte: 3.59.2 + svelte2tsx: 0.6.25(svelte@3.59.2)(typescript@5.0.4) + transitivePeerDependencies: + - typescript + dev: true + + /@sveltejs/vite-plugin-svelte-inspector@1.0.4(@sveltejs/vite-plugin-svelte@2.5.2)(svelte@3.59.2)(vite@4.5.0): + resolution: {integrity: sha512-zjiuZ3yydBtwpF3bj0kQNV0YXe+iKE545QGZVTaylW3eAzFr+pJ/cwK8lZEaRp4JtaJXhD5DyWAV4AxLh6DgaQ==} + engines: {node: ^14.18.0 || >= 16} + peerDependencies: + '@sveltejs/vite-plugin-svelte': ^2.2.0 + svelte: ^3.54.0 || ^4.0.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte': 2.5.2(svelte@3.59.2)(vite@4.5.0) + debug: 4.3.4 + svelte: 3.59.2 + vite: 4.5.0 + transitivePeerDependencies: + - supports-color + dev: true + + /@sveltejs/vite-plugin-svelte@2.5.2(svelte@3.59.2)(vite@4.5.0): + resolution: {integrity: sha512-Dfy0Rbl+IctOVfJvWGxrX/3m6vxPLH8o0x+8FA5QEyMUQMo4kGOVIojjryU7YomBAexOTAuYf1RT7809yDziaA==} + engines: {node: ^14.18.0 || >= 16} + peerDependencies: + svelte: ^3.54.0 || ^4.0.0 || ^5.0.0-next.0 + vite: ^4.0.0 + dependencies: + '@sveltejs/vite-plugin-svelte-inspector': 1.0.4(@sveltejs/vite-plugin-svelte@2.5.2)(svelte@3.59.2)(vite@4.5.0) + debug: 4.3.4 + deepmerge: 4.3.1 + kleur: 4.1.5 + magic-string: 0.30.5 + svelte: 3.59.2 + svelte-hmr: 0.15.3(svelte@3.59.2) + vite: 4.5.0 + vitefu: 0.2.5(vite@4.5.0) + transitivePeerDependencies: + - supports-color + dev: true + + /@tailwindcss/forms@0.5.7(tailwindcss@3.3.5): + resolution: {integrity: sha512-QE7X69iQI+ZXwldE+rzasvbJiyV/ju1FGHH0Qn2W3FKbuYtqp8LKcy6iSw79fVUT5/Vvf+0XgLCeYVG+UV6hOw==} + peerDependencies: + tailwindcss: '>=3.0.0 || >= 3.0.0-alpha.1' + dependencies: + mini-svg-data-uri: 1.4.4 + tailwindcss: 3.3.5 + dev: true + + /@types/cookie@0.5.4: + resolution: {integrity: sha512-7z/eR6O859gyWIAjuvBWFzNURmf2oPBmJlfVWkwehU5nzIyjwBsTh7WMmEEV4JFnHuQ3ex4oyTvfKzcyJVDBNA==} + dev: true + + /@types/pug@2.0.9: + resolution: {integrity: sha512-Yg4LkgFYvn1faISbDNWmcAC1XoDT8IoMUFspp5mnagKk+UvD2N0IWt5A7GRdMubsNWqgCLmrkf8rXkzNqb4szA==} + dev: true + + /acorn-jsx@5.3.2(acorn@8.11.2): + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + dependencies: + acorn: 8.11.2 + dev: true + + /acorn@8.11.2: + resolution: {integrity: sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==} + engines: {node: '>=0.4.0'} + hasBin: true + dev: true + + /ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + dev: true + + /ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + dev: true + + /ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + dependencies: + color-convert: 2.0.1 + dev: true + + /any-promise@1.3.0: + resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} + dev: true + + /anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + dev: true + + /arg@5.0.2: 
+ resolution: {integrity: sha512-PYjyFOLKQ9y57JvQ6QLo8dAgNqswh8M1RMJYdQduT6xbWSgK36P/Z/v+p888pM69jMMfS8Xd8F6I1kQ/I9HUGg==} + dev: true + + /argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + dev: true + + /autoprefixer@10.4.16(postcss@8.4.31): + resolution: {integrity: sha512-7vd3UC6xKp0HLfua5IjZlcXvGAGy7cBAXTg2lyQ/8WpNhd6SiZ8Be+xm3FyBSYJx5GKcpRCzBh7RH4/0dnY+uQ==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + dependencies: + browserslist: 4.22.1 + caniuse-lite: 1.0.30001563 + fraction.js: 4.3.7 + normalize-range: 0.1.2 + picocolors: 1.0.0 + postcss: 8.4.31 + postcss-value-parser: 4.2.0 + dev: true + + /balanced-match@1.0.2: + resolution: {integrity: sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + dev: true + + /binary-extensions@2.2.0: + resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==} + engines: {node: '>=8'} + dev: true + + /brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + dev: true + + /brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + dependencies: + balanced-match: 1.0.2 + dev: true + + /braces@3.0.2: + resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} + engines: {node: '>=8'} + dependencies: + fill-range: 7.0.1 + dev: true + + /browserslist@4.22.1: + resolution: {integrity: sha512-FEVc202+2iuClEhZhrWy6ZiAcRLvNMyYcxZ8raemul1DYVOVdFsbqckWLdsixQZCpJlwe77Z3UTalE7jsjnKfQ==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + dependencies: + caniuse-lite: 1.0.30001563 + electron-to-chromium: 1.4.588 + node-releases: 2.0.13 + update-browserslist-db: 1.0.13(browserslist@4.22.1) + dev: true + + /buffer-crc32@0.2.13: + resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==} + dev: true + + /callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + dev: true + + /camelcase-css@2.0.1: + resolution: {integrity: sha512-QOSvevhslijgYwRx6Rv7zKdMF8lbRmx+uQGx2+vDc+KI/eBnsy9kit5aj23AgGu3pa4t9AgwbnXWqS+iOY+2aA==} + engines: {node: '>= 6'} + dev: true + + /caniuse-lite@1.0.30001563: + resolution: {integrity: sha512-na2WUmOxnwIZtwnFI2CZ/3er0wdNzU7hN+cPYz/z2ajHThnkWjNBOpEPP4n+4r2WPM847JaMotaJE3bnfzjyKw==} + dev: true + + /chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + dev: true + + /chokidar@3.5.3: + resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} + engines: {node: '>= 8.10.0'} + dependencies: + anymatch: 3.1.3 + braces: 3.0.2 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.3 + dev: true + + /color-convert@2.0.1: + resolution: {integrity: 
sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + dependencies: + color-name: 1.1.4 + dev: true + + /color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + dev: true + + /commander@4.1.1: + resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} + engines: {node: '>= 6'} + dev: true + + /concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + dev: true + + /cookie@0.5.0: + resolution: {integrity: sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==} + engines: {node: '>= 0.6'} + dev: true + + /cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: '>= 8'} + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + dev: true + + /cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + dev: true + + /debug@4.3.4: + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.2 + dev: true + + /dedent-js@1.0.1: + resolution: {integrity: sha512-OUepMozQULMLUmhxS95Vudo0jb0UchLimi3+pQ2plj61Fcy8axbP9hbiD4Sz6DPqn6XG3kfmziVfQ1rSys5AJQ==} + dev: true + + /deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + dev: true + + /deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + dev: true + + /detect-indent@6.1.0: + resolution: {integrity: sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==} + engines: {node: '>=8'} + dev: true + + /devalue@4.3.2: + resolution: {integrity: sha512-KqFl6pOgOW+Y6wJgu80rHpo2/3H07vr8ntR9rkkFIRETewbf5GaYYcakYfiKz89K+sLsuPkQIZaXDMjUObZwWg==} + dev: true + + /didyoumean@1.2.2: + resolution: {integrity: sha512-gxtyfqMg7GKyhQmb056K7M3xszy/myH8w+B4RT+QXBQsvAOdc3XymqDDPHx1BgPgsdAA5SIifona89YtRATDzw==} + dev: true + + /dlv@1.1.3: + resolution: {integrity: sha512-+HlytyjlPKnIG8XuRG8WvmBP8xs8P71y+SKKS6ZXWoEgLuePxtDoUEiH7WkdePWrQ5JBpE6aoVqfZfJUQkjXwA==} + dev: true + + /doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + dependencies: + esutils: 2.0.3 + dev: true + + /electron-to-chromium@1.4.588: + resolution: {integrity: sha512-soytjxwbgcCu7nh5Pf4S2/4wa6UIu+A3p03U2yVr53qGxi1/VTR3ENI+p50v+UxqqZAfl48j3z55ud7VHIOr9w==} + dev: true + + /es6-promise@3.3.1: + resolution: {integrity: sha512-SOp9Phqvqn7jtEUxPWdWfWoLmyt2VaJ6MpvP9Comy1MceMXqE6bxvaTu4iaxpYYPzhny28Lc+M87/c2cPK6lDg==} + dev: true + + /esbuild@0.18.20: + resolution: {integrity: sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA==} + engines: {node: '>=12'} + hasBin: true + requiresBuild: true + optionalDependencies: + 
'@esbuild/android-arm': 0.18.20 + '@esbuild/android-arm64': 0.18.20 + '@esbuild/android-x64': 0.18.20 + '@esbuild/darwin-arm64': 0.18.20 + '@esbuild/darwin-x64': 0.18.20 + '@esbuild/freebsd-arm64': 0.18.20 + '@esbuild/freebsd-x64': 0.18.20 + '@esbuild/linux-arm': 0.18.20 + '@esbuild/linux-arm64': 0.18.20 + '@esbuild/linux-ia32': 0.18.20 + '@esbuild/linux-loong64': 0.18.20 + '@esbuild/linux-mips64el': 0.18.20 + '@esbuild/linux-ppc64': 0.18.20 + '@esbuild/linux-riscv64': 0.18.20 + '@esbuild/linux-s390x': 0.18.20 + '@esbuild/linux-x64': 0.18.20 + '@esbuild/netbsd-x64': 0.18.20 + '@esbuild/openbsd-x64': 0.18.20 + '@esbuild/sunos-x64': 0.18.20 + '@esbuild/win32-arm64': 0.18.20 + '@esbuild/win32-ia32': 0.18.20 + '@esbuild/win32-x64': 0.18.20 + dev: true + + /escalade@3.1.1: + resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} + engines: {node: '>=6'} + dev: true + + /escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + dev: true + + /eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + dev: true + + /eslint-utils@3.0.0(eslint@8.28.0): + resolution: {integrity: sha512-uuQC43IGctw68pJA1RgbQS8/NP7rch6Cwd4j3ZBtgo4/8Flj4eGE7ZYSZRN3iq5pVUv6GPdW5Z1RFleo84uLDA==} + engines: {node: ^10.0.0 || ^12.0.0 || >= 14.0.0} + peerDependencies: + eslint: '>=5' + dependencies: + eslint: 8.28.0 + eslint-visitor-keys: 2.1.0 + dev: true + + /eslint-visitor-keys@2.1.0: + resolution: {integrity: sha512-0rSmRBzXgDzIsD6mGdJgevzgezI534Cer5L/vyMX0kHzT/jiB43jRhd9YUlMGYLQy2zprNmoT8qasCGtY+QaKw==} + engines: {node: '>=10'} + dev: true + + /eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: true + + /eslint@8.28.0: + resolution: {integrity: sha512-S27Di+EVyMxcHiwDrFzk8dJYAaD+/5SoWKxL1ri/71CRHsnJnRDPNt2Kzj24+MT9FDupf4aqqyqPrvI8MvQ4VQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + dependencies: + '@eslint/eslintrc': 1.4.1 + '@humanwhocodes/config-array': 0.11.13 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.4 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-utils: 3.0.0(eslint@8.28.0) + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.5.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.23.0 + grapheme-splitter: 1.0.4 + ignore: 5.3.0 + import-fresh: 3.3.0 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-sdsl: 4.4.2 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.3 + regexpp: 3.2.0 + strip-ansi: 6.0.1 + strip-json-comments: 3.1.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + dev: true + + /esm-env@1.0.0: + resolution: {integrity: sha512-Cf6VksWPsTuW01vU9Mk/3vRue91Zevka5SjyNf3nEpokFRuqt/KjUQoGAwq9qMmhpLTHmXzSIrFRw8zxWzmFBA==} + dev: true + + /espree@9.6.1: + resolution: {integrity: 
sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + acorn: 8.11.2 + acorn-jsx: 5.3.2(acorn@8.11.2) + eslint-visitor-keys: 3.4.3 + dev: true + + /esquery@1.5.0: + resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} + engines: {node: '>=0.10'} + dependencies: + estraverse: 5.3.0 + dev: true + + /esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + dependencies: + estraverse: 5.3.0 + dev: true + + /estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + dev: true + + /esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + dev: true + + /fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + dev: true + + /fast-glob@3.3.2: + resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==} + engines: {node: '>=8.6.0'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.5 + dev: true + + /fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + dev: true + + /fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + dev: true + + /fastq@1.15.0: + resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==} + dependencies: + reusify: 1.0.4 + dev: true + + /file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flat-cache: 3.2.0 + dev: true + + /fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: '>=8'} + dependencies: + to-regex-range: 5.0.1 + dev: true + + /find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + dev: true + + /flat-cache@3.2.0: + resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flatted: 3.2.9 + keyv: 4.5.4 + rimraf: 3.0.2 + dev: true + + /flatted@3.2.9: + resolution: {integrity: sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==} + dev: true + + /fraction.js@4.3.7: + resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} + dev: true + + /fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + dev: true + + /fsevents@2.3.3: + resolution: {integrity: 
sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + dev: true + + /glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + dependencies: + is-glob: 4.0.3 + dev: true + + /glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + dependencies: + is-glob: 4.0.3 + dev: true + + /glob@7.1.6: + resolution: {integrity: sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /glob@8.1.0: + resolution: {integrity: sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==} + engines: {node: '>=12'} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 5.1.6 + once: 1.4.0 + dev: true + + /globals@13.23.0: + resolution: {integrity: sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==} + engines: {node: '>=8'} + dependencies: + type-fest: 0.20.2 + dev: true + + /globalyzer@0.1.0: + resolution: {integrity: sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q==} + dev: true + + /globrex@0.1.2: + resolution: {integrity: sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg==} + dev: true + + /graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + dev: true + + /grapheme-splitter@1.0.4: + resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} + dev: true + + /has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + dev: true + + /hasown@2.0.0: + resolution: {integrity: sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==} + engines: {node: '>= 0.4'} + dependencies: + function-bind: 1.1.2 + dev: true + + /ignore-walk@5.0.1: + resolution: {integrity: sha512-yemi4pMf51WKT7khInJqAvsIGzoqYXblnsz0ql8tM+yi1EKYTY1evX4NAbJrLL/Aanr2HyZeluqU+Oi7MGHokw==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + dependencies: + minimatch: 5.1.6 + dev: true + + /ignore@5.3.0: + resolution: {integrity: sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg==} + engines: {node: '>= 4'} + dev: true + + /import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: '>=6'} + dependencies: + 
parent-module: 1.0.1 + resolve-from: 4.0.0 + dev: true + + /import-meta-resolve@2.2.2: + resolution: {integrity: sha512-f8KcQ1D80V7RnqVm+/lirO9zkOxjGxhaTC1IPrBGd3MEfNgmNG67tSUO9gTi2F3Blr2Az6g1vocaxzkVnWl9MA==} + dev: true + + /imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + dev: true + + /inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + dev: true + + /inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + dev: true + + /is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + dependencies: + binary-extensions: 2.2.0 + dev: true + + /is-core-module@2.13.1: + resolution: {integrity: sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==} + dependencies: + hasown: 2.0.0 + dev: true + + /is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + dev: true + + /is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + dependencies: + is-extglob: 2.1.1 + dev: true + + /is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + dev: true + + /is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + dev: true + + /isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + dev: true + + /jiti@1.21.0: + resolution: {integrity: sha512-gFqAIbuKyyso/3G2qhiO2OM6shY6EPP/R0+mkDbyspxKazh8BXDC5FiFsUjlczgdNz/vfra0da2y+aHrusLG/Q==} + hasBin: true + dev: true + + /js-sdsl@4.4.2: + resolution: {integrity: sha512-dwXFwByc/ajSV6m5bcKAPwe4yDDF6D614pxmIi5odytzxRlwqF6nwoiCek80Ixc7Cvma5awClxrzFtxCQvcM8w==} + dev: true + + /js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + dependencies: + argparse: 2.0.1 + dev: true + + /json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + dev: true + + /json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + dev: true + + /json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + dev: true + + /keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + dependencies: + json-buffer: 3.0.1 + dev: true + + /kleur@4.1.5: + resolution: {integrity: sha512-o+NO+8WrRiQEE4/7nwRJhN1HWpVmJm511pBHUxPLtp0BUISzlBplORYSmTclCnJvQq2tKu/sgl3xVpkc7ZWuQQ==} + engines: {node: '>=6'} + dev: true + + /levn@0.4.1: + 
resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: true + + /lilconfig@2.1.0: + resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==} + engines: {node: '>=10'} + dev: true + + /lilconfig@3.0.0: + resolution: {integrity: sha512-K2U4W2Ff5ibV7j7ydLr+zLAkIg5JJ4lPn1Ltsdt+Tz/IjQ8buJ55pZAxoP34lqIiwtF9iAvtLv3JGv7CAyAg+g==} + engines: {node: '>=14'} + dev: true + + /lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + dev: true + + /locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + dependencies: + p-locate: 5.0.0 + dev: true + + /lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: true + + /lower-case@2.0.2: + resolution: {integrity: sha512-7fm3l3NAF9WfN6W3JOmf5drwpVqX78JtoGJ3A6W0a6ZnldM41w2fV5D490psKFTpMds8TJse/eHLFFsNHHjHgg==} + dependencies: + tslib: 2.4.1 + dev: true + + /magic-string@0.27.0: + resolution: {integrity: sha512-8UnnX2PeRAPZuN12svgR9j7M1uWMovg/CEnIwIG0LFkXSJJe4PdfUGiTGl8V9bsBHFUtfVINcSyYxd7q+kx9fA==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + dev: true + + /magic-string@0.30.5: + resolution: {integrity: sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + dev: true + + /merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + dev: true + + /micromatch@4.0.5: + resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==} + engines: {node: '>=8.6'} + dependencies: + braces: 3.0.2 + picomatch: 2.3.1 + dev: true + + /min-indent@1.0.1: + resolution: {integrity: sha512-I9jwMn07Sy/IwOj3zVkVik2JTvgpaykDZEigL6Rx6N9LbMywwUSMtxET+7lVoDLLd3O3IXwJwvuuns8UB/HeAg==} + engines: {node: '>=4'} + dev: true + + /mini-svg-data-uri@1.4.4: + resolution: {integrity: sha512-r9deDe9p5FJUPZAk3A59wGH7Ii9YrjjWw0jmw/liSbHl2CHiyXj6FcDXDu2K3TjVAXqiJdaw3xxwlZZr9E6nHg==} + hasBin: true + dev: true + + /minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + dependencies: + brace-expansion: 1.1.11 + dev: true + + /minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + dependencies: + brace-expansion: 2.0.1 + dev: true + + /minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + dev: true + + /mkdirp@0.5.6: + resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==} + hasBin: true + dependencies: + minimist: 1.2.8 + dev: true + + /mri@1.2.0: + resolution: {integrity: sha512-tzzskb3bG8LvYGFF/mDTpq3jpI6Q9wc3LEmBaghu+DdCssd1FakN7Bc0hVNmEyGq1bq3RgfkCb3cmQLpNPOroA==} + engines: {node: '>=4'} + dev: true + + 
/mrmime@1.0.1: + resolution: {integrity: sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==} + engines: {node: '>=10'} + dev: true + + /ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + dev: true + + /mz@2.7.0: + resolution: {integrity: sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==} + dependencies: + any-promise: 1.3.0 + object-assign: 4.1.1 + thenify-all: 1.6.0 + dev: true + + /nanoid@3.3.7: + resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + dev: true + + /natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + dev: true + + /no-case@3.0.4: + resolution: {integrity: sha512-fgAN3jGAh+RoxUGZHTSOLJIqUc2wmoBwGR4tbpNAKmmovFoWq0OdRkb0VkldReO2a2iBT/OEulG9XSUc10r3zg==} + dependencies: + lower-case: 2.0.2 + tslib: 2.4.1 + dev: true + + /node-releases@2.0.13: + resolution: {integrity: sha512-uYr7J37ae/ORWdZeQ1xxMJe3NtdmqMC/JZK+geofDrkLUApKRHPd18/TxtBOJ4A0/+uUIliorNrfYV6s1b02eQ==} + dev: true + + /normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + dev: true + + /normalize-range@0.1.2: + resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} + engines: {node: '>=0.10.0'} + dev: true + + /npm-bundled@2.0.1: + resolution: {integrity: sha512-gZLxXdjEzE/+mOstGDqR6b0EkhJ+kM6fxM6vUuckuctuVPh80Q6pw/rSZj9s4Gex9GxWtIicO1pc8DB9KZWudw==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + dependencies: + npm-normalize-package-bin: 2.0.0 + dev: true + + /npm-normalize-package-bin@2.0.0: + resolution: {integrity: sha512-awzfKUO7v0FscrSpRoogyNm0sajikhBWpU0QMrW09AMi9n1PoKU6WaIqUzuJSQnpciZZmJ/jMZ2Egfmb/9LiWQ==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + dev: true + + /npm-packlist@5.1.3: + resolution: {integrity: sha512-263/0NGrn32YFYi4J533qzrQ/krmmrWwhKkzwTuM4f/07ug51odoaNjUexxO4vxlzURHcmYMH1QjvHjsNDKLVg==} + engines: {node: ^12.13.0 || ^14.15.0 || >=16.0.0} + hasBin: true + dependencies: + glob: 8.1.0 + ignore-walk: 5.0.1 + npm-bundled: 2.0.1 + npm-normalize-package-bin: 2.0.0 + dev: true + + /object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + dev: true + + /object-hash@3.0.0: + resolution: {integrity: sha512-RSn9F68PjH9HqtltsSnqYC1XXoWe9Bju5+213R98cNGttag9q9yAOTzdbsqvIa7aNm5WffBZFpWYr2aWrklWAw==} + engines: {node: '>= 6'} + dev: true + + /once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + dependencies: + wrappy: 1.0.2 + dev: true + + /optionator@0.9.3: + resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==} + engines: {node: '>= 0.8.0'} + dependencies: + '@aashutoshrathi/word-wrap': 1.2.6 + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: true + + /p-limit@3.1.0: + resolution: {integrity: 
sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + dependencies: + yocto-queue: 0.1.0 + dev: true + + /p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + dependencies: + p-limit: 3.1.0 + dev: true + + /parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + dependencies: + callsites: 3.1.0 + dev: true + + /pascal-case@3.1.2: + resolution: {integrity: sha512-uWlGT3YSnK9x3BQJaOdcZwrnV6hPpd8jFH1/ucpiLRPh/2zCVJKS19E4GvYHvaCcACn3foXZ0cLB9Wrx1KGe5g==} + dependencies: + no-case: 3.0.4 + tslib: 2.4.1 + dev: true + + /path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + dev: true + + /path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + dev: true + + /path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + dev: true + + /path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + dev: true + + /picocolors@1.0.0: + resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} + dev: true + + /picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + dev: true + + /pify@2.3.0: + resolution: {integrity: sha512-udgsAY+fTnvv7kI7aaxbqwWNb0AHiB0qBO89PZKPkoTmGOgdbrHDKD+0B2X4uTfJ/FT1R09r9gTsjUjNJotuog==} + engines: {node: '>=0.10.0'} + dev: true + + /pirates@4.0.6: + resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==} + engines: {node: '>= 6'} + dev: true + + /postcss-import@15.1.0(postcss@8.4.31): + resolution: {integrity: sha512-hpr+J05B2FVYUAXHeK1YyI267J/dDDhMU6B6civm8hSY1jYJnBXxzKDKDswzJmtLHryrjhnDjqqp/49t8FALew==} + engines: {node: '>=14.0.0'} + peerDependencies: + postcss: ^8.0.0 + dependencies: + postcss: 8.4.31 + postcss-value-parser: 4.2.0 + read-cache: 1.0.0 + resolve: 1.22.8 + dev: true + + /postcss-js@4.0.1(postcss@8.4.31): + resolution: {integrity: sha512-dDLF8pEO191hJMtlHFPRa8xsizHaM82MLfNkUHdUtVEV3tgTp5oj+8qbEqYM57SLfc74KSbw//4SeJma2LRVIw==} + engines: {node: ^12 || ^14 || >= 16} + peerDependencies: + postcss: ^8.4.21 + dependencies: + camelcase-css: 2.0.1 + postcss: 8.4.31 + dev: true + + /postcss-load-config@4.0.2(postcss@8.4.31): + resolution: {integrity: sha512-bSVhyJGL00wMVoPUzAVAnbEoWyqRxkjv64tUl427SKnPrENtq6hJwUojroMz2VB+Q1edmi4IfrAPpami5VVgMQ==} + engines: {node: '>= 14'} + peerDependencies: + postcss: '>=8.0.9' + ts-node: '>=9.0.0' + peerDependenciesMeta: + postcss: + optional: true + ts-node: + optional: true + dependencies: + lilconfig: 3.0.0 + postcss: 8.4.31 + yaml: 2.3.4 + dev: true + + /postcss-nested@6.0.1(postcss@8.4.31): + resolution: {integrity: sha512-mEp4xPMi5bSWiMbsgoPfcP74lsWLHkQbZc3sY+jWYd65CUwXrUaTp0fmNpa01ZcETKlIgUdFN/MpS2xZtqL9dQ==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.2.14 + 
dependencies: + postcss: 8.4.31 + postcss-selector-parser: 6.0.13 + dev: true + + /postcss-selector-parser@6.0.13: + resolution: {integrity: sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==} + engines: {node: '>=4'} + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + dev: true + + /postcss-value-parser@4.2.0: + resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + dev: true + + /postcss@8.4.31: + resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==} + engines: {node: ^10 || ^12 || >=14} + dependencies: + nanoid: 3.3.7 + picocolors: 1.0.0 + source-map-js: 1.0.2 + dev: true + + /prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + dev: true + + /publint@0.1.9: + resolution: {integrity: sha512-O53y7vbePxuGFmEjgcrafMSlDpOJwOkj8YdexOt7yWlv7SB3rXoT3mHknyMJ3lf2UFH5Bmt6tnIkHcOTR6dEoA==} + engines: {node: '>=16'} + hasBin: true + dependencies: + npm-packlist: 5.1.3 + picocolors: 1.0.0 + sade: 1.8.1 + dev: true + + /punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + dev: true + + /queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + dev: true + + /read-cache@1.0.0: + resolution: {integrity: sha512-Owdv/Ft7IjOgm/i0xvNDZ1LrRANRfew4b2prF3OWMQLxLfu3bS8FVhCsrSCMK4lR56Y9ya+AThoTpDCTxCmpRA==} + dependencies: + pify: 2.3.0 + dev: true + + /readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + dependencies: + picomatch: 2.3.1 + dev: true + + /regexpp@3.2.0: + resolution: {integrity: sha512-pq2bWo9mVD43nbts2wGv17XLiNLya+GklZ8kaDLV2Z08gDCsGpnKn9BFMepvWuHCbyVvY7J5o5+BVvoQbmlJLg==} + engines: {node: '>=8'} + dev: true + + /resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + dev: true + + /resolve@1.22.8: + resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} + hasBin: true + dependencies: + is-core-module: 2.13.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + dev: true + + /reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + dev: true + + /rimraf@2.7.1: + resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==} + hasBin: true + dependencies: + glob: 7.2.3 + dev: true + + /rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + hasBin: true + dependencies: + glob: 7.2.3 + dev: true + + /rollup@3.29.4: + resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==} + engines: {node: '>=14.18.0', npm: '>=8.0.0'} + hasBin: true + optionalDependencies: + fsevents: 2.3.3 + dev: true + + /run-parallel@1.2.0: + resolution: {integrity: 
sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + dependencies: + queue-microtask: 1.2.3 + dev: true + + /sade@1.8.1: + resolution: {integrity: sha512-xal3CZX1Xlo/k4ApwCFrHVACi9fBqJ7V+mwhBsuf/1IOKbBy098Fex+Wa/5QMubw09pSZ/u8EY8PWgevJsXp1A==} + engines: {node: '>=6'} + dependencies: + mri: 1.2.0 + dev: true + + /sander@0.5.1: + resolution: {integrity: sha512-3lVqBir7WuKDHGrKRDn/1Ye3kwpXaDOMsiRP1wd6wpZW56gJhsbp5RqQpA6JG/P+pkXizygnr1dKR8vzWaVsfA==} + dependencies: + es6-promise: 3.3.1 + graceful-fs: 4.2.11 + mkdirp: 0.5.6 + rimraf: 2.7.1 + dev: true + + /set-cookie-parser@2.6.0: + resolution: {integrity: sha512-RVnVQxTXuerk653XfuliOxBP81Sf0+qfQE73LIYKcyMYHG94AuH0kgrQpRDuTZnSmjpysHmzxJXKNfa6PjFhyQ==} + dev: true + + /shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + dependencies: + shebang-regex: 3.0.0 + dev: true + + /shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + dev: true + + /sirv@2.0.3: + resolution: {integrity: sha512-O9jm9BsID1P+0HOi81VpXPoDxYP374pkOLzACAoyUQ/3OUVndNpsz6wMnY2z+yOxzbllCKZrM+9QrWsv4THnyA==} + engines: {node: '>= 10'} + dependencies: + '@polka/url': 1.0.0-next.23 + mrmime: 1.0.1 + totalist: 3.0.1 + dev: true + + /sorcery@0.11.0: + resolution: {integrity: sha512-J69LQ22xrQB1cIFJhPfgtLuI6BpWRiWu1Y3vSsIwK/eAScqJxd/+CJlUuHQRdX2C9NGFamq+KqNywGgaThwfHw==} + hasBin: true + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + buffer-crc32: 0.2.13 + minimist: 1.2.8 + sander: 0.5.1 + dev: true + + /source-map-js@1.0.2: + resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==} + engines: {node: '>=0.10.0'} + dev: true + + /strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + dependencies: + ansi-regex: 5.0.1 + dev: true + + /strip-indent@3.0.0: + resolution: {integrity: sha512-laJTa3Jb+VQpaC6DseHhF7dXVqHTfJPCRDaEbid/drOhgitgYku/letMUqOXFoWV0zIIUbjpdH2t+tYj4bQMRQ==} + engines: {node: '>=8'} + dependencies: + min-indent: 1.0.1 + dev: true + + /strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + dev: true + + /sucrase@3.34.0: + resolution: {integrity: sha512-70/LQEZ07TEcxiU2dz51FKaE6hCTWC6vr7FOk3Gr0U60C3shtAN+H+BFr9XlYe5xqf3RA8nrc+VIwzCfnxuXJw==} + engines: {node: '>=8'} + hasBin: true + dependencies: + '@jridgewell/gen-mapping': 0.3.3 + commander: 4.1.1 + glob: 7.1.6 + lines-and-columns: 1.2.4 + mz: 2.7.0 + pirates: 4.0.6 + ts-interface-checker: 0.1.13 + dev: true + + /supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + dependencies: + has-flag: 4.0.0 + dev: true + + /supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + dev: true + + /svelte-check@3.6.0(postcss@8.4.31)(svelte@3.59.2): + resolution: {integrity: sha512-8VfqhfuRJ1sKW+o8isH2kPi0RhjXH1nNsIbCFGyoUHG+ZxVxHYRKcb+S8eaL/1tyj3VGvWYx3Y5+oCUsJgnzcw==} + hasBin: true + peerDependencies: 
+ svelte: ^3.55.0 || ^4.0.0-next.0 || ^4.0.0 || ^5.0.0-next.0 + dependencies: + '@jridgewell/trace-mapping': 0.3.20 + chokidar: 3.5.3 + fast-glob: 3.3.2 + import-fresh: 3.3.0 + picocolors: 1.0.0 + sade: 1.8.1 + svelte: 3.59.2 + svelte-preprocess: 5.1.0(postcss@8.4.31)(svelte@3.59.2)(typescript@5.0.4) + typescript: 5.0.4 + transitivePeerDependencies: + - '@babel/core' + - coffeescript + - less + - postcss + - postcss-load-config + - pug + - sass + - stylus + - sugarss + dev: true + + /svelte-hmr@0.15.3(svelte@3.59.2): + resolution: {integrity: sha512-41snaPswvSf8TJUhlkoJBekRrABDXDMdpNpT2tfHIv4JuhgvHqLMhEPGtaQn0BmbNSTkuz2Ed20DF2eHw0SmBQ==} + engines: {node: ^12.20 || ^14.13.1 || >= 16} + peerDependencies: + svelte: ^3.19.0 || ^4.0.0 + dependencies: + svelte: 3.59.2 + dev: true + + /svelte-preprocess@5.1.0(postcss@8.4.31)(svelte@3.59.2)(typescript@5.0.4): + resolution: {integrity: sha512-EkErPiDzHAc0k2MF5m6vBNmRUh338h2myhinUw/xaqsLs7/ZvsgREiLGj03VrSzbY/TB5ZXgBOsKraFee5yceA==} + engines: {node: '>= 14.10.0'} + requiresBuild: true + peerDependencies: + '@babel/core': ^7.10.2 + coffeescript: ^2.5.1 + less: ^3.11.3 || ^4.0.0 + postcss: ^7 || ^8 + postcss-load-config: ^2.1.0 || ^3.0.0 || ^4.0.0 + pug: ^3.0.0 + sass: ^1.26.8 + stylus: ^0.55.0 + sugarss: ^2.0.0 || ^3.0.0 || ^4.0.0 + svelte: ^3.23.0 || ^4.0.0-next.0 || ^4.0.0 || ^5.0.0-next.0 + typescript: '>=3.9.5 || ^4.0.0 || ^5.0.0' + peerDependenciesMeta: + '@babel/core': + optional: true + coffeescript: + optional: true + less: + optional: true + postcss: + optional: true + postcss-load-config: + optional: true + pug: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + typescript: + optional: true + dependencies: + '@types/pug': 2.0.9 + detect-indent: 6.1.0 + magic-string: 0.27.0 + postcss: 8.4.31 + sorcery: 0.11.0 + strip-indent: 3.0.0 + svelte: 3.59.2 + typescript: 5.0.4 + dev: true + + /svelte2tsx@0.6.25(svelte@3.59.2)(typescript@5.0.4): + resolution: {integrity: sha512-hhBKL5X9gGvKQAZ9xLoHnbE9Yb00HxEZJlxcj2drxWK+Tpqcs/bnodjSfCGbqEhvNaUXYNbVL7s4dEXT+o0f6w==} + peerDependencies: + svelte: ^3.55 || ^4.0.0-next.0 || ^4.0 || ^5.0.0-next.0 + typescript: ^4.9.4 || ^5.0.0 + dependencies: + dedent-js: 1.0.1 + pascal-case: 3.1.2 + svelte: 3.59.2 + typescript: 5.0.4 + dev: true + + /svelte@3.59.2: + resolution: {integrity: sha512-vzSyuGr3eEoAtT/A6bmajosJZIUWySzY2CzB3w2pgPvnkUjGqlDnsNnA0PMO+mMAhuyMul6C2uuZzY6ELSkzyA==} + engines: {node: '>= 8'} + dev: true + + /tailwindcss@3.3.5: + resolution: {integrity: sha512-5SEZU4J7pxZgSkv7FP1zY8i2TIAOooNZ1e/OGtxIEv6GltpoiXUqWvLy89+a10qYTB1N5Ifkuw9lqQkN9sscvA==} + engines: {node: '>=14.0.0'} + hasBin: true + dependencies: + '@alloc/quick-lru': 5.2.0 + arg: 5.0.2 + chokidar: 3.5.3 + didyoumean: 1.2.2 + dlv: 1.1.3 + fast-glob: 3.3.2 + glob-parent: 6.0.2 + is-glob: 4.0.3 + jiti: 1.21.0 + lilconfig: 2.1.0 + micromatch: 4.0.5 + normalize-path: 3.0.0 + object-hash: 3.0.0 + picocolors: 1.0.0 + postcss: 8.4.31 + postcss-import: 15.1.0(postcss@8.4.31) + postcss-js: 4.0.1(postcss@8.4.31) + postcss-load-config: 4.0.2(postcss@8.4.31) + postcss-nested: 6.0.1(postcss@8.4.31) + postcss-selector-parser: 6.0.13 + resolve: 1.22.8 + sucrase: 3.34.0 + transitivePeerDependencies: + - ts-node + dev: true + + /text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + dev: true + + /thenify-all@1.6.0: + resolution: {integrity: 
sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} + engines: {node: '>=0.8'} + dependencies: + thenify: 3.3.1 + dev: true + + /thenify@3.3.1: + resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==} + dependencies: + any-promise: 1.3.0 + dev: true + + /tiny-glob@0.2.9: + resolution: {integrity: sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==} + dependencies: + globalyzer: 0.1.0 + globrex: 0.1.2 + dev: true + + /to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + dependencies: + is-number: 7.0.0 + dev: true + + /totalist@3.0.1: + resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==} + engines: {node: '>=6'} + dev: true + + /ts-interface-checker@0.1.13: + resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} + dev: true + + /tslib@2.4.1: + resolution: {integrity: sha512-tGyy4dAjRIEwI7BzsB0lynWgOpfqjUdq91XXAlIWD2OwKBH7oCl/GZG/HT4BOHrTlPMOASlMQ7veyTqpmRcrNA==} + dev: true + + /type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + dev: true + + /type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + dev: true + + /typescript@5.0.4: + resolution: {integrity: sha512-cW9T5W9xY37cc+jfEnaUvX91foxtHkza3Nw3wkoF4sSlKn0MONdkdEndig/qPBWXNkmplh3NzayQzCiHM4/hqw==} + engines: {node: '>=12.20'} + hasBin: true + dev: true + + /undici@5.26.5: + resolution: {integrity: sha512-cSb4bPFd5qgR7qr2jYAi0hlX9n5YKK2ONKkLFkxl+v/9BvC0sOpZjBHDBSXc5lWAf5ty9oZdRXytBIHzgUcerw==} + engines: {node: '>=14.0'} + dependencies: + '@fastify/busboy': 2.1.0 + dev: true + + /update-browserslist-db@1.0.13(browserslist@4.22.1): + resolution: {integrity: sha512-xebP81SNcPuNpPP3uzeW1NYXxI3rxyJzF3pD6sH4jE7o/IX+WtSpwnVU+qIsDPyk0d3hmFQ7mjqc6AtV604hbg==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + dependencies: + browserslist: 4.22.1 + escalade: 3.1.1 + picocolors: 1.0.0 + dev: true + + /uri-js@4.4.1: + resolution: {integrity: sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.3.1 + dev: true + + /util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + dev: true + + /vite@4.5.0: + resolution: {integrity: sha512-ulr8rNLA6rkyFAlVWw2q5YJ91v098AFQ2R0PRFwPzREXOUJQPtFUG0t+/ZikhaOCDqFoDhN6/v8Sq0o4araFAw==} + engines: {node: ^14.18.0 || >=16.0.0} + hasBin: true + peerDependencies: + '@types/node': '>= 14' + less: '*' + lightningcss: ^1.21.0 + sass: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + dependencies: + esbuild: 0.18.20 + postcss: 8.4.31 + rollup: 3.29.4 + optionalDependencies: + fsevents: 2.3.3 + dev: true + + /vitefu@0.2.5(vite@4.5.0): + resolution: {integrity: 
sha512-SgHtMLoqaeeGnd2evZ849ZbACbnwQCIwRH57t18FxcXoZop0uQu0uzlIhJBlF/eWVzuce0sHeqPcDo+evVcg8Q==} + peerDependencies: + vite: ^3.0.0 || ^4.0.0 || ^5.0.0 + peerDependenciesMeta: + vite: + optional: true + dependencies: + vite: 4.5.0 + dev: true + + /which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + dependencies: + isexe: 2.0.0 + dev: true + + /wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + dev: true + + /yaml@2.3.4: + resolution: {integrity: sha512-8aAvwVUSHpfEqTQ4w/KMlf3HcRdt50E5ODIQJBw1fQ5RL34xabzxtUlzTXVqc4rkZsPbvrXKWnABCD7kWSmocA==} + engines: {node: '>= 14'} + dev: true + + /yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + dev: true diff --git a/packages/widgets/postcss.config.js b/packages/widgets/postcss.config.js new file mode 100644 index 0000000000000000000000000000000000000000..7b75c83aff1c05e0e0e315638e07a22314603d4d --- /dev/null +++ b/packages/widgets/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + tailwindcss: {}, + autoprefixer: {}, + }, +}; diff --git a/packages/widgets/src/app.d.ts b/packages/widgets/src/app.d.ts new file mode 100644 index 0000000000000000000000000000000000000000..f59b884c51ed3c31fc0738fd38d0d75b580df5e4 --- /dev/null +++ b/packages/widgets/src/app.d.ts @@ -0,0 +1,12 @@ +// See https://kit.svelte.dev/docs/types#app +// for information about these interfaces +declare global { + namespace App { + // interface Error {} + // interface Locals {} + // interface PageData {} + // interface Platform {} + } +} + +export {}; diff --git a/packages/widgets/src/app.html b/packages/widgets/src/app.html new file mode 100644 index 0000000000000000000000000000000000000000..f22aeaad5e392f5121f38b7d9e6ba033438b3103 --- /dev/null +++ b/packages/widgets/src/app.html @@ -0,0 +1,12 @@ + + + + + + + %sveltekit.head% + + +
%sveltekit.body%
+ + diff --git a/packages/widgets/src/lib/components/DemoThemeSwitcher/DemoThemeSwitcher.svelte b/packages/widgets/src/lib/components/DemoThemeSwitcher/DemoThemeSwitcher.svelte new file mode 100644 index 0000000000000000000000000000000000000000..244e51cc6740cfef9d2ffea8b524c58a4abaa5fd --- /dev/null +++ b/packages/widgets/src/lib/components/DemoThemeSwitcher/DemoThemeSwitcher.svelte @@ -0,0 +1,60 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconAudioClassification.svelte b/packages/widgets/src/lib/components/Icons/IconAudioClassification.svelte new file mode 100644 index 0000000000000000000000000000000000000000..963570923853f7e72c2c4c31e24c8e8f45dd32f3 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconAudioClassification.svelte @@ -0,0 +1,21 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconAudioToAudio.svelte b/packages/widgets/src/lib/components/Icons/IconAudioToAudio.svelte new file mode 100644 index 0000000000000000000000000000000000000000..ae5c4aeef4cbaded1451e7672855dca33c8a5550 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconAudioToAudio.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconAutomaticSpeechRecognition.svelte b/packages/widgets/src/lib/components/Icons/IconAutomaticSpeechRecognition.svelte new file mode 100644 index 0000000000000000000000000000000000000000..01b18c541c81c064f42cbe3b5fd74051b1b1563f --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconAutomaticSpeechRecognition.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconAzureML.svelte b/packages/widgets/src/lib/components/Icons/IconAzureML.svelte new file mode 100644 index 0000000000000000000000000000000000000000..3fc373b4ba05f593431fdad7ef35068aebca2aa1 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconAzureML.svelte @@ -0,0 +1,40 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconCaretDown.svelte b/packages/widgets/src/lib/components/Icons/IconCaretDown.svelte new file mode 100644 index 0000000000000000000000000000000000000000..ae3574382b017c17f79271e6c9de9d12ee725a6f --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconCaretDown.svelte @@ -0,0 +1,19 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconCaretDownV2.svelte b/packages/widgets/src/lib/components/Icons/IconCaretDownV2.svelte new file mode 100644 index 0000000000000000000000000000000000000000..f8b4baac5a1e6de91b0c3149a89111cee87369fa --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconCaretDownV2.svelte @@ -0,0 +1,11 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconCode.svelte b/packages/widgets/src/lib/components/Icons/IconCode.svelte new file mode 100644 index 0000000000000000000000000000000000000000..a84e7b970a940cd4bec28b1900dae603e7248a11 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconCode.svelte @@ -0,0 +1,21 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconConversational.svelte b/packages/widgets/src/lib/components/Icons/IconConversational.svelte new file mode 100644 index 0000000000000000000000000000000000000000..38e01c10bd5be501bc16904e9c7cbd1762fe04fe --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconConversational.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconCross.svelte b/packages/widgets/src/lib/components/Icons/IconCross.svelte new file mode 100644 index 
0000000000000000000000000000000000000000..ae6b04ffca6828d1483f4aea8522641f2c82320c --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconCross.svelte @@ -0,0 +1,21 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconDepthEstimation.svelte b/packages/widgets/src/lib/components/Icons/IconDepthEstimation.svelte new file mode 100644 index 0000000000000000000000000000000000000000..83362801b18c72cd5972afa26719047eec12a597 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconDepthEstimation.svelte @@ -0,0 +1,10 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconDocumentQuestionAnswering.svelte b/packages/widgets/src/lib/components/Icons/IconDocumentQuestionAnswering.svelte new file mode 100644 index 0000000000000000000000000000000000000000..6c1c842972c809eda5952190e55d59e438db115b --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconDocumentQuestionAnswering.svelte @@ -0,0 +1,13 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconFeatureExtraction.svelte b/packages/widgets/src/lib/components/Icons/IconFeatureExtraction.svelte new file mode 100644 index 0000000000000000000000000000000000000000..c1806a76a7f0e0bdd1016652316370c524ed5f70 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconFeatureExtraction.svelte @@ -0,0 +1,21 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconFile.svelte b/packages/widgets/src/lib/components/Icons/IconFile.svelte new file mode 100644 index 0000000000000000000000000000000000000000..0d50da5d5391eaf66918d7b0b9d35c418e5daa01 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconFile.svelte @@ -0,0 +1,21 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconFillMask.svelte b/packages/widgets/src/lib/components/Icons/IconFillMask.svelte new file mode 100644 index 0000000000000000000000000000000000000000..e096d5a8cf27e9e8af6ac4e99c2a95ac31360386 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconFillMask.svelte @@ -0,0 +1,27 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconGraphML.svelte b/packages/widgets/src/lib/components/Icons/IconGraphML.svelte new file mode 100644 index 0000000000000000000000000000000000000000..6000b5ab3dbb44511396fd5549cd0662a4319c71 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconGraphML.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconImageClassification.svelte b/packages/widgets/src/lib/components/Icons/IconImageClassification.svelte new file mode 100644 index 0000000000000000000000000000000000000000..fa83eb2057bb9faab35d59b9936e382df316cfa0 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconImageClassification.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconImageSegmentation.svelte b/packages/widgets/src/lib/components/Icons/IconImageSegmentation.svelte new file mode 100644 index 0000000000000000000000000000000000000000..d84e55c747ed85fd76388e90ebd81e783ec9cf21 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconImageSegmentation.svelte @@ -0,0 +1,24 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconImageToImage.svelte b/packages/widgets/src/lib/components/Icons/IconImageToImage.svelte new file mode 100644 index 0000000000000000000000000000000000000000..1ea332d30392075d9e224d76683b37b99cdf1927 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconImageToImage.svelte @@ -0,0 +1,25 @@ + + + diff --git 
a/packages/widgets/src/lib/components/Icons/IconImageToText.svelte b/packages/widgets/src/lib/components/Icons/IconImageToText.svelte new file mode 100644 index 0000000000000000000000000000000000000000..8263c0801f56beeef9c64c7fbb96bf66eda61227 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconImageToText.svelte @@ -0,0 +1,28 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconInfo.svelte b/packages/widgets/src/lib/components/Icons/IconInfo.svelte new file mode 100644 index 0000000000000000000000000000000000000000..6b5a9b1f993bb62e649dfe42aa3c47c5ef200998 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconInfo.svelte @@ -0,0 +1,20 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconLightning.svelte b/packages/widgets/src/lib/components/Icons/IconLightning.svelte new file mode 100644 index 0000000000000000000000000000000000000000..91d87c2ccbac546e8e21f2e022866a64feb9cbaf --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconLightning.svelte @@ -0,0 +1,18 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconMagicWand.svelte b/packages/widgets/src/lib/components/Icons/IconMagicWand.svelte new file mode 100644 index 0000000000000000000000000000000000000000..7aa63b2a2947ad0fca833ae3cd7f7ab953c650ce --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconMagicWand.svelte @@ -0,0 +1,22 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconMaximize.svelte b/packages/widgets/src/lib/components/Icons/IconMaximize.svelte new file mode 100644 index 0000000000000000000000000000000000000000..872037067ba188e8c90c275f09d368228e3faf7f --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconMaximize.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconMicrophone.svelte b/packages/widgets/src/lib/components/Icons/IconMicrophone.svelte new file mode 100644 index 0000000000000000000000000000000000000000..d4638549352378b7cd2a8212d3c053e885d7ba72 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconMicrophone.svelte @@ -0,0 +1,25 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconObjectDetection.svelte b/packages/widgets/src/lib/components/Icons/IconObjectDetection.svelte new file mode 100644 index 0000000000000000000000000000000000000000..ae1e1a7a68974667d77a1899a04a7e787e2cc48d --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconObjectDetection.svelte @@ -0,0 +1,25 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconQuestionAnswering.svelte b/packages/widgets/src/lib/components/Icons/IconQuestionAnswering.svelte new file mode 100644 index 0000000000000000000000000000000000000000..b57d90ac4597afba15027b2a2768a3e91c223bfb --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconQuestionAnswering.svelte @@ -0,0 +1,21 @@ + + + + + + + + diff --git a/packages/widgets/src/lib/components/Icons/IconReinforcementLearning.svelte b/packages/widgets/src/lib/components/Icons/IconReinforcementLearning.svelte new file mode 100644 index 0000000000000000000000000000000000000000..802dc243766e5d04db7624aa0cb49acd86b2400e --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconReinforcementLearning.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconRobotics.svelte b/packages/widgets/src/lib/components/Icons/IconRobotics.svelte new file mode 100644 index 0000000000000000000000000000000000000000..6f354638e8dd8a4bc20ecec7acab2785752f3f23 --- /dev/null +++ 
b/packages/widgets/src/lib/components/Icons/IconRobotics.svelte @@ -0,0 +1,22 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconRow.svelte b/packages/widgets/src/lib/components/Icons/IconRow.svelte new file mode 100644 index 0000000000000000000000000000000000000000..7710d2b52baaee24c2e54efca5bf799c91b969fa --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconRow.svelte @@ -0,0 +1,16 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconSentenceSimilarity.svelte b/packages/widgets/src/lib/components/Icons/IconSentenceSimilarity.svelte new file mode 100644 index 0000000000000000000000000000000000000000..def158c732d5c3c4b016202e8de56b4ad57210b2 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconSentenceSimilarity.svelte @@ -0,0 +1,25 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconSpin.svelte b/packages/widgets/src/lib/components/Icons/IconSpin.svelte new file mode 100644 index 0000000000000000000000000000000000000000..2afa35d83977a3b7ac1a0e078f14291ae99897f3 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconSpin.svelte @@ -0,0 +1,30 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconSummarization.svelte b/packages/widgets/src/lib/components/Icons/IconSummarization.svelte new file mode 100644 index 0000000000000000000000000000000000000000..8c0ee2fc50d95e4ac11c7148f96e6dcb68f48886 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconSummarization.svelte @@ -0,0 +1,22 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTableQuestionAnswering.svelte b/packages/widgets/src/lib/components/Icons/IconTableQuestionAnswering.svelte new file mode 100644 index 0000000000000000000000000000000000000000..c11027345e504ce119d01a1799e75ee7ac2a6c96 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTableQuestionAnswering.svelte @@ -0,0 +1,21 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTabularClassification.svelte b/packages/widgets/src/lib/components/Icons/IconTabularClassification.svelte new file mode 100644 index 0000000000000000000000000000000000000000..d4ce233b18d18866b3fd0853151949309d95daac --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTabularClassification.svelte @@ -0,0 +1,22 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTabularRegression.svelte b/packages/widgets/src/lib/components/Icons/IconTabularRegression.svelte new file mode 100644 index 0000000000000000000000000000000000000000..ed4355c5dc54a6480a3ac895b5be4326e7187e8e --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTabularRegression.svelte @@ -0,0 +1,20 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconText2textGeneration.svelte b/packages/widgets/src/lib/components/Icons/IconText2textGeneration.svelte new file mode 100644 index 0000000000000000000000000000000000000000..ed5a2c6c7acb3b8edca487cee7bdc69df5303acd --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconText2textGeneration.svelte @@ -0,0 +1,27 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTextClassification.svelte b/packages/widgets/src/lib/components/Icons/IconTextClassification.svelte new file mode 100644 index 0000000000000000000000000000000000000000..e747e6d2d3e80ce3845c166eeca3d509acb1bd87 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTextClassification.svelte @@ -0,0 +1,33 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTextGeneration.svelte 
b/packages/widgets/src/lib/components/Icons/IconTextGeneration.svelte new file mode 100644 index 0000000000000000000000000000000000000000..fa0153e0d878f64b7cd3989beb606bdd5dc060b5 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTextGeneration.svelte @@ -0,0 +1,25 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTextToImage.svelte b/packages/widgets/src/lib/components/Icons/IconTextToImage.svelte new file mode 100644 index 0000000000000000000000000000000000000000..a7adab86fbb37dd8786645150d2929359d2efe30 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTextToImage.svelte @@ -0,0 +1,25 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTextToSpeech.svelte b/packages/widgets/src/lib/components/Icons/IconTextToSpeech.svelte new file mode 100644 index 0000000000000000000000000000000000000000..7c533cfcb0d267a2ff38fc8ffdebf549cdecb419 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTextToSpeech.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTextToVideo.svelte b/packages/widgets/src/lib/components/Icons/IconTextToVideo.svelte new file mode 100644 index 0000000000000000000000000000000000000000..3082ea2e673af93e3ca0b14fcde4c018334ad4c9 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTextToVideo.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTokenClassification.svelte b/packages/widgets/src/lib/components/Icons/IconTokenClassification.svelte new file mode 100644 index 0000000000000000000000000000000000000000..27f3d3625882a5bf62d6ca0b03e44d9b869360c0 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTokenClassification.svelte @@ -0,0 +1,33 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconTranslation.svelte b/packages/widgets/src/lib/components/Icons/IconTranslation.svelte new file mode 100644 index 0000000000000000000000000000000000000000..c0d3444206f9a6c97bc9a79cd23fbdb73dc89df1 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconTranslation.svelte @@ -0,0 +1,24 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconUnconditionalImageGeneration.svelte b/packages/widgets/src/lib/components/Icons/IconUnconditionalImageGeneration.svelte new file mode 100644 index 0000000000000000000000000000000000000000..880d6ae9c6b505ed592c307ec9f206c5549bcdce --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconUnconditionalImageGeneration.svelte @@ -0,0 +1,22 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconVideoClassification.svelte b/packages/widgets/src/lib/components/Icons/IconVideoClassification.svelte new file mode 100644 index 0000000000000000000000000000000000000000..669a372caa8710b7ace03fba1f1cd5b599dacb6d --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconVideoClassification.svelte @@ -0,0 +1,24 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconVoiceActivityDetection.svelte b/packages/widgets/src/lib/components/Icons/IconVoiceActivityDetection.svelte new file mode 100644 index 0000000000000000000000000000000000000000..c896fee502d47582dc6c5815f7f7ed71dd783563 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconVoiceActivityDetection.svelte @@ -0,0 +1,23 @@ + + + diff --git a/packages/widgets/src/lib/components/Icons/IconZeroShotClassification.svelte b/packages/widgets/src/lib/components/Icons/IconZeroShotClassification.svelte new file mode 100644 index 
0000000000000000000000000000000000000000..ebbe940ef2409569fd2e58573e00d7e7e5193de6 --- /dev/null +++ b/packages/widgets/src/lib/components/Icons/IconZeroShotClassification.svelte @@ -0,0 +1,51 @@ + + + diff --git a/packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..faf06bf8ee498e07fbd4721396535c0d710d9872 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/InferenceWidget.svelte @@ -0,0 +1,100 @@ + + +{#if widgetComponent} + +{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAddSentenceBtn/WidgetAddSentenceBtn.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAddSentenceBtn/WidgetAddSentenceBtn.svelte new file mode 100644 index 0000000000000000000000000000000000000000..7eca76ae45429333b59d3ea234b80862bb6359a4 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAddSentenceBtn/WidgetAddSentenceBtn.svelte @@ -0,0 +1,12 @@ + + +{#if !isDisabled} + + +{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAudioTrack/WidgetAudioTrack.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAudioTrack/WidgetAudioTrack.svelte new file mode 100644 index 0000000000000000000000000000000000000000..b17f29a561a142068eb9197951ee65ad55c5abcb --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetAudioTrack/WidgetAudioTrack.svelte @@ -0,0 +1,17 @@ + + +
+ {#if $$slots.default} + + {:else if label.length} +
{label}
+ {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetBloomDecoding/WidgetBloomDecoding.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetBloomDecoding/WidgetBloomDecoding.svelte new file mode 100644 index 0000000000000000000000000000000000000000..19d6306456089fc7f46fe928109ef876b1037965 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetBloomDecoding/WidgetBloomDecoding.svelte @@ -0,0 +1,55 @@ + + + (isPromptTipOpen = false)} /> + +
+
+
+ sampling + +
+
+
+
+
+ greedy +
+
+ + (isPromptTipOpen = true)} + >ⓘ BLOOM prompting tips + {#if isPromptTipOpen} +
+ A good prompt: Do NOT talk to BLOOM as if it were an entity; it's not a chatbot but a webpage/blog/article completion
+ model. For the best behaviour, MIMIC a few words of a webpage similar to the content you want to generate.
+ Start a sentence as if YOU were writing a blog, webpage, math post, or coding article, and BLOOM will generate a
+ coherent follow-up.
+
+ {/if} +
+
+

+ {description} +

+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetCheckbox/WidgetCheckbox.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetCheckbox/WidgetCheckbox.svelte new file mode 100644 index 0000000000000000000000000000000000000000..e4e975e81f262640d72009967cb58f912b44549f --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetCheckbox/WidgetCheckbox.svelte @@ -0,0 +1,17 @@ + + + + + + + diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetDropzone/WidgetDropzone.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetDropzone/WidgetDropzone.svelte new file mode 100644 index 0000000000000000000000000000000000000000..3cfb0f007df2465df58dca8dcf24ca1b7e7543b9 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetDropzone/WidgetDropzone.svelte @@ -0,0 +1,89 @@ + + + + +
{ + fileInput.click(); + }} + on:dragenter={() => { + isDragging = true; + }} + on:dragleave={() => { + isDragging = false; + }} + on:dragover|preventDefault + on:drop|preventDefault={onDrop} +> + {#if !imgSrc && !isDisabled} + {label} + {:else} +
+ +
+ {/if} + {#if isLoading} +
+ +
+ {/if} +
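The dropzone's script block is not shown above; a plausible sketch of the onDrop handler it wires up, using the isDragging and imgSrc state the template references (a hypothetical reconstruction, not the component's actual code):

function onDrop(e: DragEvent): void {
	isDragging = false;
	const file = e.dataTransfer?.files?.[0];
	if (file) {
		// Preview the dropped image before handing the file to the widget's inference call.
		imgSrc = URL.createObjectURL(file);
	}
}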
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetExample.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetExample.ts
new file mode 100644
index 0000000000000000000000000000000000000000..6428e7b0a429fd5024b50a034366a31a683db5c1
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetExample.ts
@@ -0,0 +1,114 @@
+type TableData = Record<string, (string | number)[]>;
+
+//#region outputs
+export type WidgetExampleOutputLabels = Array<{ label: string; score: number }>;
+export interface WidgetExampleOutputAnswerScore {
+	answer: string;
+	score: number;
+}
+export interface WidgetExampleOutputText {
+	text: string;
+}
+export interface WidgetExampleOutputUrl {
+	url: string;
+}
+
+export type WidgetExampleOutput =
+	| WidgetExampleOutputLabels
+	| WidgetExampleOutputAnswerScore
+	| WidgetExampleOutputText
+	| WidgetExampleOutputUrl;
+//#endregion
+
+export interface WidgetExampleBase<TOutput = WidgetExampleOutput> {
+	example_title?: string;
+	group?: string;
+	/**
+	 * Potential overrides to API parameters for this specific example
+	 * (takes precedence over the model card metadata's inference.parameters)
+	 */
+	parameters?: {
+		/// token-classification
+		aggregation_strategy?: string;
+		/// text-generation
+		top_k?: number;
+		top_p?: number;
+		temperature?: number;
+		max_new_tokens?: number;
+		do_sample?: boolean;
+		/// text-to-image
+		negative_prompt?: string;
+		guidance_scale?: number;
+		num_inference_steps?: number;
+	};
+	/**
+	 * Optional output
+	 */
+	output?: TOutput;
+}
+
+export interface WidgetExampleTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	text: string;
+}
+
+export interface WidgetExampleTextAndContextInput<TOutput = WidgetExampleOutput>
+	extends WidgetExampleTextInput<TOutput> {
+	context: string;
+}
+
+export interface WidgetExampleTextAndTableInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
+	table: TableData;
+}
+
+export interface WidgetExampleAssetInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	src: string;
+}
+export interface WidgetExampleAssetAndPromptInput<TOutput = WidgetExampleOutput>
+	extends WidgetExampleAssetInput<TOutput> {
+	prompt: string;
+}
+
+export type WidgetExampleAssetAndTextInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> &
+	WidgetExampleTextInput<TOutput>;
+
+export type WidgetExampleAssetAndZeroShotInput<TOutput = WidgetExampleOutput> = WidgetExampleAssetInput<TOutput> &
+	WidgetExampleZeroShotTextInput<TOutput>;
+
+export interface WidgetExampleStructuredDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	structured_data: TableData;
+}
+
+export interface WidgetExampleTableDataInput<TOutput = WidgetExampleOutput> extends WidgetExampleBase<TOutput> {
+	table: TableData;
+}
+
+export interface WidgetExampleZeroShotTextInput<TOutput = WidgetExampleOutput> extends WidgetExampleTextInput<TOutput> {
+	text: string;
+	candidate_labels: string;
+	multi_class: boolean;
+}
+
+export interface WidgetExampleSentenceSimilarityInput<TOutput = WidgetExampleOutput>
+	extends WidgetExampleBase<TOutput> {
+	source_sentence: string;
+	sentences: string[];
+}
+
+//#endregion
+
+export type WidgetExample<TOutput = WidgetExampleOutput> =
+	| WidgetExampleTextInput<TOutput>
+	| WidgetExampleTextAndContextInput<TOutput>
+	| WidgetExampleTextAndTableInput<TOutput>
+	| WidgetExampleAssetInput<TOutput>
+	| WidgetExampleAssetAndPromptInput<TOutput>
+	| WidgetExampleAssetAndTextInput<TOutput>
+	| WidgetExampleAssetAndZeroShotInput<TOutput>
+	| WidgetExampleStructuredDataInput<TOutput>
+	| WidgetExampleTableDataInput<TOutput>
+	| WidgetExampleZeroShotTextInput<TOutput>
+	| WidgetExampleSentenceSimilarityInput<TOutput>;
+
+type KeysOfUnion<T> = T extends unknown ? keyof T : never;
+
+export type WidgetExampleAttribute = KeysOfUnion<WidgetExample>;
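For illustration, a minimal example that satisfies these types; the model card values are hypothetical:

const example: WidgetExampleTextInput = {
	example_title: "Sampling",
	text: "My name is Julien and I like to",
	// Per-example overrides of the API parameters:
	parameters: { temperature: 0.7, do_sample: true, max_new_tokens: 50 },
	// Optional expected output, here a WidgetExampleOutputText:
	output: { text: "My name is Julien and I like to code." },
};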
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFileInput/WidgetFileInput.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFileInput/WidgetFileInput.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..9e4c6a54f6fe067142d497c92a87de2d94f5c088
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFileInput/WidgetFileInput.svelte
@@ -0,0 +1,56 @@
+
+
+{#if !isDisabled}
+
{ + isDragging = true; + }} + on:dragover|preventDefault + on:dragleave={() => { + isDragging = false; + }} + on:drop|preventDefault={(e) => { + isDragging = false; + fileInput.files = e.dataTransfer?.files ?? null; + onChange(); + }} + > + +
+{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFooter/WidgetFooter.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFooter/WidgetFooter.svelte new file mode 100644 index 0000000000000000000000000000000000000000..1ff6749498e0dea03461af2ba5285bdf4eeacc3b --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetFooter/WidgetFooter.svelte @@ -0,0 +1,33 @@ + + +
+ {#if !isDisabled} + + {/if} + +
+{#if outputJson && isOutputJsonVisible} +
{outputJson}
+{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetHeader/WidgetHeader.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetHeader/WidgetHeader.svelte new file mode 100644 index 0000000000000000000000000000000000000000..a899908a40cbee3ca25bd0da8a576edfcff99a2d --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetHeader/WidgetHeader.svelte @@ -0,0 +1,49 @@ + + +
+ {#if !noTitle} + {#if title} +
+ {title} +
+ {:else} +
+ {#if !isDisabled} + + Inference API + {:else} + Inference Examples + {/if} +
+ + + + {/if} + {/if} +
+
+ {#if pipeline && task} + + + + {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte new file mode 100644 index 0000000000000000000000000000000000000000..f374dabef274a4c4e8d31852ddb67296f1eebb8f --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInfo/WidgetInfo.svelte @@ -0,0 +1,116 @@ + + +
+
+ {#if model.id === "bigscience/bloom"} +
+
+ Powered by  + AzureML +
+
+
+ {@html getStatusReport(modelLoadInfo, azureState, true)} +
+
+ {:else if computeTime} + Computation time on {getComputeTypeMsg()}: {computeTime} + {:else if (model.inference === InferenceDisplayability.Yes || model.pipeline_tag === "reinforcement-learning") && !modelTooBig} + {@html getStatusReport(modelLoadInfo, state)} + {:else if model.inference === InferenceDisplayability.ExplicitOptOut} + Inference API has been turned off for this model. + {:else if model.inference === InferenceDisplayability.CustomCode} + Inference API does not yet support model repos that contain custom code. + {:else if model.inference === InferenceDisplayability.LibraryNotDetected} + + Unable to determine this model's library. Check the + + docs + . + + {:else if model.inference === InferenceDisplayability.PipelineNotDetected} + + Unable to determine this model’s pipeline type. Check the + + docs + . + + {:else if model.inference === InferenceDisplayability.PipelineLibraryPairNotSupported} + + Inference API does not yet support {model.library_name} models for this pipeline type. + + {:else if modelTooBig} + + Model is too large to load onto the free Inference API. To try the model, launch it on Inference Endpoints + instead. + + {:else} + + + Inference API is disabled for an unknown reason. Please open a + Discussion in the Community tab. + + {/if} +
+ {#if error} +
{error}
+ {/if} +
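For reference, the conditions above branch on an InferenceDisplayability enum; a sketch of the members they use (member names are taken from the conditions, string values are an assumption):

enum InferenceDisplayability {
	Yes = "Yes",
	ExplicitOptOut = "ExplicitOptOut",
	CustomCode = "CustomCode",
	LibraryNotDetected = "LibraryNotDetected",
	PipelineNotDetected = "PipelineNotDetected",
	PipelineLibraryPairNotSupported = "PipelineLibraryPairNotSupported",
}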
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamples/WidgetInputSamples.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamples/WidgetInputSamples.svelte new file mode 100644 index 0000000000000000000000000000000000000000..be37c4b60b33c25c07aa35b847779722383eaffa --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamples/WidgetInputSamples.svelte @@ -0,0 +1,98 @@ + + + + +
+ +
+
{title}
+ +
+ + {#if isOptionsVisible} +
+
+ {#each inputSamples as { example_title }, i} + +
_previewInputSample(i)} + on:click={() => _applyInputSample(i)} + > + {example_title} +
+ {/each} +
+
+ {/if} +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamplesGroup/WidgetInputSamplesGroup.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamplesGroup/WidgetInputSamplesGroup.svelte new file mode 100644 index 0000000000000000000000000000000000000000..97283b98841fedde314399a791b6234e9dd13e23 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetInputSamplesGroup/WidgetInputSamplesGroup.svelte @@ -0,0 +1,81 @@ + + + + +
+ +
+
{title}
+ +
+ + {#if isOptionsVisible} +
+
+ {#each inputGroups as inputGroup, i} + +
chooseInputGroup(i)} + > + {inputGroup} +
+ {/each} +
+
+ {/if} +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetLabel/WidgetLabel.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetLabel/WidgetLabel.svelte new file mode 100644 index 0000000000000000000000000000000000000000..03f1e5c9e62a401a095a479340169e587cc6e557 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetLabel/WidgetLabel.svelte @@ -0,0 +1,13 @@ + + + diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetModelLoading/WidgetModelLoading.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetModelLoading/WidgetModelLoading.svelte new file mode 100644 index 0000000000000000000000000000000000000000..5c7e5f7618f062d6aa98bf3e272ea216e163f6fb --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetModelLoading/WidgetModelLoading.svelte @@ -0,0 +1,38 @@ + + +
+
+
+ + Model is loading +
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOuputConvoBubble/WidgetOutputConvoBubble.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOuputConvoBubble/WidgetOutputConvoBubble.svelte new file mode 100644 index 0000000000000000000000000000000000000000..31ab4efa56cac7e7fd431fc59325f01eb1be0691 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOuputConvoBubble/WidgetOutputConvoBubble.svelte @@ -0,0 +1,12 @@ + + +
+ {text} +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputChart/WidgetOutputChart.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputChart/WidgetOutputChart.svelte new file mode 100644 index 0000000000000000000000000000000000000000..a1fe65892620fa198f86a84da759a4b55c318dc5 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputChart/WidgetOutputChart.svelte @@ -0,0 +1,65 @@ + + + +{#if output.length} +
+ + {#each output as { score, color }, index} + +
mouseover(index)} + on:mouseout={mouseout} + > +
+
+ {text(output[index])} +
+ {score.toFixed(3)} +
+ {/each} +
+{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputConvo/WidgetOutputConvo.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputConvo/WidgetOutputConvo.svelte new file mode 100644 index 0000000000000000000000000000000000000000..5a14cf9db92ad55501034c9b7c4408c6057693a1 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputConvo/WidgetOutputConvo.svelte @@ -0,0 +1,33 @@ + + +
+
+ Input a message to start chatting with + {modelId}. +
+
+ {#each output as exchange} + + + {/each} +
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTableQA/WidgetOutputTableQA.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTableQA/WidgetOutputTableQA.svelte new file mode 100644 index 0000000000000000000000000000000000000000..410fc55c33e8c5e228bb3f0b8f126b175c7b621c --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTableQA/WidgetOutputTableQA.svelte @@ -0,0 +1,52 @@ + + +
+ {#if isAnswerOnlyOutput} + {output.answer} + {:else} + + {#if output.cells.length} + {output.cells.length} + match{output.cells.length > 1 ? "es" : ""} + : + {:else} + No matches + {/if} + + {#if output.cells.length} + {#each output.cells as answer} + {answer} + {/each} + {#if output.aggregator !== "NONE"} + {output.aggregator} + {/if} + {/if} + {/if} +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputText/WidgetOutputText.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputText/WidgetOutputText.svelte new file mode 100644 index 0000000000000000000000000000000000000000..e1d051506286f1216c3f712b2986dbb6e97463cb --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputText/WidgetOutputText.svelte @@ -0,0 +1,10 @@ + + +{#if output.length} +

+ {output} +

+{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTokens/WidgetOutputTokens.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTokens/WidgetOutputTokens.svelte new file mode 100644 index 0000000000000000000000000000000000000000..58343a2cc4bb9d7a1281e2d7bf687571ceff3fcd --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetOutputTokens/WidgetOutputTokens.svelte @@ -0,0 +1,94 @@ + + +{#if text && output.length} + +
+ {@html render(text, output)} +
+{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetQuickInput/WidgetQuickInput.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetQuickInput/WidgetQuickInput.svelte new file mode 100644 index 0000000000000000000000000000000000000000..3a60295b8ef5787de9aaf66c94d0d0d146ec2a14 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetQuickInput/WidgetQuickInput.svelte @@ -0,0 +1,29 @@ + + +
+ + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRadio/WidgetRadio.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRadio/WidgetRadio.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..7cfb06a492e722d08045b5d41c9a186e6f6f05fb
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRadio/WidgetRadio.svelte
@@ -0,0 +1,21 @@
+
+
+
+
+
+
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/Recorder.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/Recorder.ts
new file mode 100644
index 0000000000000000000000000000000000000000..be0e295d56a90514f9cd379dcf716126ab0caa0b
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/Recorder.ts
@@ -0,0 +1,103 @@
+export default class Recorder {
+	// see developers.google.com/web/updates/2016/01/mediarecorder
+	type: "audio" | "video" = "audio";
+	private apiToken: string | undefined;
+	private audioContext: AudioContext | undefined;
+	private isLoggedIn = false;
+	private isModelLoaded = false;
+	private isEmptyBuffer = false;
+	private modelId: string;
+	private onError: (err: string) => void;
+	private updateModelLoading: (isLoading: boolean, estimatedTime?: number) => void;
+	private renderText: (txt: string) => void;
+	private renderWarning: (warning: string) => void;
+	private socket: WebSocket | undefined;
+	private stream: MediaStream | undefined;
+
+	constructor(
+		modelId: string,
+		apiToken: string | undefined,
+		renderText: (txt: string) => void,
+		renderWarning: (warning: string) => void,
+		onError: (err: string) => void,
+		updateModelLoading: (isLoading: boolean, estimatedTime?: number) => void
+	) {
+		this.modelId = modelId;
+		this.apiToken = apiToken || "";
+		this.renderText = renderText;
+		this.renderWarning = renderWarning;
+		this.onError = onError;
+		this.updateModelLoading = updateModelLoading;
+	}
+
+	async start(): Promise<void> {
+		const constraints: MediaStreamConstraints = this.type === "video" ?
{ audio: true, video: true } : { audio: true };
+		this.stream = await navigator.mediaDevices.getUserMedia(constraints);
+
+		this.socket = new WebSocket(`wss://api-inference.huggingface.co/asr/live/cpu/${this.modelId}`);
+
+		this.socket.onerror = () => {
+			this.onError("WebSocket connection error");
+		};
+
+		this.socket.onopen = () => {
+			this.socket?.send(`Bearer ${this.apiToken}`);
+		};
+
+		this.updateModelLoading(true);
+
+		this.socket.onmessage = (e: MessageEvent) => {
+			const data = JSON.parse(e.data);
+			if (data.type === "status" && data.message === "Successful login") {
+				this.isLoggedIn = true;
+			} else if (data.type === "status" && !!data.estimated_time && !this.isModelLoaded) {
+				this.updateModelLoading(true, data.estimated_time);
+			} else {
+				// data.type === "results"
+				this.isModelLoaded = true;
+				if (data.text) {
+					this.renderText(data.text);
+				} else if (!this.isEmptyBuffer) {
+					this.renderWarning("result was empty");
+				}
+			}
+		};
+
+		this.audioContext = new AudioContext();
+		await this.audioContext.audioWorklet.addModule("/audioProcessor.js");
+		const microphone = this.audioContext.createMediaStreamSource(this.stream);
+		const dataExtractor = new AudioWorkletNode(this.audioContext, "AudioDataExtractor");
+		microphone.connect(dataExtractor).connect(this.audioContext.destination);
+
+		dataExtractor.port.onmessage = (event) => {
+			const { buffer, sampling_rate: samplingRate } = event.data;
+			this.isEmptyBuffer = buffer.reduce((sum: number, x: number) => sum + x) === 0;
+			if (this.isModelLoaded && this.isEmptyBuffer) {
+				this.renderWarning("🎤 input is empty: try speaking louder 🗣️ & make sure correct mic source is selected");
+			}
+			const base64: string = btoa(String.fromCharCode(...new Uint8Array(buffer.buffer)));
+			const message = {
+				raw: base64,
+				sampling_rate: samplingRate,
+			};
+			if (this.isLoggedIn) {
+				try {
+					this.socket?.send(JSON.stringify(message));
+				} catch (e) {
+					this.onError(`Error sending data to websocket: ${e}`);
+				}
+			}
+		};
+	}
+
+	stop(): void {
+		this.isLoggedIn = false;
+		void this.audioContext?.close();
+		this.socket?.close();
+		if (this.stream) {
+			for (const t of this.stream.getTracks()) {
+				t.stop();
+			}
+		}
+	}
+}
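The handlers above pin down the wire format of the live ASR WebSocket; a sketch of the message shapes as TypeScript types, inferred from this file alone rather than an official schema:

type ServerMessage =
	| { type: "status"; message?: string; estimated_time?: number } // login confirmation and model-loading updates
	| { type: "results"; text?: string }; // streaming transcription results

interface ClientAudioMessage {
	raw: string; // base64-encoded audio chunk from the AudioWorklet
	sampling_rate: number;
}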
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/WidgetRealtimeRecorder.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/WidgetRealtimeRecorder.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..6056084c2381b0f041180748bcba3e5410d9f4d1
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/WidgetRealtimeRecorder.svelte
@@ -0,0 +1,98 @@
+
+
+
+
+{#if isRecording}
+
+	{#if warning}
+

{warning}

+ {:else} +

{txt}

+ {/if} +
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/Recorder.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/Recorder.ts
new file mode 100644
index 0000000000000000000000000000000000000000..39bed256c3e4909b1346db5b29505ad5b438449a
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/Recorder.ts
@@ -0,0 +1,69 @@
+import { delay } from "$lib/utils/ViewUtils.js";
+
+export default class Recorder {
+	// see developers.google.com/web/updates/2016/01/mediarecorder
+	type: "audio" | "video" = "audio";
+	private stream?: MediaStream;
+	private mediaRecorder?: MediaRecorder;
+	private recordedBlobs: Blob[] = [];
+	public outputBlob?: Blob;
+
+	get desiredMimeType(): string {
+		return this.type === "video" ? "video/webm" : "audio/webm";
+	}
+	get mimeType(): string {
+		if (!this.mediaRecorder) {
+			throw new Error("MediaRecorder not initialized");
+		}
+		return this.mediaRecorder.mimeType;
+	}
+	async start(): Promise<void> {
+		this.recordedBlobs = [];
+
+		const constraints: MediaStreamConstraints = this.type === "video" ? { audio: true, video: true } : { audio: true };
+		this.stream = await navigator.mediaDevices.getUserMedia(constraints);
+		this.startRecording();
+	}
+	private startRecording() {
+		if (!this.stream) {
+			throw new Error("Stream not initialized");
+		}
+		this.outputBlob = undefined;
+		this.mediaRecorder = new MediaRecorder(this.stream, {
+			mimeType: this.desiredMimeType,
+		});
+		this.mediaRecorder.onstop = this.handleStop.bind(this);
+		this.mediaRecorder.ondataavailable = this.handleDataAvailable.bind(this);
+		this.mediaRecorder.start(10); // timeslice in ms
+	}
+	handleStop(): void {}
+	handleDataAvailable(evt: BlobEvent): void {
+		if (evt.data && evt.data.size > 0) {
+			this.recordedBlobs.push(evt.data);
+		}
+	}
+	async stopRecording(): Promise<Blob> {
+		if (this.mediaRecorder) {
+			this.mediaRecorder.stop();
+		}
+		if (this.stream) {
+			for (const t of this.stream.getTracks()) {
+				t.stop(); // Stop stream.
+			}
+		}
+
+		// Handle the case where stopRecording is called before this.mediaRecorder is initialized
+		if (!this.mediaRecorder) {
+			return new Blob(this.recordedBlobs);
+		}
+
+		await delay(30);
+		// Wait for the last blob in handleDataAvailable.
+		// Alternative: hook into `onstop` event.
+		const superBuffer = new Blob(this.recordedBlobs, {
+			type: this.mimeType,
+		});
+		this.outputBlob = superBuffer;
+		return superBuffer;
+	}
+}
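A minimal usage sketch of how a component might drive this Recorder, assuming a browser context where the user grants microphone access:

const recorder = new Recorder();
await recorder.start(); // getUserMedia + MediaRecorder.start(10)
// ...record for a while...
const blob = await recorder.stopRecording(); // an audio/webm Blob, also kept in recorder.outputBlob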
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/WidgetRecorder.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/WidgetRecorder.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..05de83da5d0a76caf498c9a7cda0fd1eb9b20448
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetRecorder/WidgetRecorder.svelte
@@ -0,0 +1,67 @@
+
+
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetShortcutRunLabel/WidgetShortcutRunLabel.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetShortcutRunLabel/WidgetShortcutRunLabel.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..322663467417a826f7237bd155a7d7e90b9b1f3b
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetShortcutRunLabel/WidgetShortcutRunLabel.svelte
@@ -0,0 +1,22 @@
+
+
+{#if !isDisabled}
+
+{/if}
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetState/WidgetState.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetState/WidgetState.svelte
new file mode 100644
index 0000000000000000000000000000000000000000..94be11f7b7e0062693401d72804c5ef57ba39010
--- /dev/null
+++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetState/WidgetState.svelte
@@ -0,0 +1,17 @@
+
+
+
+
+
+ This model is currently loaded and running on the Inference API. +
+
+ ⚠️ This model could not be loaded by the Inference API. ⚠️ +
+
+ This model can be loaded on the Inference API on-demand. +
+
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte new file mode 100644 index 0000000000000000000000000000000000000000..6fc03b2b178d8a17d5c68c3dbd38726338542cd7 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetSubmitBtn/WidgetSubmitBtn.svelte @@ -0,0 +1,37 @@ + + + + +{#if !isDisabled} + +{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTableInput/WidgetTableInput.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTableInput/WidgetTableInput.svelte new file mode 100644 index 0000000000000000000000000000000000000000..765ef6d23494385fba3a0cdfe749b6b5c6131b94 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTableInput/WidgetTableInput.svelte @@ -0,0 +1,112 @@ + + +
+ {#if table.length > 1} + + + + {#each table[0] as header, x} + + {/each} + + + + {#each table.slice(1) as row, y} + + {#each row as cell, x} + + {/each} + + {/each} + +
editCell(e, [x, 0])} + > + {header} +
editCell(e, [x, y + 1])}>{cell}
+ {/if} +
+ +
+ {#if canAddRow} + + {/if} + {#if canAddCol} + + {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextInput/WidgetTextInput.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextInput/WidgetTextInput.svelte new file mode 100644 index 0000000000000000000000000000000000000000..50665b478649c00a98a9d610b1437ecd21be2679 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextInput/WidgetTextInput.svelte @@ -0,0 +1,20 @@ + + + + + + + diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextarea/WidgetTextarea.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextarea/WidgetTextarea.svelte new file mode 100644 index 0000000000000000000000000000000000000000..2f56f731432bd3194e230ec6e6707f59a3bd80a0 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTextarea/WidgetTextarea.svelte @@ -0,0 +1,106 @@ + + + + + + + + + + diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTimer/WidgetTimer.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTimer/WidgetTimer.svelte new file mode 100644 index 0000000000000000000000000000000000000000..2d41e1ac523c03e04ad4813df90a7994afd536d4 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetTimer/WidgetTimer.svelte @@ -0,0 +1,35 @@ + + +{#if shouldDisplay && !isDisabled} + {counterHuman} +{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetWrapper/WidgetWrapper.svelte b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetWrapper/WidgetWrapper.svelte new file mode 100644 index 0000000000000000000000000000000000000000..4d171a03b9e75c0b226a80a879e4b119d0833415 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/WidgetWrapper/WidgetWrapper.svelte @@ -0,0 +1,159 @@ + + +{#if isDisabled && !inputSamples.length} + + +{:else} +
+ {#if isMaximized} + + {/if} + + {#if !!inputGroups.length} +
+ + {#if inputGroups.length > 1} + group)} + /> + {/if} + +
+ {/if} +
+ + + {#if modelLoading.isLoading} + + {/if} + + +
+{/if} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/consts.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/consts.ts new file mode 100644 index 0000000000000000000000000000000000000000..1ec7359bff5ade2ad5e14096d76175f3f86aaf14 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/consts.ts @@ -0,0 +1,37 @@ +/** + * Color palette for obj-det & img-seg widgets + */ +export const COLORS = [ + { + color: "red", + hex: "#f87171", + }, + { + color: "green", + hex: "#4ade80", + }, + { + color: "yellow", + hex: "#facc15", + }, + { + color: "blue", + hex: "#60a5fa", + }, + { + color: "orange", + hex: "#fb923c", + }, + { + color: "purple", + hex: "#c084fc", + }, + { + color: "cyan", + hex: "#22d3ee", + }, + { + color: "lime", + hex: "#a3e635", + }, +] as const; diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/helpers.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/helpers.ts new file mode 100644 index 0000000000000000000000000000000000000000..d34113757f3033b1d903a8f2a3ec0fa6e77c6ee5 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/helpers.ts @@ -0,0 +1,251 @@ +import type { ModelData } from "$lib/interfaces/Types.js"; +import { randomItem, parseJSON } from "../../../utils/ViewUtils.js"; +import type { WidgetExample, WidgetExampleAttribute } from "./WidgetExample.js"; +import type { ModelLoadInfo, TableData } from "./types.js"; +import { LoadState } from "./types.js"; + +const KEYS_TEXT: WidgetExampleAttribute[] = ["text", "context", "candidate_labels"]; +const KEYS_TABLE: WidgetExampleAttribute[] = ["table", "structured_data"]; +type QueryParamVal = string | null | boolean | (string | number)[][]; + +export function getQueryParamVal(key: WidgetExampleAttribute): QueryParamVal { + const searchParams = new URL(window.location.href).searchParams; + const value = searchParams.get(key); + if (KEYS_TEXT.includes(key)) { + return value; + } else if (KEYS_TABLE.includes(key)) { + const table = convertDataToTable((parseJSON(value) as TableData) ?? {}); + return table; + } else if (key === "multi_class") { + return value === "true"; + } + return value; +} + +export function getWidgetExample( + model: ModelData, + validateExample: (sample: WidgetExample) => sample is TWidgetExample +): TWidgetExample | undefined { + const validExamples = model.widgetData?.filter( + (sample): sample is TWidgetExample => sample && validateExample(sample) + ); + return validExamples?.length ? randomItem(validExamples) : undefined; +} + +// Update current url search params, keeping existing keys intact. +export function updateUrl(obj: Partial>): void { + if (!window) { + return; + } + + const sp = new URL(window.location.href).searchParams; + for (const [k, v] of Object.entries(obj)) { + if (v === undefined) { + sp.delete(k); + } else { + sp.set(k, v); + } + } + const path = `${window.location.pathname}?${sp.toString()}`; + window.history.replaceState(null, "", path); +} + +// Run through our own proxy to bypass CORS: +function proxify(url: string): string { + return url.startsWith(`http://localhost`) || new URL(url).host === window.location.host + ? 
url + : `https://widgets.hf.co/proxy?url=${url}`; +} + +// Get BLOB from a given URL after proxifying the URL +export async function getBlobFromUrl(url: string): Promise { + const proxiedUrl = proxify(url); + const res = await fetch(proxiedUrl); + const blob = await res.blob(); + return blob; +} + +interface Success { + computeTime: string; + output: T; + outputJson: string; + response: Response; + status: "success"; +} + +interface LoadingModel { + error: string; + estimatedTime: number; + status: "loading-model"; +} + +interface Error { + error: string; + status: "error"; +} + +interface CacheNotFound { + status: "cache not found"; +} + +type Result = Success | LoadingModel | Error | CacheNotFound; + +export async function callInferenceApi( + url: string, + repoId: string, + requestBody: Record, + apiToken = "", + outputParsingFn: (x: unknown) => T, + waitForModel = false, // If true, the server will only respond once the model has been loaded on the inference API, + includeCredentials = false, + isOnLoadCall = false, // If true, the server will try to answer from cache and not do anything if not + useCache = true +): Promise> { + const contentType = + "file" in requestBody && + requestBody["file"] && + typeof requestBody["file"] === "object" && + "type" in requestBody["file"] && + typeof requestBody["file"]["type"] === "string" + ? requestBody["file"]["type"] + : "application/json"; + + const headers = new Headers(); + headers.set("Content-Type", contentType); + if (apiToken) { + headers.set("Authorization", `Bearer ${apiToken}`); + } + if (waitForModel) { + headers.set("X-Wait-For-Model", "true"); + } + if (useCache === false) { + headers.set("X-Use-Cache", "false"); + } + if (isOnLoadCall) { + headers.set("X-Load-Model", "0"); + } + + const reqBody: File | string = + "file" in requestBody && requestBody["file"] instanceof File ? requestBody.file : JSON.stringify(requestBody); + + const response = await fetch(`${url}/models/${repoId}`, { + method: "POST", + body: reqBody, + headers, + credentials: includeCredentials ? "include" : "same-origin", + }); + + if (response.ok) { + // Success + const computeTime = response.headers.has("x-compute-time") + ? `${response.headers.get("x-compute-time")} s` + : `cached`; + const isMediaContent = (response.headers.get("content-type")?.search(/^(?:audio|image)/i) ?? -1) !== -1; + + const body = !isMediaContent ? await response.json() : await response.blob(); + + try { + const output = outputParsingFn(body); + const outputJson = !isMediaContent ? JSON.stringify(body, null, 2) : ""; + return { computeTime, output, outputJson, response, status: "success" }; + } catch (e) { + if (isOnLoadCall && body.error === "not loaded yet") { + return { status: "cache not found" }; + } + // Invalid output + const error = `API Implementation Error: ${String(e).replace(/^Error: /, "")}`; + return { error, status: "error" }; + } + } else { + // Error + const bodyText = await response.text(); + const body = parseJSON>(bodyText) ?? 
{}; + + if ( + body["error"] && + response.status === 503 && + body["estimated_time"] !== null && + body["estimated_time"] !== undefined + ) { + // Model needs loading + return { error: String(body["error"]), estimatedTime: +body["estimated_time"], status: "loading-model" }; + } else { + // Other errors + const { status, statusText } = response; + return { + error: String(body["error"]) || String(body["traceback"]) || `${status} ${statusText}`, + status: "error", + }; + } + } +} + +export async function getModelLoadInfo( + url: string, + repoId: string, + includeCredentials = false +): Promise { + const response = await fetch(`${url}/status/${repoId}`, { + credentials: includeCredentials ? "include" : "same-origin", + }); + const output = await response.json(); + if (response.ok && typeof output === "object" && output.loaded !== undefined) { + // eslint-disable-next-line @typescript-eslint/naming-convention + const { state, compute_type } = output; + return { compute_type, state }; + } else { + console.warn(response.status, output.error); + return { state: LoadState.Error }; + } +} + +// Extend Inference API requestBody with user supplied Inference API parameters +export function addInferenceParameters(requestBody: Record, model: ModelData): void { + const inference = model?.cardData?.inference; + if (typeof inference === "object") { + const inferenceParameters = inference?.parameters; + if (inferenceParameters) { + if (requestBody.parameters) { + requestBody.parameters = { ...requestBody.parameters, ...inferenceParameters }; + } else { + requestBody.parameters = inferenceParameters; + } + } + } +} + +/* + * Converts table from [[Header0, Header1, Header2], [Column0Val0, Column1Val0, Column2Val0], ...] + * to {Header0: [ColumnVal0, ...], Header1: [Column1Val0, ...], Header2: [Column2Val0, ...]} + */ +export function convertTableToData(table: (string | number)[][]): TableData { + return Object.fromEntries( + table[0].map((cell, x) => { + return [ + cell, + table + .slice(1) + .flat() + .filter((_, i) => i % table[0].length === x) + .map((v) => String(v)), // some models can only handle strings (no numbers) + ]; + }) + ); +} + +/** + * Converts data from {Header0: [ColumnVal0, ...], Header1: [Column1Val0, ...], Header2: [Column2Val0, ...]} + * to [[Header0, Header1, Header2], [Column0Val0, Column1Val0, Column2Val0], ...] + */ +export function convertDataToTable(data: TableData): (string | number)[][] { + const dataArray = Object.entries(data); // [header, cell[]][] + const nbCols = dataArray.length; + const nbRows = (dataArray[0]?.[1]?.length ?? 0) + 1; + return Array(nbRows) + .fill("") + .map((_, y) => + Array(nbCols) + .fill("") + .map((__, x) => (y === 0 ? 
dataArray[x][0] : dataArray[x][1][y - 1])) + ); +} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/inputValidation.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/inputValidation.ts new file mode 100644 index 0000000000000000000000000000000000000000..06224afdde3e84fc3fe654d9e52bf71511e639f9 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/inputValidation.ts @@ -0,0 +1,87 @@ +import type { + WidgetExample, + WidgetExampleAssetAndPromptInput, + WidgetExampleAssetAndTextInput, + WidgetExampleAssetAndZeroShotInput, + WidgetExampleAssetInput, + WidgetExampleSentenceSimilarityInput, + WidgetExampleStructuredDataInput, + WidgetExampleTableDataInput, + WidgetExampleTextAndContextInput, + WidgetExampleTextAndTableInput, + WidgetExampleTextInput, + WidgetExampleZeroShotTextInput, +} from "./WidgetExample.js"; + +export function isTextInput(sample: WidgetExample): sample is WidgetExampleTextInput { + return "text" in sample; +} + +export function isTextAndContextInput( + sample: WidgetExample +): sample is WidgetExampleTextAndContextInput { + return isTextInput(sample) && "context" in sample; +} + +export function isAssetInput(sample: WidgetExample): sample is WidgetExampleAssetInput { + return "src" in sample; +} + +export function isAssetAndPromptInput( + sample: WidgetExample +): sample is WidgetExampleAssetAndPromptInput { + return isAssetInput(sample) && "prompt" in sample && typeof sample.prompt === "string"; +} + +export function isAssetAndTextInput( + sample: WidgetExample +): sample is WidgetExampleAssetAndTextInput { + return isAssetInput(sample) && isTextInput(sample); +} + +export function isStructuredDataInput( + sample: WidgetExample +): sample is WidgetExampleStructuredDataInput { + return "structured_data" in sample; +} + +export function isTableDataInput( + sample: WidgetExample +): sample is WidgetExampleTableDataInput { + return "table" in sample; +} + +function _isZeroShotTextInput( + sample: WidgetExample +): sample is Exclude, "text"> { + return "candidate_labels" in sample && "multi_class" in sample; +} + +export function isZeroShotTextInput( + sample: WidgetExample +): sample is WidgetExampleZeroShotTextInput { + return isTextInput(sample) && _isZeroShotTextInput(sample); +} + +export function isSentenceSimilarityInput( + sample: WidgetExample +): sample is WidgetExampleSentenceSimilarityInput { + return "source_sentence" in sample && "sentences" in sample; +} + +export function isTextAndTableInput( + sample: WidgetExample +): sample is WidgetExampleTextAndTableInput { + return ( + isTextInput(sample) && + "table" in sample && + Array.isArray(sample.table) && + sample.table.every((r) => Array.isArray(r) && r.every((c) => typeof c === "string" || typeof c === "number")) + ); +} + +export function isAssetAndZeroShotInput( + sample: WidgetExample +): sample is WidgetExampleAssetAndZeroShotInput { + return isAssetInput(sample) && _isZeroShotTextInput(sample); +} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/outputValidation.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/outputValidation.ts new file mode 100644 index 0000000000000000000000000000000000000000..66b092a2c5b179f01d772b8f3768e2bd795a734c --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/outputValidation.ts @@ -0,0 +1,35 @@ +import type { + WidgetExampleOutputLabels, + WidgetExampleOutputAnswerScore, + WidgetExampleOutputText, + WidgetExampleOutputUrl, +} from "./WidgetExample.js"; 
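Stepping back to the two table helpers in `shared/helpers.ts` above, a quick round-trip documents the shapes `convertTableToData` and `convertDataToTable` expect; the sample data here is invented:

import { convertTableToData, convertDataToTable } from "./helpers.js";

// Header row first, then one row per record:
const table = [
	["Repository", "Stars"],
	["Transformers", 36542],
	["Datasets", 4512],
];

const data = convertTableToData(table);
// => { Repository: ["Transformers", "Datasets"], Stars: ["36542", "4512"] }
// Note: cell values are stringified, since some models only handle strings.

const roundTrip = convertDataToTable(data);
// => [["Repository", "Stars"], ["Transformers", "36542"], ["Datasets", "4512"]]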
+ +export function isValidOutputLabels(arg: unknown): arg is WidgetExampleOutputLabels { + return Array.isArray(arg) && arg.every((x) => typeof x.label === "string" && typeof x.score === "number"); +} + +export function isValidOutputAnswerScore(arg: unknown): arg is WidgetExampleOutputAnswerScore { + return ( + !!arg && + typeof arg === "object" && + "answer" in arg && + typeof arg["answer"] === "string" && + "score" in arg && + typeof arg["score"] === "number" + ); +} + +export function isValidOutputText(arg: unknown): arg is WidgetExampleOutputText { + return !!arg && typeof arg === "object" && "text" in arg && typeof arg["text"] === "string"; +} + +export function isValidOutputUrl(arg: unknown): arg is WidgetExampleOutputUrl { + return ( + !!arg && + typeof arg === "object" && + "url" in arg && + typeof arg["url"] === "string" && + arg["url"].startsWith("https://") + ); +} diff --git a/packages/widgets/src/lib/components/InferenceWidget/shared/types.ts b/packages/widgets/src/lib/components/InferenceWidget/shared/types.ts new file mode 100644 index 0000000000000000000000000000000000000000..70b6f4b41e4ff3a40f3172b61dcbbeb9e6bf06f3 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/shared/types.ts @@ -0,0 +1,68 @@ +import type { ModelData } from "../../../interfaces/Types.js"; +import type { WidgetExampleOutput } from "./WidgetExample.js"; + +export interface WidgetProps { + apiToken?: string; + apiUrl: string; + callApiOnMount: boolean; + model: ModelData; + noTitle: boolean; + shouldUpdateUrl: boolean; + includeCredentials: boolean; + isLoggedIn?: boolean; +} + +export interface InferenceRunOpts { + withModelLoading?: boolean; + isOnLoadCall?: boolean; + useCache?: boolean; + exampleOutput?: TOutput; +} + +export interface ExampleRunOpts { + isPreview?: boolean; + inferenceOpts?: InferenceRunOpts; +} + +export enum LoadState { + Loadable = "Loadable", + Loaded = "Loaded", + TooBig = "TooBig", + Error = "error", +} + +export enum ComputeType { + CPU = "cpu", + GPU = "gpu", +} + +export interface ModelLoadInfo { + state: LoadState; + compute_type?: ComputeType; +} + +export type TableData = Record; + +export type HighlightCoordinates = Record; + +interface Box { + xmin: number; + ymin: number; + xmax: number; + ymax: number; +} + +export interface DetectedObject { + box: Box; + label: string; + score: number; + color?: string; +} +export interface ImageSegment { + label: string; + score: number; + mask: string; + color?: string; + imgData?: ImageData; + bitmap?: ImageBitmap; +} diff --git a/packages/widgets/src/lib/components/InferenceWidget/stores.ts b/packages/widgets/src/lib/components/InferenceWidget/stores.ts new file mode 100644 index 0000000000000000000000000000000000000000..a9ed4f27e523d54cb00135f8e7bca2fad6fd5c7e --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/stores.ts @@ -0,0 +1,4 @@ +import { writable } from "svelte/store"; +import type { ModelLoadInfo } from "./shared/types.js"; + +export const modelLoadStates = writable>({}); diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioClassificationWidget/AudioClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioClassificationWidget/AudioClassificationWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..d75cea4f8ec57c40efc37022475cd96156703b9b --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioClassificationWidget/AudioClassificationWidget.svelte @@ -0,0 
+1,202 @@ + + + + +
+ + {#if fileUrl} + + {/if} + { + getOutput(); + }} + /> + {#if warning} +
{warning}
+ {/if} + +
+ + + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioToAudioWidget/AudioToAudioWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioToAudioWidget/AudioToAudioWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..5592fc5793b8f1e7ff1785b0a2250b8afecdd6db --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/AudioToAudioWidget/AudioToAudioWidget.svelte @@ -0,0 +1,191 @@ + + + + +
+ + {#if fileUrl} + + {/if} + { + getOutput(); + }} + /> + +
+ + {#each output as item} +
+ {item.label}: + +
+ {/each} +
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/AutomaticSpeechRecognitionWidget/AutomaticSpeechRecognitionWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/AutomaticSpeechRecognitionWidget/AutomaticSpeechRecognitionWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..94687285a0eccde262346ba10fdef41c5100f76e --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/AutomaticSpeechRecognitionWidget/AutomaticSpeechRecognitionWidget.svelte @@ -0,0 +1,226 @@ + + + + +
+ + {#if !isRealtimeRecording} + {#if fileUrl} + + {/if} + { + getOutput(); + }} + /> + {#if warning} +
{warning}
+ {/if} + {/if} + +
+ + + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..b3d90789dc53e319584e5db81427eefb0c5b8416 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ConversationalWidget/ConversationalWidget.svelte @@ -0,0 +1,190 @@ + + + + + +
+ { + getOutput(); + }} + submitButtonLabel="Send" + /> + +
+
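For context, the request body the conversational widget above submits follows the default sample defined in `inferenceSnippets/inputs.ts` further down in this diff; as a TypeScript literal:

const payload = {
	past_user_inputs: ["Which movie is the best ?"],
	generated_responses: ["It is Die Hard for sure."],
	text: "Can you explain why ?",
};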
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/DataTable.ts b/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/DataTable.ts new file mode 100644 index 0000000000000000000000000000000000000000..551868a3874f1af0fe656766375ceaba8e82036a --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/DataTable.ts @@ -0,0 +1,43 @@ +export class DataTable { + max: number; + min: number; + std: number; + + constructor(public body: number[] | number[][]) { + const all = this.body.flat(); + this.max = Math.max(...all); + this.min = Math.min(...all); + this.std = this.max - this.min; + } + + get isArrLevel0(): boolean { + return isArrLevel0(this.body); + } + + get oneDim(): number[] { + return this.body as number[]; + } + get twoDim(): number[][] { + return this.body as number[][]; + } + + bg(value: number): string { + if (value > this.min + this.std * 0.7) { + return "bg-green-100 dark:bg-green-800"; + } + if (value > this.min + this.std * 0.6) { + return "bg-green-50 dark:bg-green-900"; + } + if (value < this.min + this.std * 0.3) { + return "bg-red-100 dark:bg-red-800"; + } + if (value < this.min + this.std * 0.4) { + return "bg-red-50 dark:bg-red-900"; + } + return ""; + } +} + +function isArrLevel0(x: number[] | number[][]): x is number[] { + return typeof x[0] === "number"; +} diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/FeatureExtractionWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/FeatureExtractionWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..d9f07a5268422fbcd2e5bd7d500148edcc8663a8 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/FeatureExtractionWidget/FeatureExtractionWidget.svelte @@ -0,0 +1,200 @@ + + + + +
+ { + getOutput(); + }} + /> + +
+ + {#if output} + {#if output.isArrLevel0} +
+ + {#each range(numOfRows(output.oneDim.length)) as i} + + {#each range(SINGLE_DIM_COLS) as j} + {#if j * numOfRows(output.oneDim.length) + i < output.oneDim.length} + + + {/if} + {/each} + + {/each} +
+ {j * numOfRows(output.oneDim.length) + i} + + {output.oneDim[j * numOfRows(output.oneDim.length) + i].toFixed(3)} +
+
+ {:else} +
+ + + + {/each} + + {#each output.twoDim as column, i} + + + {#each column as x} + + {/each} + + {/each} +
+ {#each range(output.twoDim[0].length) as j} + {j}
{i} + {x.toFixed(3)} +
+
+ {/if} + {/if} +
+
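The cell shading in the feature-extraction table above comes from `DataTable.bg` (earlier in this diff). Note that `std` there is really the min–max range (`max - min`), so the thresholds are fractions of that range; a small sketch with invented values:

import { DataTable } from "./DataTable.js";

const dt = new DataTable([0, 1, 2, 10]);
// min = 0, max = 10, std = max - min = 10 (a range, despite the name)
dt.bg(9);   // > min + 0.7 * range -> "bg-green-100 dark:bg-green-800"
dt.bg(6.5); // > min + 0.6 * range -> "bg-green-50 dark:bg-green-900"
dt.bg(1);   // < min + 0.3 * range -> "bg-red-100 dark:bg-red-800"
dt.bg(3.5); // < min + 0.4 * range -> "bg-red-50 dark:bg-red-900"
dt.bg(5);   // mid-range           -> "" (no highlight)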
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/FillMaskWidget/FillMaskWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/FillMaskWidget/FillMaskWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..b8c00e06d9b58b523dcd547136363ecb1f32b22b --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/FillMaskWidget/FillMaskWidget.svelte @@ -0,0 +1,178 @@ + + + + +
+ {#if model.pipeline_tag === "fill-mask"} +
+ Mask token: {model.mask_token} +
+ {/if} + + { + getOutput(); + }} + /> + +
+ + + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageClassificationWidget/ImageClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageClassificationWidget/ImageClassificationWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..33961aed520414ca9179ae5029d81968381f4700 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageClassificationWidget/ImageClassificationWidget.svelte @@ -0,0 +1,176 @@ + + + + +
+ (error = e)} + > + {#if imgSrc} + + {/if} + + + {#if imgSrc} + {#if imgSrc} +
+ +
+ {/if} + {/if} + + {#if warning} +
{warning}
+ {/if} + +
+ + + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/Canvas.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/Canvas.svelte new file mode 100644 index 0000000000000000000000000000000000000000..082b1062efac8aee2e10efe2d9bb7546c0438b74 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/Canvas.svelte @@ -0,0 +1,75 @@ + + + + +
+
+ +
+ {#if output.length} + + mousemove(e, width, height)} + on:mouseout={mouseout} + /> + {/if} +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/ImageSegmentationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/ImageSegmentationWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..beff590a69dd47eb91df369e8cd75b34ed441300 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageSegmentationWidget/ImageSegmentationWidget.svelte @@ -0,0 +1,286 @@ + + + + +
+ (error = e)} + > + {#if imgSrc} + + {/if} + + + {#if imgSrc} + + {/if} + + {#if warning} +
{warning}
+ {/if} + + +
+ + + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToImageWidget/ImageToImageWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToImageWidget/ImageToImageWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..2d1ecbf36d3772b87da48cd4dee4ff0f42d67b06 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToImageWidget/ImageToImageWidget.svelte @@ -0,0 +1,202 @@ + + + + +
+ (error = e)} + > + {#if imgSrc} + + {/if} + + + {#if imgSrc} + {#if imgSrc} +
+ +
+ {/if} + {/if} + + + { + getOutput(); + }} + /> + +
+ + {#if output.length} +
+ +
+ {/if} +
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToTextWidget/ImageToTextWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToTextWidget/ImageToTextWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..0b7acee47339414c9b08fc0e074eb0baa08dad3d --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ImageToTextWidget/ImageToTextWidget.svelte @@ -0,0 +1,161 @@ + + + + +
+ (error = e)} + > + {#if imgSrc} + + {/if} + + + {#if imgSrc} + {#if imgSrc} +
+ +
+ {/if} + {/if} + + {#if warning} +
{warning}
+ {/if} + +
+ + {#if model?.pipeline_tag !== "text-generation"} + + {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/ObjectDetectionWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/ObjectDetectionWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..a14e3ba2863da720832e21ca28f5a3bdfb85ba27 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/ObjectDetectionWidget.svelte @@ -0,0 +1,197 @@ + + + + +
+ (error = e)} + > + {#if imgSrc} + + {/if} + + + {#if imgSrc} + + {/if} + + {#if warning} +
{warning}
+ {/if} + +
+ + + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/SvgBoundingBoxes.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/SvgBoundingBoxes.svelte new file mode 100644 index 0000000000000000000000000000000000000000..90446b606298b2d469c84d48e7f61d52da7d695f --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ObjectDetectionWidget/SvgBoundingBoxes.svelte @@ -0,0 +1,93 @@ + + + +
+
+ +
+ + + {#each boxes as { rect, color, index }} + + mouseover(index)} + on:mouseout={mouseout} + /> + {/each} + +
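The `rect` each box carries above is presumably derived from the `DetectedObject.box` coordinates (`xmin`/`ymin`/`xmax`/`ymax` in `shared/types.ts`) plus a color from the `COLORS` palette; a sketch of that conversion, where the helper name is ours and the actual mapping code is not shown in this diff:

import type { DetectedObject } from "../../shared/types.js";

// Hypothetical conversion (the widget's real mapping isn't part of the visible diff):
function toSvgRect(obj: DetectedObject): { x: number; y: number; width: number; height: number } {
	const { xmin, ymin, xmax, ymax } = obj.box;
	return { x: xmin, y: ymin, width: xmax - xmin, height: ymax - ymin };
}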
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/QuestionAnsweringWidget/QuestionAnsweringWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/QuestionAnsweringWidget/QuestionAnsweringWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..53479ee8f75b0aa2005cc376dacd3550cfd42e86 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/QuestionAnsweringWidget/QuestionAnsweringWidget.svelte @@ -0,0 +1,177 @@ + + + + +
+ { + getOutput(); + }} + /> + + +
+ + {#if output} +
+ {output.answer} + {output.score.toFixed(3)} +
+ {/if} +
+
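The `{output.answer}` / `{output.score.toFixed(3)}` pair rendered above matches the `WidgetExampleOutputAnswerScore` shape checked by `isValidOutputAnswerScore` in `shared/outputValidation.ts`; for example, with invented values:

import { isValidOutputAnswerScore } from "../../shared/outputValidation.js";

const output = { answer: "Clara", score: 0.973 };
isValidOutputAnswerScore(output); // true -> safe to render answer + score.toFixed(3)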
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ReinforcementLearningWidget/ReinforcementLearningWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ReinforcementLearningWidget/ReinforcementLearningWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..3e6714ab2f23a522bf898e27dbfa1a010b4171b3 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ReinforcementLearningWidget/ReinforcementLearningWidget.svelte @@ -0,0 +1,62 @@ + + + +
+ +
+ {#if replay === Replay.Available} + +
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/SentenceSimilarityWidget/SentenceSimilarityWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/SentenceSimilarityWidget/SentenceSimilarityWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..07308d7a847abebbf929b972b60a450abc67e2b9 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/SentenceSimilarityWidget/SentenceSimilarityWidget.svelte @@ -0,0 +1,187 @@ + + + + +
+ + + {#each Array(nComparisonSentences - 1) as _, idx} + + {/each} + { + nComparisonSentences++; + }} + /> + { + getOutput(); + }} + /> + +
+ + {#if output.length} + + {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/SummarizationWidget/SummarizationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/SummarizationWidget/SummarizationWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..e2e4680d148156e1a75e2ad5fd8f0f9d76f960e0 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/SummarizationWidget/SummarizationWidget.svelte @@ -0,0 +1,140 @@ + + + + +
+ + { + getOutput(); + }} + /> + +
+ + + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TableQuestionAnsweringWidget/TableQuestionAnsweringWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TableQuestionAnsweringWidget/TableQuestionAnsweringWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..d252b25c843d1bc438221bc10f6890d244b89c16 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TableQuestionAnsweringWidget/TableQuestionAnsweringWidget.svelte @@ -0,0 +1,196 @@ + + + + +
+ { + getOutput(); + }} + /> + +
+ {#if output} + + {/if} + {#if table.length > 1 || table[0].length > 1} + + {/if} +
+
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TabularDataWidget/TabularDataWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TabularDataWidget/TabularDataWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..3c0a12f478d3b0b0197bd8bfc943d88a1349f38e --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TabularDataWidget/TabularDataWidget.svelte @@ -0,0 +1,228 @@ + + + + +
+
+ {#if table.length > 1 || table[1]?.length > 1} + + {/if} +
+ { + getOutput(); + }} + /> + +
+ +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TextGenerationWidget/TextGenerationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextGenerationWidget/TextGenerationWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..9a23bb5ad9f5590c2021e3c9b9d232b1f0f7f69a --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextGenerationWidget/TextGenerationWidget.svelte @@ -0,0 +1,268 @@ + + + + +
+ + {#if model.id === "bigscience/bloom"} + + {/if} +
+ { + getOutput({ useCache }); + }} + /> + +
+ +
+
+ {#if warning} +
{warning}
+ {/if} + {#if isBloomLoginRequired} +
+ + Please + login + or + + register to try BLOOM 🌸 +
+ {/if} + +
+ + {#if model?.pipeline_tag !== "text-generation"} + + + {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToImageWidget/TextToImageWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToImageWidget/TextToImageWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..ff2d09221de3c0400a2385eba4b1adcde6c38be1 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToImageWidget/TextToImageWidget.svelte @@ -0,0 +1,155 @@ + + + + +
+ getOutput()} /> + +
+ + {#if output.length} +
+ +
+ {/if} +
+
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToSpeechWidget/TextToSpeechWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToSpeechWidget/TextToSpeechWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..27c174be9671ac1460501249ba625a021e59fb3c --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TextToSpeechWidget/TextToSpeechWidget.svelte @@ -0,0 +1,143 @@ + + + + +
+ + { + getOutput(); + }} + /> + +
+ + {#if output.length} + + {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/TokenClassificationWidget/TokenClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/TokenClassificationWidget/TokenClassificationWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..f0f9fb25834fb45903340cdbc737ec834b1b49d5 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/TokenClassificationWidget/TokenClassificationWidget.svelte @@ -0,0 +1,265 @@ + + + + +
+ + { + getOutput(); + }} + /> + {#if warning} +
{warning}
+ {/if} + +
+ + + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/VisualQuestionAnsweringWidget/VisualQuestionAnsweringWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/VisualQuestionAnsweringWidget/VisualQuestionAnsweringWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..b37c6ea6d3042b5429b684a78bff1a6ec961133e --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/VisualQuestionAnsweringWidget/VisualQuestionAnsweringWidget.svelte @@ -0,0 +1,204 @@ + + + + +
+ (error = e)} + > + {#if imgSrc} + + {/if} + + + {#if imgSrc} + {#if imgSrc} +
+ +
+ {/if} + {/if} + + { + getOutput(); + }} + /> + +
+ + {#if output} + + {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..cb2d4ee898634ceef4875126c74f375ced0477bd --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShotImageClassificationWidget/ZeroShotImageClassificationWidget.svelte @@ -0,0 +1,228 @@ + + + + +
+ (error = e)} + > + {#if imgSrc} + + {/if} + + + {#if imgSrc} + {#if imgSrc} +
+ +
+ {/if} + {/if} + + + { + getOutput(); + }} + /> + +
+ + {#if output.length} + + {/if} + +
diff --git a/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShowClassificationWidget/ZeroShotClassificationWidget.svelte b/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShowClassificationWidget/ZeroShotClassificationWidget.svelte new file mode 100644 index 0000000000000000000000000000000000000000..d3742b8ca2ada1f0136239401ee22b0eafc4b671 --- /dev/null +++ b/packages/widgets/src/lib/components/InferenceWidget/widgets/ZeroShowClassificationWidget/ZeroShotClassificationWidget.svelte @@ -0,0 +1,200 @@ + + + + +
+ + + + { + getOutput(); + }} + /> + {#if warning} +
{warning}
+ {/if} + +
+ + {#if output.length} + + {/if} + +
diff --git a/packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte b/packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte new file mode 100644 index 0000000000000000000000000000000000000000..9900197635e123eaaa27bdcd4a448a7716cde63f --- /dev/null +++ b/packages/widgets/src/lib/components/PipelineIcon/PipelineIcon.svelte @@ -0,0 +1,89 @@ + + + diff --git a/packages/widgets/src/lib/components/PipelineTag/PipelineTag.svelte b/packages/widgets/src/lib/components/PipelineTag/PipelineTag.svelte new file mode 100644 index 0000000000000000000000000000000000000000..dc2afad8a113e4c670ffa88a37a7c68a2e48bce6 --- /dev/null +++ b/packages/widgets/src/lib/components/PipelineTag/PipelineTag.svelte @@ -0,0 +1,16 @@ + + +
+ + + {pipelineData ? pipelineData.name : pipeline} + +
diff --git a/packages/widgets/src/lib/index.ts b/packages/widgets/src/lib/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..a37f99cf4ef61455cff91195fe1f7fdc83a89f12 --- /dev/null +++ b/packages/widgets/src/lib/index.ts @@ -0,0 +1,38 @@ +import InferenceWidget from "./components/InferenceWidget/InferenceWidget.svelte"; +import WidgetOutputChart from "./components/InferenceWidget/shared/WidgetOutputChart/WidgetOutputChart.svelte"; +import WidgetOutputTokens from "./components/InferenceWidget/shared/WidgetOutputTokens/WidgetOutputTokens.svelte"; +import PipelineIcon from "./components/PipelineIcon/PipelineIcon.svelte"; +import { modelLoadStates } from "./components/InferenceWidget/stores.js"; +import { InferenceDisplayability } from "./interfaces/InferenceDisplayability.js"; +import * as serveCurl from "./inferenceSnippets/serveCurl.js"; +import * as serveJs from "./inferenceSnippets/serveJs.js"; +import * as servePython from "./inferenceSnippets/servePython.js"; +import * as snippetInputs from "./inferenceSnippets/inputs.js"; +import { MODEL_LIBRARIES_UI_ELEMENTS } from "./interfaces/Libraries.js"; +import type { LibraryUiElement } from "./interfaces/Libraries.js"; +import type { TransformersInfo } from "./interfaces/Types.js"; +import { MAPPING_DEFAULT_WIDGET } from "./interfaces/DefaultWidget.js"; +import { LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS } from "./interfaces/LibrariesToTasks.js"; + +export { + InferenceWidget, + WidgetOutputChart, + WidgetOutputTokens, + modelLoadStates, + InferenceDisplayability, + PipelineIcon, + serveCurl, + serveJs, + servePython, + snippetInputs, + MODEL_LIBRARIES_UI_ELEMENTS, + MAPPING_DEFAULT_WIDGET, + LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS, +}; +export type { + WidgetExample, + WidgetExampleOutput, + WidgetExampleOutputUrl, + WidgetExampleTextInput, +} from "./components/InferenceWidget/shared/WidgetExample.js"; +export type { LibraryUiElement, TransformersInfo }; diff --git a/packages/widgets/src/lib/inferenceSnippets/inputs.ts b/packages/widgets/src/lib/inferenceSnippets/inputs.ts new file mode 100644 index 0000000000000000000000000000000000000000..64e8c4c4173579dda9f1e3a9f83265b6903bbe96 --- /dev/null +++ b/packages/widgets/src/lib/inferenceSnippets/inputs.ts @@ -0,0 +1,129 @@ +import type { PipelineType } from "@huggingface/tasks"; +import type { ModelData } from "../interfaces/Types.js"; + +const inputsZeroShotClassification = () => + `"Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!"`; + +const inputsTranslation = () => `"Меня зовут Вольфганг и я живу в Берлине"`; + +const inputsSummarization = () => + `"The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). 
Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct."`; + +const inputsConversational = () => + `{ + "past_user_inputs": ["Which movie is the best ?"], + "generated_responses": ["It is Die Hard for sure."], + "text": "Can you explain why ?" + }`; + +const inputsTableQuestionAnswering = () => + `{ + "query": "How many stars does the transformers repository have?", + "table": { + "Repository": ["Transformers", "Datasets", "Tokenizers"], + "Stars": ["36542", "4512", "3934"], + "Contributors": ["651", "77", "34"], + "Programming language": [ + "Python", + "Python", + "Rust, Python and NodeJS" + ] + } + }`; + +const inputsQuestionAnswering = () => + `{ + "question": "What is my name?", + "context": "My name is Clara and I live in Berkeley." + }`; + +const inputsTextClassification = () => `"I like you. I love you"`; + +const inputsTokenClassification = () => `"My name is Sarah Jessica Parker but you can call me Jessica"`; + +const inputsTextGeneration = () => `"Can you please let us know more details about your "`; + +const inputsText2TextGeneration = () => `"The answer to the universe is"`; + +const inputsFillMask = (model: ModelData) => `"The answer to the universe is ${model.mask_token}."`; + +const inputsSentenceSimilarity = () => + `{ + "source_sentence": "That is a happy person", + "sentences": [ + "That is a happy dog", + "That is a very happy person", + "Today is a sunny day" + ] + }`; + +const inputsFeatureExtraction = () => `"Today is a sunny day and I will get some ice cream."`; + +const inputsImageClassification = () => `"cats.jpg"`; + +const inputsImageToText = () => `"cats.jpg"`; + +const inputsImageSegmentation = () => `"cats.jpg"`; + +const inputsObjectDetection = () => `"cats.jpg"`; + +const inputsAudioToAudio = () => `"sample1.flac"`; + +const inputsAudioClassification = () => `"sample1.flac"`; + +const inputsTextToImage = () => `"Astronaut riding a horse"`; + +const inputsTextToSpeech = () => `"The answer to the universe is 42"`; + +const inputsTextToAudio = () => `"liquid drum and bass, atmospheric synths, airy sounds"`; + +const inputsAutomaticSpeechRecognition = () => `"sample1.flac"`; + +const modelInputSnippets: { + [key in PipelineType]?: (model: ModelData) => string; +} = { + "audio-to-audio": inputsAudioToAudio, + "audio-classification": inputsAudioClassification, + "automatic-speech-recognition": inputsAutomaticSpeechRecognition, + conversational: inputsConversational, + "feature-extraction": inputsFeatureExtraction, + "fill-mask": inputsFillMask, + "image-classification": inputsImageClassification, + "image-to-text": inputsImageToText, + "image-segmentation": inputsImageSegmentation, + "object-detection": inputsObjectDetection, + "question-answering": inputsQuestionAnswering, + "sentence-similarity": inputsSentenceSimilarity, + summarization: inputsSummarization, + "table-question-answering": inputsTableQuestionAnswering, + "text-classification": inputsTextClassification, + "text-generation": inputsTextGeneration, + "text-to-image": inputsTextToImage, + "text-to-speech": inputsTextToSpeech, + "text-to-audio": inputsTextToAudio, + "text2text-generation": inputsText2TextGeneration, + "token-classification": inputsTokenClassification, + translation: inputsTranslation, + "zero-shot-classification": inputsZeroShotClassification, +}; + +// Use noWrap to put the whole snippet on a single line (removing new lines and tabulations) +// Use noQuotes to strip quotes from start & end (example: "abc" -> abc) 
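+// e.g. for an automatic-speech-recognition model, getModelInputSnippet(model, true, true)
+// turns the default `"sample1.flac"` into: sample1.flac (one line, quotes stripped).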
+export function getModelInputSnippet(model: ModelData, noWrap = false, noQuotes = false): string { + if (model.pipeline_tag) { + const inputs = modelInputSnippets[model.pipeline_tag]; + if (inputs) { + let result = inputs(model); + if (noWrap) { + result = result.replace(/(?:(?:\r?\n|\r)\t*)|\t+/g, " "); + } + if (noQuotes) { + const REGEX_QUOTES = /^"(.+)"$/s; + const match = result.match(REGEX_QUOTES); + result = match ? match[1] : result; + } + return result; + } + } + return "No input example has been defined for this model task."; +} diff --git a/packages/widgets/src/lib/inferenceSnippets/serveCurl.ts b/packages/widgets/src/lib/inferenceSnippets/serveCurl.ts new file mode 100644 index 0000000000000000000000000000000000000000..7a801e934f84c7843ab074df1f48a7e679d7a36c --- /dev/null +++ b/packages/widgets/src/lib/inferenceSnippets/serveCurl.ts @@ -0,0 +1,63 @@ +import type { PipelineType } from "@huggingface/tasks"; +import type { ModelData } from "../interfaces/Types.js"; +import { getModelInputSnippet } from "./inputs.js"; + +export const snippetBasic = (model: ModelData, accessToken: string): string => + `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + -d '{"inputs": ${getModelInputSnippet(model, true)}}' \\ + -H 'Content-Type: application/json' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; + +export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string => + `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + -d '{"inputs": ${getModelInputSnippet(model, true)}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \\ + -H 'Content-Type: application/json' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; + +export const snippetFile = (model: ModelData, accessToken: string): string => + `curl https://api-inference.huggingface.co/models/${model.id} \\ + -X POST \\ + --data-binary '@${getModelInputSnippet(model, true, true)}' \\ + -H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" +`; + +export const curlSnippets: Partial string>> = { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic, + "token-classification": snippetBasic, + "table-question-answering": snippetBasic, + "question-answering": snippetBasic, + "zero-shot-classification": snippetZeroShotClassification, + translation: snippetBasic, + summarization: snippetBasic, + conversational: snippetBasic, + "feature-extraction": snippetBasic, + "text-generation": snippetBasic, + "text2text-generation": snippetBasic, + "fill-mask": snippetBasic, + "sentence-similarity": snippetBasic, + "automatic-speech-recognition": snippetFile, + "text-to-image": snippetBasic, + "text-to-speech": snippetBasic, + "text-to-audio": snippetBasic, + "audio-to-audio": snippetFile, + "audio-classification": snippetFile, + "image-classification": snippetFile, + "image-to-text": snippetFile, + "object-detection": snippetFile, + "image-segmentation": snippetFile, +}; + +export function getCurlInferenceSnippet(model: ModelData, accessToken: string): string { + return model.pipeline_tag && model.pipeline_tag in curlSnippets + ? curlSnippets[model.pipeline_tag]?.(model, accessToken) ?? 
"" + : ""; +} + +export function hasCurlInferenceSnippet(model: ModelData): boolean { + return !!model.pipeline_tag && model.pipeline_tag in curlSnippets; +} diff --git a/packages/widgets/src/lib/inferenceSnippets/serveJs.ts b/packages/widgets/src/lib/inferenceSnippets/serveJs.ts new file mode 100644 index 0000000000000000000000000000000000000000..ef94c30645a68d6a62f5f4d23427346555d3fd7b --- /dev/null +++ b/packages/widgets/src/lib/inferenceSnippets/serveJs.ts @@ -0,0 +1,150 @@ +import type { PipelineType } from "@huggingface/tasks"; +import type { ModelData } from "../interfaces/Types.js"; +import { getModelInputSnippet } from "./inputs.js"; + +export const snippetBasic = (model: ModelData, accessToken: string): string => + `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.json(); + return result; +} + +query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + console.log(JSON.stringify(response)); +});`; + +export const snippetZeroShotClassification = (model: ModelData, accessToken: string): string => + `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.json(); + return result; +} + +query({"inputs": ${getModelInputSnippet( + model + )}, "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}).then((response) => { + console.log(JSON.stringify(response)); +});`; + +export const snippetTextToImage = (model: ModelData, accessToken: string): string => + `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" }, + method: "POST", + body: JSON.stringify(data), + } + ); + const result = await response.blob(); + return result; +} +query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + // Use image +});`; + +export const snippetTextToAudio = (model: ModelData, accessToken: string): string => { + const commonSnippet = `async function query(data) { + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" }, + method: "POST", + body: JSON.stringify(data), + } + );`; + if (model.library_name === "transformers") { + return ( + commonSnippet + + ` + const result = await response.blob(); + return result; + } + query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + // Returns a byte object of the Audio wavform. Use it directly! 
+ });` + ); + } else { + return ( + commonSnippet + + ` + const result = await response.json(); + return result; + } + + query({"inputs": ${getModelInputSnippet(model)}}).then((response) => { + console.log(JSON.stringify(response)); + });` + ); + } +}; + +export const snippetFile = (model: ModelData, accessToken: string): string => + `async function query(filename) { + const data = fs.readFileSync(filename); + const response = await fetch( + "https://api-inference.huggingface.co/models/${model.id}", + { + headers: { Authorization: "Bearer ${accessToken || `{API_TOKEN}`}" }, + method: "POST", + body: data, + } + ); + const result = await response.json(); + return result; +} + +query(${getModelInputSnippet(model)}).then((response) => { + console.log(JSON.stringify(response)); +});`; + +export const jsSnippets: Partial string>> = { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic, + "token-classification": snippetBasic, + "table-question-answering": snippetBasic, + "question-answering": snippetBasic, + "zero-shot-classification": snippetZeroShotClassification, + translation: snippetBasic, + summarization: snippetBasic, + conversational: snippetBasic, + "feature-extraction": snippetBasic, + "text-generation": snippetBasic, + "text2text-generation": snippetBasic, + "fill-mask": snippetBasic, + "sentence-similarity": snippetBasic, + "automatic-speech-recognition": snippetFile, + "text-to-image": snippetTextToImage, + "text-to-speech": snippetTextToAudio, + "text-to-audio": snippetTextToAudio, + "audio-to-audio": snippetFile, + "audio-classification": snippetFile, + "image-classification": snippetFile, + "image-to-text": snippetFile, + "object-detection": snippetFile, + "image-segmentation": snippetFile, +}; + +export function getJsInferenceSnippet(model: ModelData, accessToken: string): string { + return model.pipeline_tag && model.pipeline_tag in jsSnippets + ? jsSnippets[model.pipeline_tag]?.(model, accessToken) ?? 
"" + : ""; +} + +export function hasJsInferenceSnippet(model: ModelData): boolean { + return !!model.pipeline_tag && model.pipeline_tag in jsSnippets; +} diff --git a/packages/widgets/src/lib/inferenceSnippets/servePython.ts b/packages/widgets/src/lib/inferenceSnippets/servePython.ts new file mode 100644 index 0000000000000000000000000000000000000000..5ddbeabeb178164149565752336d1ede251d1a51 --- /dev/null +++ b/packages/widgets/src/lib/inferenceSnippets/servePython.ts @@ -0,0 +1,114 @@ +import type { PipelineType } from "@huggingface/tasks"; +import type { ModelData } from "../interfaces/Types.js"; +import { getModelInputSnippet } from "./inputs.js"; + +export const snippetZeroShotClassification = (model: ModelData): string => + `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, + "parameters": {"candidate_labels": ["refund", "legal", "faq"]}, +})`; + +export const snippetBasic = (model: ModelData): string => + `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +output = query({ + "inputs": ${getModelInputSnippet(model)}, +})`; + +export const snippetFile = (model: ModelData): string => + `def query(filename): + with open(filename, "rb") as f: + data = f.read() + response = requests.post(API_URL, headers=headers, data=data) + return response.json() + +output = query(${getModelInputSnippet(model)})`; + +export const snippetTextToImage = (model: ModelData): string => + `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content +image_bytes = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the image with PIL.Image for example +import io +from PIL import Image +image = Image.open(io.BytesIO(image_bytes))`; + +export const snippetTextToAudio = (model: ModelData): string => { + // Transformers TTS pipeline and api-inference-community (AIC) pipeline outputs are diverged + // with the latest update to inference-api (IA). + // Transformers IA returns a byte object (wav file), whereas AIC returns wav and sampling_rate. 
+ if (model.library_name === "transformers") { + return `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.content + +audio_bytes = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the audio with IPython.display for example +from IPython.display import Audio +Audio(audio_bytes)`; + } else { + return `def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +audio, sampling_rate = query({ + "inputs": ${getModelInputSnippet(model)}, +}) +# You can access the audio with IPython.display for example +from IPython.display import Audio +Audio(audio, rate=sampling_rate)`; + } +}; +export const pythonSnippets: Partial string>> = { + // Same order as in js/src/lib/interfaces/Types.ts + "text-classification": snippetBasic, + "token-classification": snippetBasic, + "table-question-answering": snippetBasic, + "question-answering": snippetBasic, + "zero-shot-classification": snippetZeroShotClassification, + translation: snippetBasic, + summarization: snippetBasic, + conversational: snippetBasic, + "feature-extraction": snippetBasic, + "text-generation": snippetBasic, + "text2text-generation": snippetBasic, + "fill-mask": snippetBasic, + "sentence-similarity": snippetBasic, + "automatic-speech-recognition": snippetFile, + "text-to-image": snippetTextToImage, + "text-to-speech": snippetTextToAudio, + "text-to-audio": snippetTextToAudio, + "audio-to-audio": snippetFile, + "audio-classification": snippetFile, + "image-classification": snippetFile, + "image-to-text": snippetFile, + "object-detection": snippetFile, + "image-segmentation": snippetFile, +}; + +export function getPythonInferenceSnippet(model: ModelData, accessToken: string): string { + const body = + model.pipeline_tag && model.pipeline_tag in pythonSnippets ? pythonSnippets[model.pipeline_tag]?.(model) ?? "" : ""; + + return `import requests + +API_URL = "https://api-inference.huggingface.co/models/${model.id}" +headers = {"Authorization": ${accessToken ? `"Bearer ${accessToken}"` : `f"Bearer {API_TOKEN}"`}} + +${body}`; +} + +export function hasPythonInferenceSnippet(model: ModelData): boolean { + return !!model.pipeline_tag && model.pipeline_tag in pythonSnippets; +} diff --git a/packages/widgets/src/lib/interfaces/DefaultWidget.ts b/packages/widgets/src/lib/interfaces/DefaultWidget.ts new file mode 100644 index 0000000000000000000000000000000000000000..e7ad1962157a8eecf7d88c0b81ae735924fbf3e9 --- /dev/null +++ b/packages/widgets/src/lib/interfaces/DefaultWidget.ts @@ -0,0 +1,718 @@ +import type { PipelineType } from "@huggingface/tasks"; +import type { WidgetExample } from "../components/InferenceWidget/shared/WidgetExample.js"; + +type LanguageCode = string; + +type PerLanguageMapping = Map; + +/// NOTE TO CONTRIBUTORS: +/// +/// When adding sample inputs for a new language, you don't +/// necessarily have to translate the inputs from existing languages. +/// (which were quite random to begin with) +/// +/// i.e. Feel free to be creative and provide better samples. +// + +/// The placeholder will be replaced by the correct mask token +/// in the following examples, depending on the model type +/// +/// see [INTERNAL] github.com/huggingface/moon-landing/blob/c5c3d45fe0ab27347b3ab27bdad646ef20732351/server/lib/App.ts#L254 +// + +const MAPPING_EN: PerLanguageMapping = new Map([ + ["text-classification", [`I like you. 
I love you`]], + [ + "token-classification", + [ + `My name is Wolfgang and I live in Berlin`, + `My name is Sarah and I live in London`, + `My name is Clara and I live in Berkeley, California.`, + ], + ], + [ + "table-question-answering", + [ + { + text: `How many stars does the transformers repository have?`, + table: { + Repository: ["Transformers", "Datasets", "Tokenizers"], + Stars: [36542, 4512, 3934], + Contributors: [651, 77, 34], + "Programming language": ["Python", "Python", "Rust, Python and NodeJS"], + }, + }, + ], + ], + [ + "question-answering", + [ + { + text: `Where do I live?`, + context: `My name is Wolfgang and I live in Berlin`, + }, + { + text: `Where do I live?`, + context: `My name is Sarah and I live in London`, + }, + { + text: `What's my name?`, + context: `My name is Clara and I live in Berkeley.`, + }, + { + text: `Which name is also used to describe the Amazon rainforest in English?`, + context: `The Amazon rainforest (Portuguese: Floresta Amazônica or Amazônia; Spanish: Selva Amazónica, Amazonía or usually Amazonia; French: Forêt amazonienne; Dutch: Amazoneregenwoud), also known in English as Amazonia or the Amazon Jungle, is a moist broadleaf forest that covers most of the Amazon basin of South America. This basin encompasses 7,000,000 square kilometres (2,700,000 sq mi), of which 5,500,000 square kilometres (2,100,000 sq mi) are covered by the rainforest. This region includes territory belonging to nine nations. The majority of the forest is contained within Brazil, with 60% of the rainforest, followed by Peru with 13%, Colombia with 10%, and with minor amounts in Venezuela, Ecuador, Bolivia, Guyana, Suriname and French Guiana. States or departments in four nations contain "Amazonas" in their names. The Amazon represents over half of the planet's remaining rainforests, and comprises the largest and most biodiverse tract of tropical rainforest in the world, with an estimated 390 billion individual trees divided into 16,000 species.`, + }, + ], + ], + [ + "zero-shot-classification", + [ + { + text: "I have a problem with my iphone that needs to be resolved asap!!", + candidate_labels: "urgent, not urgent, phone, tablet, computer", + multi_class: true, + }, + { + text: "Last week I upgraded my iOS version and ever since then my phone has been overheating whenever I use your app.", + candidate_labels: "mobile, website, billing, account access", + multi_class: false, + }, + { + text: "A new model offers an explanation for how the Galilean satellites formed around the solar system’s largest world. Konstantin Batygin did not set out to solve one of the solar system’s most puzzling mysteries when he went for a run up a hill in Nice, France. Dr. Batygin, a Caltech researcher, best known for his contributions to the search for the solar system’s missing “Planet Nine,” spotted a beer bottle. At a steep, 20 degree grade, he wondered why it wasn’t rolling down the hill. He realized there was a breeze at his back holding the bottle in place. Then he had a thought that would only pop into the mind of a theoretical astrophysicist: “Oh! This is how Europa formed.” Europa is one of Jupiter’s four large Galilean moons. And in a paper published Monday in the Astrophysical Journal, Dr. 
Batygin and a co-author, Alessandro Morbidelli, a planetary scientist at the Côte d’Azur Observatory in France, present a theory explaining how some moons form around gas giants like Jupiter and Saturn, suggesting that millimeter-sized grains of hail produced during the solar system’s formation became trapped around these massive worlds, taking shape one at a time into the potentially habitable moons we know today.", + candidate_labels: "space & cosmos, scientific discovery, microbiology, robots, archeology", + multi_class: true, + }, + ], + ], + ["translation", [`My name is Wolfgang and I live in Berlin`, `My name is Sarah and I live in London`]], + [ + "summarization", + [ + `The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.`, + ], + ], + [ + "conversational", + [ + `Hey my name is Julien! How are you?`, + `Hey my name is Thomas! How are you?`, + `Hey my name is Mariama! How are you?`, + `Hey my name is Clara! How are you?`, + `Hey my name is Julien! How are you?`, + `Hi.`, + ], + ], + [ + "text-generation", + [ + `My name is Julien and I like to`, + `My name is Thomas and my main`, + `My name is Mariama, my favorite`, + `My name is Clara and I am`, + `My name is Lewis and I like to`, + `My name is Merve and my favorite`, + `My name is Teven and I am`, + `Once upon a time,`, + ], + ], + ["fill-mask", [`Paris is the <mask> of France.`, `The goal of life is <mask>.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "That is a happy person", + sentences: ["That is a happy dog", "That is a very happy person", "Today is a sunny day"], + }, + ], + ], +]); + +const MAPPING_ZH: PerLanguageMapping = new Map([ + ["text-classification", [`我喜欢你。 我爱你`]], + ["token-classification", [`我叫沃尔夫冈,我住在柏林。`, `我叫萨拉,我住在伦敦。`, `我叫克拉拉,我住在加州伯克利。`]], + [ + "question-answering", + [ + { + text: `我住在哪里?`, + context: `我叫沃尔夫冈,我住在柏林。`, + }, + { + text: `我住在哪里?`, + context: `我叫萨拉,我住在伦敦。`, + }, + { + text: `我的名字是什么?`, + context: `我叫克拉拉,我住在伯克利。`, + }, + ], + ], + ["translation", [`我叫沃尔夫冈,我住在柏林。`, `我叫萨拉,我住在伦敦。`]], + [ + "zero-shot-classification", + [ + { + text: "房间干净明亮,非常不错", + candidate_labels: "这是一条差评, 这是一条好评", + }, + ], + ], + [ + "summarization", + [ + `该塔高324米(1063英尺),与一幢81层的建筑物一样高,是巴黎最高的建筑物。 它的底座是方形的,每边长125米(410英尺)。 在建造过程中,艾菲尔铁塔超过了华盛顿纪念碑,成为世界上最高的人造结构,它保持了41年的头衔,直到1930年纽约市的克莱斯勒大楼竣工。这是第一个到达300米高度的结构。 由于1957年在塔顶增加了广播天线,因此它现在比克莱斯勒大厦高5.2米(17英尺)。 除发射器外,艾菲尔铁塔是法国第二高的独立式建筑,仅次于米劳高架桥。`, + ], + ], + [ + "text-generation", + [`我叫朱利安,我喜欢`, `我叫托马斯,我的主要`, `我叫玛丽亚,我最喜欢的`, `我叫克拉拉,我是`, `从前,`], + ], + ["fill-mask", [`巴黎是<mask>国的首都。`, `生活的真谛是<mask>。`]], + [ + "sentence-similarity", + [ + { + source_sentence: "那是 個快樂的人", + sentences: ["那是 條快樂的狗", "那是 個非常幸福的人", "今天是晴天"], + }, + ], + ], +]); + +const MAPPING_FR: PerLanguageMapping = new Map([ + ["text-classification", [`Je t'apprécie beaucoup. 
Je t'aime.`]], + ["token-classification", [`Mon nom est Wolfgang et je vis à Berlin`]], + [ + "question-answering", + [ + { + text: `Où est-ce que je vis?`, + context: `Mon nom est Wolfgang et je vis à Berlin`, + }, + ], + ], + ["translation", [`Mon nom est Wolfgang et je vis à Berlin`]], + [ + "summarization", + [ + `La tour fait 324 mètres (1,063 pieds) de haut, environ la même hauteur qu'un immeuble de 81 étages, et est la plus haute structure de Paris. Sa base est carrée, mesurant 125 mètres (410 pieds) sur chaque côté. Durant sa construction, la tour Eiffel surpassa le Washington Monument pour devenir la plus haute structure construite par l'homme dans le monde, un titre qu'elle conserva pendant 41 ans jusqu'à l'achèvement du Chrysler Building à New-York City en 1930. Ce fut la première structure à atteindre une hauteur de 300 mètres. Avec l'ajout d'une antenne de radiodiffusion au sommet de la tour Eiffel en 1957, celle-ci redevint plus haute que le Chrysler Building de 5,2 mètres (17 pieds). En excluant les transmetteurs, elle est la seconde plus haute structure autoportante de France après le viaduc de Millau.`, + ], + ], + ["text-generation", [`Mon nom est Julien et j'aime`, `Mon nom est Thomas et mon principal`, `Il était une fois`]], + ["fill-mask", [`Paris est la <mask> de la France.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "C'est une personne heureuse", + sentences: [ + "C'est un chien heureux", + "C'est une personne très heureuse", + "Aujourd'hui est une journée ensoleillée", + ], + }, + ], + ], +]); + +const MAPPING_ES: PerLanguageMapping = new Map([ + ["text-classification", [`Te quiero. Te amo.`]], + ["token-classification", [`Me llamo Wolfgang y vivo en Berlin`]], + [ + "question-answering", + [ + { + text: `¿Dónde vivo?`, + context: `Me llamo Wolfgang y vivo en Berlin`, + }, + { + text: `¿Quién inventó el submarino?`, + context: `Isaac Peral fue un murciano que inventó el submarino`, + }, + { + text: `¿Cuántas personas hablan español?`, + context: `El español es el segundo idioma más hablado del mundo con más de 442 millones de hablantes`, + }, + ], + ], + [ + "translation", + [ + `Me llamo Wolfgang y vivo en Berlin`, + `Los ingredientes de una tortilla de patatas son: huevos, patatas y cebolla`, + ], + ], + [ + "summarization", + [ + `La torre tiene 324 metros (1.063 pies) de altura, aproximadamente la misma altura que un edificio de 81 pisos y la estructura más alta de París. Su base es cuadrada, mide 125 metros (410 pies) a cada lado. Durante su construcción, la Torre Eiffel superó al Washington Monument para convertirse en la estructura artificial más alta del mundo, un título que mantuvo durante 41 años hasta que el Chrysler Building en la ciudad de Nueva York se terminó en 1930. Fue la primera estructura en llegar Una altura de 300 metros. Debido a la adición de una antena de transmisión en la parte superior de la torre en 1957, ahora es más alta que el Chrysler Building en 5,2 metros (17 pies). 
Excluyendo los transmisores, la Torre Eiffel es la segunda estructura independiente más alta de Francia después del Viaducto de Millau.`, + ], + ], + [ + "text-generation", + [ + `Me llamo Julien y me gusta`, + `Me llamo Thomas y mi principal`, + `Me llamo Manuel y trabajo en`, + `Érase una vez,`, + `Si tú me dices ven, `, + ], + ], + ["fill-mask", [`Mi nombre es <mask> y vivo en Nueva York.`, `El español es un idioma muy <mask> en el mundo.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Esa es una persona feliz", + sentences: ["Ese es un perro feliz", "Esa es una persona muy feliz", "Hoy es un día soleado"], + }, + ], + ], +]); + +const MAPPING_RU: PerLanguageMapping = new Map([ + ["text-classification", [`Ты мне нравишься. Я тебя люблю`]], + ["token-classification", [`Меня зовут Вольфганг и я живу в Берлине`]], + [ + "question-answering", + [ + { + text: `Где живу?`, + context: `Меня зовут Вольфганг и я живу в Берлине`, + }, + ], + ], + ["translation", [`Меня зовут Вольфганг и я живу в Берлине`]], + [ + "summarization", + [ + `Высота башни составляет 324 метра (1063 фута), примерно такая же высота, как у 81-этажного здания, и самое высокое сооружение в Париже. Его основание квадратно, размером 125 метров (410 футов) с любой стороны. Во время строительства Эйфелева башня превзошла монумент Вашингтона, став самым высоким искусственным сооружением в мире, и этот титул она удерживала в течение 41 года до завершения строительство здания Крайслер в Нью-Йорке в 1930 году. Это первое сооружение которое достигло высоты 300 метров. Из-за добавления вещательной антенны на вершине башни в 1957 году она сейчас выше здания Крайслер на 5,2 метра (17 футов). За исключением передатчиков, Эйфелева башня является второй самой высокой отдельно стоящей структурой во Франции после виадука Мийо.`, + ], + ], + ["text-generation", [`Меня зовут Жюльен и`, `Меня зовут Томас и мой основной`, `Однажды`]], + ["fill-mask", [`Меня зовут <mask> и я инженер живущий в Нью-Йорке.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Это счастливый человек", + sentences: ["Это счастливая собака", "Это очень счастливый человек", "Сегодня солнечный день"], + }, + ], + ], +]); + +const MAPPING_UK: PerLanguageMapping = new Map([ + ["translation", [`Мене звати Вольфґанґ і я живу в Берліні.`]], + ["fill-mask", [`Мене звати <mask>.`]], +]); + +const MAPPING_IT: PerLanguageMapping = new Map([ + ["text-classification", [`Mi piaci. Ti amo`]], + [ + "token-classification", + [ + `Mi chiamo Wolfgang e vivo a Berlino`, + `Mi chiamo Sarah e vivo a Londra`, + `Mi chiamo Clara e vivo a Berkeley in California.`, + ], + ], + [ + "question-answering", + [ + { + text: `Dove vivo?`, + context: `Mi chiamo Wolfgang e vivo a Berlino`, + }, + { + text: `Dove vivo?`, + context: `Mi chiamo Sarah e vivo a Londra`, + }, + { + text: `Come mi chiamo?`, + context: `Mi chiamo Clara e vivo a Berkeley.`, + }, + ], + ], + ["translation", [`Mi chiamo Wolfgang e vivo a Berlino`, `Mi chiamo Sarah e vivo a Londra`]], + [ + "summarization", + [ + `La torre degli Asinelli è una delle cosiddette due torri di Bologna, simbolo della città, situate in piazza di porta Ravegnana, all'incrocio tra le antiche strade San Donato (ora via Zamboni), San Vitale, Maggiore e Castiglione. Eretta, secondo la tradizione, fra il 1109 e il 1119 dal nobile Gherardo Asinelli, la torre è alta 97,20 metri, pende verso ovest per 2,23 metri e presenta all'interno una scalinata composta da 498 gradini. Ancora non si può dire con certezza quando e da chi fu costruita la torre degli Asinelli. 
Si presume che la torre debba il proprio nome a Gherardo Asinelli, il nobile cavaliere di fazione ghibellina al quale se ne attribuisce la costruzione, iniziata secondo una consolidata tradizione l'11 ottobre 1109 e terminata dieci anni dopo, nel 1119.`, + ], + ], + [ + "text-generation", + [ + `Mi chiamo Loreto e mi piace`, + `Mi chiamo Thomas e il mio principale`, + `Mi chiamo Marianna, la mia cosa preferita`, + `Mi chiamo Clara e sono`, + `C'era una volta`, + ], + ], + ["fill-mask", [`Roma è la <mask> d'Italia.`, `Lo scopo della vita è <mask>.`]], + [ + "sentence-similarity", + [ + { + source_sentence: "Questa è una persona felice", + sentences: ["Questo è un cane felice", "Questa è una persona molto felice", "Oggi è una giornata di sole"], + }, + ], + ], +]); + +const MAPPING_FA: PerLanguageMapping = new Map([ + [ + "text-classification", + [`پروژه به موقع تحویل شد و همه چیز خوب بود.`, `سیب‌زمینی بی‌کیفیت بود.`, `قیمت و کیفیت عالی`, `خوب نبود اصلا`], + ], + [ + "token-classification", + [ + `این سریال به صورت رسمی در تاریخ دهم می ۲۰۱۱ توسط شبکه فاکس برای پخش رزرو شد.`, + `دفتر مرکزی شرکت پارس‌مینو در شهر اراک در استان مرکزی قرار دارد.`, + `وی در سال ۲۰۱۳ درگذشت و مسئول خاکسپاری و اقوامش برای او مراسم یادبود گرفتند.`, + ], + ], + [ + "question-answering", + [ + { + text: `من کجا زندگی میکنم؟`, + context: `نام من پژمان است و در گرگان زندگی میکنم.`, + }, + { + text: `نامم چیست و کجا زندگی می‌کنم؟`, + context: `اسمم سارا است و در آفریقای جنوبی زندگی میکنم.`, + }, + { + text: `نام من چیست؟`, + context: `من مریم هستم و در تبریز زندگی می‌کنم.`, + }, + { + text: `بیشترین مساحت جنگل آمازون در کدام کشور است؟`, + context: [ + "آمازون نام بزرگ‌ترین جنگل بارانی جهان است که در شمال آمریکای جنوبی قرار گرفته و بیشتر آن در خاک برزیل و پرو", + "جای دارد. بیش از نیمی از همه جنگل‌های بارانی باقی‌مانده در جهان در آمازون قرار دارد.", + "مساحت جنگل‌های آمازون ۵٫۵ میلیون کیلومتر مربع است که بین ۹ کشور تقسیم شده‌است.", + ].join("\n"), + }, + ], + ], + [ + "translation", + [ + "بیشتر مساحت جنگل‌های آمازون در حوضه آبریز رود آمازون و ۱۱۰۰ شاخه آن واقع شده‌است.", + "مردمان نَبَطی از هزاره‌های یکم و دوم پیش از میلاد در این منطقه زندگی می‌کردند.", + ], + ], + [ + "summarization", + [ + [ + "شاهنامه اثر حکیم ابوالقاسم فردوسی توسی، حماسه‌ای منظوم، بر حسب دست نوشته‌های ", + "موجود دربرگیرنده نزدیک به ۵۰٬۰۰۰ بیت تا نزدیک به ۶۱٬۰۰۰ بیت و یکی از ", + "بزرگ‌ترین و برجسته‌ترین سروده‌های حماسی جهان است که سرایش آن دست‌آوردِ ", + "دست‌کم سی سال کارِ پیوستهٔ این سخن‌سرای نامدار ایرانی است. موضوع این شاهکار ادبی،", + " افسانه‌ها و تاریخ ایران از آغاز تا حملهٔ عرب‌ها به ایران در سدهٔ هفتم میلادی است", + " (شاهنامه از سه بخش اسطوره، پهلوانی و تاریخی تشکیل شده‌است) که در چهار", + " دودمان پادشاهیِ پیشدادیان، کیانیان، اشکانیان و ساسانیان گنجانده می‌شود.", + " شاهنامه بر وزن «فَعولُن فعولن فعولن فَعَلْ»، در بحرِ مُتَقارِبِ مثمَّنِ محذوف نگاشته شده‌است.", + "هنگامی که زبان دانش و ادبیات در ایران زبان عربی بود، فردوسی، با سرودن شاهنامه", + " با ویژگی‌های هدف‌مندی که داشت، زبان پارسی را زنده و پایدار کرد. یکی از ", + " بن‌مایه‌های مهمی که فردوسی برای سرودن شاهنامه از آن استفاده کرد،", + " شاهنامهٔ ابومنصوری بود. 
شاهنامه نفوذ بسیاری در جهت‌گیری ", + " فرهنگ فارسی و نیز بازتاب‌های شکوه‌مندی در ادبیات جهان داشته‌است و شاعران ", + " بزرگی مانند گوته و ویکتور هوگو از آن به نیکی یاد کرده‌اند.", + ].join("\n"), + ], + ], + ["text-generation", ["اسم من نازنین است و من", "روزی روزگاری"]], + [ + "fill-mask", + [ + `زندگی یک سوال است و این که چگونه کنیم پاسخ این سوال!`, + `زندگی از مرگ پرسید: چرا همه من را دارند اما از تو متنفرند؟`, + ], + ], +]); + +const MAPPING_AR: PerLanguageMapping = new Map([ + ["text-classification", [`أحبك. أهواك`]], + [ + "token-classification", + [`إسمي محمد وأسكن في برلين`, `إسمي ساره وأسكن في لندن`, `إسمي سامي وأسكن في القدس في فلسطين.`], + ], + [ + "question-answering", + [ + { + text: `أين أسكن؟`, + context: `إسمي محمد وأسكن في بيروت`, + }, + { + text: `أين أسكن؟`, + context: `إسمي ساره وأسكن في لندن`, + }, + { + text: `ما اسمي؟`, + context: `اسمي سعيد وأسكن في حيفا.`, + }, + { + text: `ما لقب خالد بن الوليد بالعربية؟`, + context: `خالد بن الوليد من أبطال وقادة الفتح الإسلامي وقد تحدثت عنه اللغات الإنجليزية والفرنسية والإسبانية ولقب بسيف الله المسلول.`, + }, + ], + ], + ["translation", [`إسمي محمد وأسكن في برلين`, `إسمي ساره وأسكن في لندن`]], + [ + "summarization", + [ + `تقع الأهرامات في الجيزة قرب القاهرة في مصر وقد بنيت منذ عدة قرون، وقيل إنها كانت قبورا للفراعنة وتم بناؤها بعملية هندسية رائعة واستقدمت حجارتها من جبل المقطم وتم نقلها بالسفن أو على الرمل، وما تزال شامخة ويقصدها السياح من كافة أرجاء المعمورة.`, + ], + ], + [ + "text-generation", + [ + `إسمي محمد وأحب أن`, + `دع المكارم لا ترحل لبغيتها - واقعد فإنك أنت الطاعم الكاسي.`, + `لماذا نحن هنا؟`, + `القدس مدينة تاريخية، بناها الكنعانيون في`, + `كان يا ما كان في قديم الزمان`, + ], + ], + ["fill-mask", [`باريس فرنسا.`, `فلسفة الحياة هي .`]], + [ + "sentence-similarity", + [ + { + source_sentence: "هذا شخص سعيد", + sentences: ["هذا كلب سعيد", "هذا شخص سعيد جدا", "اليوم هو يوم مشمس"], + }, + ], + ], +]); + +const MAPPING_BN: PerLanguageMapping = new Map([ + ["text-classification", [`বাঙালির ঘরে ঘরে আজ নবান্ন উৎসব।`]], + [ + "token-classification", + [`আমার নাম জাহিদ এবং আমি ঢাকায় বাস করি।`, `তিনি গুগলে চাকরী করেন।`, `আমার নাম সুস্মিতা এবং আমি কলকাতায় বাস করি।`], + ], + ["translation", [`আমার নাম জাহিদ, আমি রংপুরে বাস করি।`, `আপনি কী আজকে বাসায় আসবেন?`]], + [ + "summarization", + [ + `‘ইকোনমিস্ট’ লিখেছে, অ্যান্টিবডির চার মাস স্থায়ী হওয়ার খবরটি দুই কারণে আনন্দের। অ্যান্টিবডি যত দিন পর্যন্ত শরীরে টিকবে, তত দিন সংক্রমণ থেকে সুরক্ষিত থাকা সম্ভব। অর্থাৎ, এমন এক টিকার প্রয়োজন হবে, যা অ্যান্টিবডির উত্পাদনকে প্ররোচিত করতে পারে এবং দীর্ঘস্থায়ী সুরক্ষা দিতে পারে। এগুলো খুঁজে বের করাও সহজ। এটি আভাস দেয়, ব্যাপক হারে অ্যান্টিবডি শনাক্তকরণ ফলাফল মোটামুটি নির্ভুল হওয়া উচিত। দ্বিতীয় আরেকটি গবেষণার নেতৃত্ব দিয়েছেন যুক্তরাজ্যের মেডিকেল রিসার্চ কাউন্সিলের (এমআরসি) ইমিউনোলজিস্ট তাও দং। তিনি টি-সেল শনাক্তকরণে কাজ করেছেন। টি-সেল শনাক্তকরণের প্রক্রিয়া অবশ্য অ্যান্টিবডির মতো এত আলোচিত নয়। তবে সংক্রমণের বিরুদ্ধে লড়াই এবং দীর্ঘমেয়াদি সুরক্ষায় সমান গুরুত্বপূর্ণ ভূমিকা পালন করে। গবেষণাসংক্রান্ত নিবন্ধ প্রকাশিত হয়েছে ‘নেচার ইমিউনোলজি’ সাময়িকীতে। তাঁরা বলছেন, গবেষণার ক্ষেত্রে কোভিড-১৯ মৃদু সংক্রমণের শিকার ২৮ ব্যক্তির রক্তের নমুনা, ১৪ জন গুরুতর অসুস্থ ও ১৬ জন সুস্থ ব্যক্তির রক্তের নমুনা পরীক্ষা করেছেন। গবেষণা নিবন্ধে বলা হয়, সংক্রমিত ব্যক্তিদের ক্ষেত্রে টি-সেলের তীব্র প্রতিক্রিয়া তাঁরা দেখেছেন। এ ক্ষেত্রে মৃদু ও গুরুতর অসুস্থ ব্যক্তিদের ক্ষেত্রে প্রতিক্রিয়ার ভিন্নতা পাওয়া গেছে।`, + ], + ], + ["text-generation", [`আমি রতন এবং আমি`, `তুমি যদি চাও তবে`, `মিথিলা আজকে বড্ড`]], + ["fill-mask", [`আমি বাংলায় গাই।`, `আমি খুব ভালোবাসি। `]], 
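+ // (default inputs may be plain strings or structured WidgetExample objects, like the question-answering entries below)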
+ [ + "question-answering", + [ + { + text: `প্রথম এশিয়া কাপ ক্রিকেট টুর্নামেন্ট কোথায় অনুষ্ঠিত হয় ?`, + context: `প্রথম টুর্নামেন্ট অনুষ্ঠিত হয় ১৯৮৪ সালে সংযুক্ত আরব আমিরাত এর শারজাহ তে যেখানে কাউন্সিলের মূল অফিস ছিল (১৯৯৫ পর্যন্ত)। ভারত শ্রীলঙ্কার সাথে আন্তরিকতাহীন ক্রিকেট সম্পর্কের কারণে ১৯৮৬ সালের টুর্নামেন্ট বর্জন করে। ১৯৯৩ সালে ভারত ও পাকিস্তান এর মধ্যে রাজনৈতিক অস্থিরতার কারণে এটি বাতিল হয়ে যায়। শ্রীলঙ্কা এশিয়া কাপ শুরু থেকে অংশ গ্রহণ করে আসছে। আন্তর্জাতিক ক্রিকেট কাউন্সিল নিয়ম করে দিয়েছে যে এশিয়া কাপের সকল খেলা অনুষ্ঠিত হবে অফিসিয়াল একদিনের আন্তর্জাতিক ক্রিকেট হিসেবে। এসিসি ঘোষনা অনুযায়ী প্রতি দুই বছর পর পর টুর্নামেন্ট অনুষ্ঠিত হয় ২০০৮ সাল থেকে।`, + }, + { + text: `ভারতীয় বাঙালি কথাসাহিত্যিক মহাশ্বেতা দেবীর মৃত্যু কবে হয় ?`, + context: `২০১৬ সালের ২৩ জুলাই হৃদরোগে আক্রান্ত হয়ে মহাশ্বেতা দেবী কলকাতার বেল ভিউ ক্লিনিকে ভর্তি হন। সেই বছরই ২৮ জুলাই একাধিক অঙ্গ বিকল হয়ে তাঁর মৃত্যু ঘটে। তিনি মধুমেহ, সেপ্টিসেমিয়া ও মূত্র সংক্রমণ রোগেও ভুগছিলেন।`, + }, + { + text: `মাস্টারদা সূর্যকুমার সেনের বাবার নাম কী ছিল ?`, + context: `সূর্য সেন ১৮৯৪ সালের ২২ মার্চ চট্টগ্রামের রাউজান থানার নোয়াপাড়ায় অর্থনৈতিক ভাবে অস্বচ্ছল পরিবারে জন্মগ্রহণ করেন। তাঁর পিতার নাম রাজমনি সেন এবং মাতার নাম শশী বালা সেন। রাজমনি সেনের দুই ছেলে আর চার মেয়ে। সূর্য সেন তাঁদের পরিবারের চতুর্থ সন্তান। দুই ছেলের নাম সূর্য ও কমল। চার মেয়ের নাম বরদাসুন্দরী, সাবিত্রী, ভানুমতী ও প্রমিলা। শৈশবে পিতা মাতাকে হারানো সূর্য সেন কাকা গৌরমনি সেনের কাছে মানুষ হয়েছেন। সূর্য সেন ছেলেবেলা থেকেই খুব মনোযোগী ভাল ছাত্র ছিলেন এবং ধর্মভাবাপন্ন গম্ভীর প্রকৃতির ছিলেন।`, + }, + ], + ], + [ + "sentence-similarity", + [ + { + source_sentence: "সে একজন সুখী ব্যক্তি", + sentences: ["সে হ্যাপি কুকুর", "সে খুব সুখী মানুষ", "আজ একটি রৌদ্রোজ্জ্বল দিন"], + }, + ], + ], +]); + +const MAPPING_MN: PerLanguageMapping = new Map([ + ["text-classification", [`Би чамд хайртай`]], + [ + "token-classification", + [ + `Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, + `Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`, + `Манай улс таван хошуу малтай.`, + ], + ], + [ + "question-answering", + [ + { + text: `Та хаана амьдардаг вэ?`, + context: `Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, + }, + { + text: `Таныг хэн гэдэг вэ?`, + context: `Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, + }, + { + text: `Миний нэрийг хэн гэдэг вэ?`, + context: `Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`, + }, + ], + ], + ["translation", [`Намайг Дорж гэдэг. Би Улаанбаатарт амьдардаг.`, `Намайг Ганбат гэдэг. Би Увс аймагт төрсөн.`]], + [ + "summarization", + [ + `Монгол Улс (1992 оноос хойш) — дорно болон төв Азид оршдог бүрэн эрхт улс. Хойд талаараа Орос, бусад талаараа Хятад улстай хиллэдэг далайд гарцгүй орон. Нийслэл — Улаанбаатар хот. Алтайн нуруунаас Хянган, Соёноос Говь хүрсэн 1 сая 566 мянган км2 уудам нутагтай, дэлхийд нутаг дэвсгэрийн хэмжээгээр 19-рт жагсдаг. 2015 оны эхэнд Монгол Улсын хүн ам 3 сая хүрсэн (135-р олон). Үндсэндээ монгол үндэстэн (95 хувь), мөн хасаг, тува хүн байна. 16-р зуунаас хойш буддын шашин, 20-р зуунаас шашингүй байдал дэлгэрсэн ба албан хэрэгт монгол хэлээр харилцана.`, + ], + ], + [ + "text-generation", + [`Намайг Дорж гэдэг. 
Би`, `Хамгийн сайн дуучин бол`, `Миний дуртай хамтлаг бол`, `Эрт урьдын цагт`], + ], + ["fill-mask", [`Монгол улсын Улаанбаатар хотоос ярьж байна.`, `Миний амьдралын зорилго бол .`]], + [ + "automatic-speech-recognition", + [ + { + label: `Common Voice Train Example`, + src: `https://cdn-media.huggingface.co/common_voice/train/common_voice_mn_18577472.wav`, + }, + { + label: `Common Voice Test Example`, + src: `https://cdn-media.huggingface.co/common_voice/test/common_voice_mn_18577346.wav`, + }, + ], + ], + [ + "text-to-speech", + [ + `Би Монгол улсын иргэн.`, + `Энэхүү жишээ нь цаанаа ямар ч утга агуулаагүй болно`, + `Сар шинэдээ сайхан шинэлэж байна уу?`, + ], + ], + [ + "sentence-similarity", + [ + { + source_sentence: "Энэ бол аз жаргалтай хүн юм", + sentences: ["Энэ бол аз жаргалтай нохой юм", "Энэ бол маш их аз жаргалтай хүн юм", "Өнөөдөр нарлаг өдөр байна"], + }, + ], + ], +]); + +const MAPPING_SI: PerLanguageMapping = new Map([ + ["translation", [`සිංහල ඉතා අලංකාර භාෂාවකි.`, `මෙම තාක්ෂණය භාවිතා කරන ඔබට ස්තූතියි.`]], + ["fill-mask", [`මම ගෙදර .`, ` ඉගෙනීමට ගියාය.`]], +]); + +const MAPPING_DE: PerLanguageMapping = new Map([ + [ + "question-answering", + [ + { + text: `Wo wohne ich?`, + context: `Mein Name ist Wolfgang und ich lebe in Berlin`, + }, + { + text: `Welcher Name wird auch verwendet, um den Amazonas-Regenwald auf Englisch zu beschreiben?`, + context: `Der Amazonas-Regenwald, auf Englisch auch als Amazonien oder Amazonas-Dschungel bekannt, ist ein feuchter Laubwald, der den größten Teil des Amazonas-Beckens Südamerikas bedeckt. Dieses Becken umfasst 7.000.000 Quadratkilometer (2.700.000 Quadratmeilen), von denen 5.500.000 Quadratkilometer (2.100.000 Quadratmeilen) vom Regenwald bedeckt sind. Diese Region umfasst Gebiete von neun Nationen. Der größte Teil des Waldes befindet sich in Brasilien mit 60% des Regenwaldes, gefolgt von Peru mit 13%, Kolumbien mit 10% und geringen Mengen in Venezuela, Ecuador, Bolivien, Guyana, Suriname und Französisch-Guayana. Staaten oder Abteilungen in vier Nationen enthalten "Amazonas" in ihren Namen. Der Amazonas repräsentiert mehr als die Hälfte der verbleibenden Regenwälder des Planeten und umfasst den größten und artenreichsten tropischen Regenwald der Welt mit geschätzten 390 Milliarden Einzelbäumen, die in 16.000 Arten unterteilt sind.`, + }, + ], + ], + [ + "sentence-similarity", + [ + { + source_sentence: "Das ist eine glückliche Person", + sentences: [ + "Das ist ein glücklicher Hund", + "Das ist eine sehr glückliche Person", + "Heute ist ein sonniger Tag", + ], + }, + ], + ], +]); + +const MAPPING_DV: PerLanguageMapping = new Map([ + ["text-classification", [`އަހަރެން ގަޔާވޭ. 
އަހަރެން ލޯބިވޭ`]], + [ + "token-classification", + [ + `އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`, + `އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`, + `އަހަރެންގެ ނަމަކީ އައިޝާ އަދި އަހަރެން ދިރިއުޅެނީ ފޭދޫ، އައްޑޫގަ`, + ], + ], + [ + "question-answering", + [ + { + text: `އަހަރެން ދިރިއުޅެނީ ކޮންތާކު؟`, + context: `އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`, + }, + { + text: `އަހަރެން ދިރިއުޅެނީ ކޮންތާކު؟`, + context: `އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`, + }, + { + text: `އަހަރެންގެ ނަމަކީ ކޮބާ؟`, + context: `އަހަރެންގެ ނަމަކީ އައިޝާ އަދި އަހަރެން ދިރިއުޅެނީ ފޭދޫގަ`, + }, + { + text: `އެމޭޒަން ރެއިންފޮރެސްޓް ސިފަކޮށްދިނުމަށް އިނގިރޭސި ބަހުން ބޭނުންކުރާނީ ކޮންނަމެއް؟`, + context: `އެމޭޒަން ރެއިންފޮރެސްޓް (ޕޯޗުޖީޒް: ފްލޮރެސްޓާ އެމަސޮނިކާ ނުވަތަ އެމަސޮނިއާ؛ ސްޕެނިޝް: ސެލްވާ އެމަސޮނިކާ, އެމަސޮނިއާ ނޫނީ އާންމުކޮށް އެމަޒޯނިއާ؛ ފްރެންޗް: ފޮރޭ އެމެޒޮނިއެން؛ ޑަޗް: އެމެޒޯންރޭގެވައުޑް)، އިގިރޭސި ބަހުން ބުނާ އެމެޒޯނިއާ ނުވަތަ ދަ އެމޭޒަން ޖަންގަލް އަކީ, ސައުތު އެމެރިކާގެ އެމޭޒަން ބޭސިން ސަރަހައްދުގެ ބޮޑުބައެއްގައި ހިމެނޭ މޮއިސްޓް ބޮރޯޑްލީފް ފޮރެސްޓެއެކެވެ. އެމޭޒަން ބޭސިން ސަރަހައްދުގެ ބޮޑު މިނަކީ 7 މިލިއަން އަކަ ކިލޯމީޓަރ (2.7 މިލިއަން އަކަ މައިލް(. މީގެ ތެރެއިން 5.5 މިލިއަން އަކަ ކިލޯމީޓަރ (2.1 މިލިއަން އަކަ މައިލް) އަކީ މި ފޮރެސްޓެވެ. މި ސަރަހައްދުގައި 9 ގައުމަކަށް ނިސްބަތްވާ ޓެރިޓަރީ ހިމެނެއެވެ. 60% އާއިއެކެ އެންމެ ބޮޑު ބައެއް ނިސްބަތްވަނީ ބްރެޒިލްއަށެވެ. އޭގެ ފަހުތުން 13% އާއެކު ޕެރޫ އާއި 10% އާއެކު ކޮލަމްބިއާ އަދި ކުޑަ ބައެއް ހިމެނޭ ގޮތުން ވެނެޒުއެލާ, އެކްއަޑޯ, ބޮލިވިއާ, ގުޔާނާ, ސުރިނާމް އަދި ފްރެންޗް ގްއާނާ އަށް ވެސް ނިސްބަތްވެއެވެ. މީގެ ތެރެއިން 4 ގައުމެއްގައި "އެމެޒޮނާސް" ހިމަނައިގެން ސްޓޭޓް ނުވަތަ ޑިޕާޓްމަންޓް އަކަށް ނަންދީފައިވެއެވެ. މުޅި ދުނިޔޭގައި ބާކީ ހުރި ރެއިންފޮރެސްޓްގެ ތެރެއިން ދެބައިކުޅަ އެއްބަޔަށްވުރެބޮޑުވަރެއް އެމޭޒޮން ރެއިންފޮރެސްޓް ހިއްސާކުރެއެވެ. މިއީ މުޅި ދުނިޔެއިން އެންމޮ ބޮޑު އަދި އެންމެ ބައޮޑައިވަރސް ރެއިންފޮރެސްޓް ޓްރެކްޓެވެ. ލަފާކުރެވޭ ގޮތުން 16 ހާސް ސްޕީޝީސްއަށް ބެހިގެންވާ 390 މިލިއަން ވައްތަރުގެ ގަސް މިތާގައި ހިމެނެއެވެ`, + }, + ], + ], + [ + "translation", + [ + `އަހަރެންގެ ނަމަކީ އަހުމަދު އަދި އަހަރެން ދިރިއުޅެނީ މާލޭގަ`, + `އަހަރެންގެ ނަމަކީ ސާރާ އަދި އަހަރެން ދިރިއުޅެނީ އުތީމުގަ`, + ], + ], + [ + "summarization", + [ + `ޓަވަރުގެ އުސްމިނަކީ 324 މީޓަރު، އެއީ ގާތްގަނޑަކަށް 81 ބުރީގެ އިމާރާތަކާއި އެއްވަރެވެ. އެއީ ޕެރިސްގައި ހުރި އެންމެ އުސް އިމާރާތެވެ. އޭގެ ހަތަރެސްކަނަށް ހުރި ބުޑުގެ ދިގުމިނަކީ ކޮންމެ ފަރާތަކުން 125 މީޓަރެވެ. (410 ފޫޓު) އައިފިލް ޓަވަރު ބިނާކުރި އިރު، ވޮޝިންގްޓަން މޮނިއުމެންޓްގެ އުސްމިން ފަހަނައަޅާ ގޮސް، ދުނިޔޭގައި މީހުން އުފެއްދި ތަންތަނުގެ ތެރެއިން އެންމެ އުސް ތަނުގެ ލަގަބު ލިބުނެވެ. އަދި 1930 ގައި ނިއު ޔޯކްގެ ކްރައިސްލަރ ބިލްޑިންގް ބިނާކުރުމާއި ހަމައަށް 41 އަހަރު ވަންދެން މިލަގަބު ހިފެހެއްޓިއެވެ. މިއީ 300 މީޓަރަށް ވުރެ އުސްކޮށް އިމާރާތްކުރެވުނު ފުރަތަމަ ތަނެވެ. 1957 ގައި ޓަވަރުގެ އެންމެ މަތީގައި ހަރުކުރެވުނު ބްރޯޑްކާސްޓިންގ އޭރިއަލްގެ ސަބަބުން މިހާރު މި ޓަވަރު ކްރައިސްލަރ ބިލްޑިންގއަށް ވުރެ 5.2 މީޓަރ (17 ފޫޓު) އުހެވެ. މި ޓްރާންސްމިޓަރު ނުލާ، އައިފިލް ޓަވަރަކީ، މިލާއު ވިއާޑަކްޓަށް ފަހު ފްރާންސްގައި ހުރި 2 ވަނައަށް އެންމެ އުސް ފްރީސްޓޭންޑިންގ އިމާރާތެވެ`, + ], + ], + [ + "text-generation", + [ + `އަހަރެންގެ ނަމަކީ ޔޫސުފް އަދި އަހަރެންގެ މައިގަނޑު`, + `އަހަރެންގެ ނަމަކީ މަރިއަމް، އަހަރެން އެންމެ ގަޔާވާ`, + `އަހަރެންގެ ނަމަކީ ފާތުމަތު އަދި އަހަރެން`, + `،އެއް ޒަމާނެއްގައި`, + ], + ], + ["fill-mask", [`. 
މާލެ އަކީ ދިވެހިރާއްޖޭގެ`, `ގަރުދިޔައަކީ ދިވެހިންގެ މެދުގައި ކެއުމެއް.`]], +]); + +export const MAPPING_DEFAULT_WIDGET = new Map<LanguageCode, PerLanguageMapping>([ + ["en", MAPPING_EN], + ["zh", MAPPING_ZH], + ["fr", MAPPING_FR], + ["es", MAPPING_ES], + ["ru", MAPPING_RU], + ["uk", MAPPING_UK], + ["it", MAPPING_IT], + ["fa", MAPPING_FA], + ["ar", MAPPING_AR], + ["bn", MAPPING_BN], + ["mn", MAPPING_MN], + ["si", MAPPING_SI], + ["de", MAPPING_DE], + ["dv", MAPPING_DV], +]); diff --git a/packages/widgets/src/lib/interfaces/InferenceDisplayability.ts b/packages/widgets/src/lib/interfaces/InferenceDisplayability.ts new file mode 100644 index 0000000000000000000000000000000000000000..06f3fd4037da41d531e45a75f8c5c9b42654a6a2 --- /dev/null +++ b/packages/widgets/src/lib/interfaces/InferenceDisplayability.ts @@ -0,0 +1,14 @@ +export enum InferenceDisplayability { + /** + * Yes + */ + Yes = "Yes", + /** + * And then, all the possible reasons why it's no: + */ + ExplicitOptOut = "ExplicitOptOut", + CustomCode = "CustomCode", + LibraryNotDetected = "LibraryNotDetected", + PipelineNotDetected = "PipelineNotDetected", + PipelineLibraryPairNotSupported = "PipelineLibraryPairNotSupported", +} diff --git a/packages/widgets/src/lib/interfaces/Libraries.ts b/packages/widgets/src/lib/interfaces/Libraries.ts new file mode 100644 index 0000000000000000000000000000000000000000..d723b627c05c4ccc79a489d349eb9e14015ef6ff --- /dev/null +++ b/packages/widgets/src/lib/interfaces/Libraries.ts @@ -0,0 +1,766 @@ +import type { ModelLibraryKey } from "@huggingface/tasks"; +/* eslint-disable @typescript-eslint/naming-convention */ +import type { ModelData } from "./Types.js"; + +/** + * Elements configurable by a model library. + */ +export interface LibraryUiElement { + /** + * Name displayed on the main + * call-to-action button on the model page. + */ + btnLabel: string; + /** + * Repo name + */ + repoName: string; + /** + * URL to library's repo + */ + repoUrl: string; + /** + * URL to library's docs + */ + docsUrl?: string; + /** + * Code snippet displayed on model page + */ + snippets: (model: ModelData) => string[]; +} + +function nameWithoutNamespace(modelId: string): string { + const splitted = modelId.split("/"); + return splitted.length === 1 ? 
splitted[0] : splitted[1]; +} + +//#region snippets + +const adapter_transformers = (model: ModelData) => [ + `from transformers import ${model.config?.adapter_transformers?.model_class} + +model = ${model.config?.adapter_transformers?.model_class}.from_pretrained("${model.config?.adapter_transformers?.model_name}") +model.load_adapter("${model.id}", source="hf")`, +]; + +const allennlpUnknown = (model: ModelData) => [ + `import allennlp_models +from allennlp.predictors.predictor import Predictor + +predictor = Predictor.from_path("hf://${model.id}")`, +]; + +const allennlpQuestionAnswering = (model: ModelData) => [ + `import allennlp_models +from allennlp.predictors.predictor import Predictor + +predictor = Predictor.from_path("hf://${model.id}") +predictor_input = {"passage": "My name is Wolfgang and I live in Berlin", "question": "Where do I live?"} +predictions = predictor.predict_json(predictor_input)`, +]; + +const allennlp = (model: ModelData) => { + if (model.tags?.includes("question-answering")) { + return allennlpQuestionAnswering(model); + } + return allennlpUnknown(model); +}; + +const asteroid = (model: ModelData) => [ + `from asteroid.models import BaseModel + +model = BaseModel.from_pretrained("${model.id}")`, +]; + +function get_base_diffusers_model(model: ModelData): string { + return model.cardData?.base_model ?? "fill-in-base-model"; +} + +const bertopic = (model: ModelData) => [ + `from bertopic import BERTopic + +model = BERTopic.load("${model.id}")`, +]; + +const diffusers_default = (model: ModelData) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${model.id}")`, +]; + +const diffusers_controlnet = (model: ModelData) => [ + `from diffusers import ControlNetModel, StableDiffusionControlNetPipeline + +controlnet = ControlNetModel.from_pretrained("${model.id}") +pipeline = StableDiffusionControlNetPipeline.from_pretrained( + "${get_base_diffusers_model(model)}", controlnet=controlnet +)`, +]; + +const diffusers_lora = (model: ModelData) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}") +pipeline.load_lora_weights("${model.id}")`, +]; + +const diffusers_textual_inversion = (model: ModelData) => [ + `from diffusers import DiffusionPipeline + +pipeline = DiffusionPipeline.from_pretrained("${get_base_diffusers_model(model)}") +pipeline.load_textual_inversion("${model.id}")`, +]; + +const diffusers = (model: ModelData) => { + if (model.tags?.includes("controlnet")) { + return diffusers_controlnet(model); + } else if (model.tags?.includes("lora")) { + return diffusers_lora(model); + } else if (model.tags?.includes("textual_inversion")) { + return diffusers_textual_inversion(model); + } else { + return diffusers_default(model); + } +}; + +const espnetTTS = (model: ModelData) => [ + `from espnet2.bin.tts_inference import Text2Speech + +model = Text2Speech.from_pretrained("${model.id}") + +speech, *_ = model("text to generate speech from")`, +]; + +const espnetASR = (model: ModelData) => [ + `from espnet2.bin.asr_inference import Speech2Text + +model = Speech2Text.from_pretrained( + "${model.id}" +) + +speech, rate = soundfile.read("speech.wav") +text, *_ = model(speech)[0]`, +]; + +const espnetUnknown = () => [`unknown model type (must be text-to-speech or automatic-speech-recognition)`]; + +const espnet = (model: ModelData) => { + if (model.tags?.includes("text-to-speech")) { + return espnetTTS(model); + } else if 
(model.tags?.includes("automatic-speech-recognition")) { + return espnetASR(model); + } + return espnetUnknown(); +}; + +const fairseq = (model: ModelData) => [ + `from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub + +models, cfg, task = load_model_ensemble_and_task_from_hf_hub( + "${model.id}" +)`, +]; + +const flair = (model: ModelData) => [ + `from flair.models import SequenceTagger + +tagger = SequenceTagger.load("${model.id}")`, +]; + +const keras = (model: ModelData) => [ + `from huggingface_hub import from_pretrained_keras + +model = from_pretrained_keras("${model.id}") +`, +]; + +const open_clip = (model: ModelData) => [ + `import open_clip + +model, preprocess_train, preprocess_val = open_clip.create_model_and_transforms('hf-hub:${model.id}') +tokenizer = open_clip.get_tokenizer('hf-hub:${model.id}')`, +]; + +const paddlenlp = (model: ModelData) => { + if (model.config?.architectures?.[0]) { + const architecture = model.config.architectures[0]; + return [ + [ + `from paddlenlp.transformers import AutoTokenizer, ${architecture}`, + "", + `tokenizer = AutoTokenizer.from_pretrained("${model.id}"${ + model.private ? ", use_auth_token=True" : "" + }, from_hf_hub=True)`, + `model = ${architecture}.from_pretrained("${model.id}"${ + model.private ? ", use_auth_token=True" : "" + }, from_hf_hub=True)`, + ].join("\n"), + ]; + } else { + return [ + [ + `# ⚠️ Type of model unknown`, + `from paddlenlp.transformers import AutoTokenizer, AutoModel`, + "", + `tokenizer = AutoTokenizer.from_pretrained("${model.id}"${ + model.private ? ", use_auth_token=True" : "" + }, from_hf_hub=True)`, + `model = AutoModel.from_pretrained("${model.id}"${ + model.private ? ", use_auth_token=True" : "" + }, from_hf_hub=True)`, + ].join("\n"), + ]; + } +}; + +const pyannote_audio_pipeline = (model: ModelData) => [ + `from pyannote.audio import Pipeline + +pipeline = Pipeline.from_pretrained("${model.id}") + +# inference on the whole file +pipeline("file.wav") + +# inference on an excerpt +from pyannote.core import Segment +excerpt = Segment(start=2.0, end=5.0) + +from pyannote.audio import Audio +waveform, sample_rate = Audio().crop("file.wav", excerpt) +pipeline({"waveform": waveform, "sample_rate": sample_rate})`, +]; + +const pyannote_audio_model = (model: ModelData) => [ + `from pyannote.audio import Model, Inference + +model = Model.from_pretrained("${model.id}") +inference = Inference(model) + +# inference on the whole file +inference("file.wav") + +# inference on an excerpt +from pyannote.core import Segment +excerpt = Segment(start=2.0, end=5.0) +inference.crop("file.wav", excerpt)`, +]; + +const pyannote_audio = (model: ModelData) => { + if (model.tags?.includes("pyannote-audio-pipeline")) { + return pyannote_audio_pipeline(model); + } + return pyannote_audio_model(model); +}; + +const tensorflowttsTextToMel = (model: ModelData) => [ + `from tensorflow_tts.inference import AutoProcessor, TFAutoModel + +processor = AutoProcessor.from_pretrained("${model.id}") +model = TFAutoModel.from_pretrained("${model.id}") +`, +]; + +const tensorflowttsMelToWav = (model: ModelData) => [ + `from tensorflow_tts.inference import TFAutoModel + +model = TFAutoModel.from_pretrained("${model.id}") +audios = model.inference(mels) +`, +]; + +const tensorflowttsUnknown = (model: ModelData) => [ + `from tensorflow_tts.inference import TFAutoModel + +model = TFAutoModel.from_pretrained("${model.id}") +`, +]; + +const tensorflowtts = (model: ModelData) => { + if (model.tags?.includes("text-to-mel")) { + 
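+		// A text-to-mel model only predicts a mel spectrogram; the mel-to-wav branch below covers the vocoder models that turn spectrograms into audio.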
return tensorflowttsTextToMel(model); + } else if (model.tags?.includes("mel-to-wav")) { + return tensorflowttsMelToWav(model); + } + return tensorflowttsUnknown(model); +}; + +const timm = (model: ModelData) => [ + `import timm + +model = timm.create_model("hf_hub:${model.id}", pretrained=True)`, +]; + +const skopsPickle = (model: ModelData, modelFile: string) => { + return [ + `import joblib +from skops.hub_utils import download +download("${model.id}", "path_to_folder") +model = joblib.load( + "${modelFile}" +) +# only load pickle files from sources you trust +# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`, + ]; +}; + +const skopsFormat = (model: ModelData, modelFile: string) => { + return [ + `from skops.hub_utils import download +from skops.io import load +download("${model.id}", "path_to_folder") +# make sure model file is in skops format +# if model is a pickle file, make sure it's from a source you trust +model = load("path_to_folder/${modelFile}")`, + ]; +}; + +const skopsJobLib = (model: ModelData) => { + return [ + `from huggingface_hub import hf_hub_download +import joblib +model = joblib.load( + hf_hub_download("${model.id}", "sklearn_model.joblib") +) +# only load pickle files from sources you trust +# read more about it here https://skops.readthedocs.io/en/stable/persistence.html`, + ]; +}; + +const sklearn = (model: ModelData) => { + if (model.tags?.includes("skops")) { + const skopsmodelFile = model.config?.sklearn?.filename; + const skopssaveFormat = model.config?.sklearn?.model_format; + if (!skopsmodelFile) { + return [`# ⚠️ Model filename not specified in config.json`]; + } + if (skopssaveFormat === "pickle") { + return skopsPickle(model, skopsmodelFile); + } else { + return skopsFormat(model, skopsmodelFile); + } + } else { + return skopsJobLib(model); + } +}; + +const fastai = (model: ModelData) => [ + `from huggingface_hub import from_pretrained_fastai + +learn = from_pretrained_fastai("${model.id}")`, +]; + +const sampleFactory = (model: ModelData) => [ + `python -m sample_factory.huggingface.load_from_hub -r ${model.id} -d ./train_dir`, +]; + +const sentenceTransformers = (model: ModelData) => [ + `from sentence_transformers import SentenceTransformer + +model = SentenceTransformer("${model.id}")`, +]; + +const spacy = (model: ModelData) => [ + `!pip install https://huggingface.co/${model.id}/resolve/main/${nameWithoutNamespace(model.id)}-any-py3-none-any.whl + +# Using spacy.load(). +import spacy +nlp = spacy.load("${nameWithoutNamespace(model.id)}") + +# Importing as module. 
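+# (the wheel installed above packages the pipeline as an importable Python module)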
+import ${nameWithoutNamespace(model.id)} +nlp = ${nameWithoutNamespace(model.id)}.load()`, +]; + +const span_marker = (model: ModelData) => [ + `from span_marker import SpanMarkerModel + +model = SpanMarkerModel.from_pretrained("${model.id}")`, +]; + +const stanza = (model: ModelData) => [ + `import stanza + +stanza.download("${nameWithoutNamespace(model.id).replace("stanza-", "")}") +nlp = stanza.Pipeline("${nameWithoutNamespace(model.id).replace("stanza-", "")}")`, +]; + +const speechBrainMethod = (speechbrainInterface: string) => { + switch (speechbrainInterface) { + case "EncoderClassifier": + return "classify_file"; + case "EncoderDecoderASR": + case "EncoderASR": + return "transcribe_file"; + case "SpectralMaskEnhancement": + return "enhance_file"; + case "SepformerSeparation": + return "separate_file"; + default: + return undefined; + } +}; + +const speechbrain = (model: ModelData) => { + const speechbrainInterface = model.config?.speechbrain?.interface; + if (speechbrainInterface === undefined) { + return [`# interface not specified in config.json`]; + } + + const speechbrainMethod = speechBrainMethod(speechbrainInterface); + if (speechbrainMethod === undefined) { + return [`# interface in config.json invalid`]; + } + + return [ + `from speechbrain.pretrained import ${speechbrainInterface} +model = ${speechbrainInterface}.from_hparams( + "${model.id}" +) +model.${speechbrainMethod}("file.wav")`, + ]; +}; + +const transformers = (model: ModelData) => { + const info = model.transformersInfo; + if (!info) { + return [`# ⚠️ Type of model unknown`]; + } + const remote_code_snippet = info.custom_class ? ", trust_remote_code=True" : ""; + + let autoSnippet: string; + if (info.processor) { + const varName = + info.processor === "AutoTokenizer" + ? "tokenizer" + : info.processor === "AutoFeatureExtractor" + ? "extractor" + : "processor"; + autoSnippet = [ + "# Load model directly", + `from transformers import ${info.processor}, ${info.auto_model}`, + "", + `${varName} = ${info.processor}.from_pretrained("${model.id}"` + remote_code_snippet + ")", + `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")", + ].join("\n"); + } else { + autoSnippet = [ + "# Load model directly", + `from transformers import ${info.auto_model}`, + `model = ${info.auto_model}.from_pretrained("${model.id}"` + remote_code_snippet + ")", + ].join("\n"); + } + + if (model.pipeline_tag) { + const pipelineSnippet = [ + "# Use a pipeline as a high-level helper", + "from transformers import pipeline", + "", + `pipe = pipeline("${model.pipeline_tag}", model="${model.id}"` + remote_code_snippet + ")", + ].join("\n"); + return [pipelineSnippet, autoSnippet]; + } + return [autoSnippet]; +}; + +const transformersJS = (model: ModelData) => { + if (!model.pipeline_tag) { + return [`// ⚠️ Unknown pipeline tag`]; + } + + const libName = "@xenova/transformers"; + + return [ + `// npm i ${libName} +import { pipeline } from '${libName}'; + +// Allocate pipeline +const pipe = await pipeline('${model.pipeline_tag}', '${model.id}');`, + ]; +}; + +const peftTask = (peftTaskType?: string) => { + switch (peftTaskType) { + case "CAUSAL_LM": + return "CausalLM"; + case "SEQ_2_SEQ_LM": + return "Seq2SeqLM"; + case "TOKEN_CLS": + return "TokenClassification"; + case "SEQ_CLS": + return "SequenceClassification"; + default: + return undefined; + } +}; + +const peft = (model: ModelData) => { + const { base_model_name: peftBaseModel, task_type: peftTaskType } = model.config?.peft ?? 
{}; + const pefttask = peftTask(peftTaskType); + if (!pefttask) { + return [`Task type is invalid.`]; + } + if (!peftBaseModel) { + return [`Base model is not found.`]; + } + + return [ + `from peft import PeftModel, PeftConfig +from transformers import AutoModelFor${pefttask} + +config = PeftConfig.from_pretrained("${model.id}") +model = AutoModelFor${pefttask}.from_pretrained("${peftBaseModel}") +model = PeftModel.from_pretrained(model, "${model.id}")`, + ]; +}; + +const fasttext = (model: ModelData) => [ + `from huggingface_hub import hf_hub_download +import fasttext + +model = fasttext.load_model(hf_hub_download("${model.id}", "model.bin"))`, +]; + +const stableBaselines3 = (model: ModelData) => [ + `from huggingface_sb3 import load_from_hub +checkpoint = load_from_hub( + repo_id="${model.id}", + filename="{MODEL FILENAME}.zip", +)`, +]; + +const nemoDomainResolver = (domain: string, model: ModelData): string[] | undefined => { + switch (domain) { + case "ASR": + return [ + `import nemo.collections.asr as nemo_asr +asr_model = nemo_asr.models.ASRModel.from_pretrained("${model.id}") + +transcriptions = asr_model.transcribe(["file.wav"])`, + ]; + default: + return undefined; + } +}; + +const mlAgents = (model: ModelData) => [`mlagents-load-from-hf --repo-id="${model.id}" --local-dir="./downloads"`]; + +const nemo = (model: ModelData) => { + let command: string[] | undefined = undefined; + // Resolve the tag to a nemo domain/sub-domain + if (model.tags?.includes("automatic-speech-recognition")) { + command = nemoDomainResolver("ASR", model); + } + + return command ?? [`# tag did not correspond to a valid NeMo domain.`]; +}; + +const pythae = (model: ModelData) => [ + `from pythae.models import AutoModel + +model = AutoModel.load_from_hf_hub("${model.id}")`, +]; + +//#endregion + +export const MODEL_LIBRARIES_UI_ELEMENTS: Partial<Record<ModelLibraryKey, LibraryUiElement>> = { + "adapter-transformers": { + btnLabel: "Adapter Transformers", + repoName: "adapter-transformers", + repoUrl: "https://github.com/Adapter-Hub/adapter-transformers", + docsUrl: "https://huggingface.co/docs/hub/adapter-transformers", + snippets: adapter_transformers, + }, + allennlp: { + btnLabel: "AllenNLP", + repoName: "AllenNLP", + repoUrl: "https://github.com/allenai/allennlp", + docsUrl: "https://huggingface.co/docs/hub/allennlp", + snippets: allennlp, + }, + asteroid: { + btnLabel: "Asteroid", + repoName: "Asteroid", + repoUrl: "https://github.com/asteroid-team/asteroid", + docsUrl: "https://huggingface.co/docs/hub/asteroid", + snippets: asteroid, + }, + bertopic: { + btnLabel: "BERTopic", + repoName: "BERTopic", + repoUrl: "https://github.com/MaartenGr/BERTopic", + snippets: bertopic, + }, + diffusers: { + btnLabel: "Diffusers", + repoName: "🤗/diffusers", + repoUrl: "https://github.com/huggingface/diffusers", + docsUrl: "https://huggingface.co/docs/hub/diffusers", + snippets: diffusers, + }, + espnet: { + btnLabel: "ESPnet", + repoName: "ESPnet", + repoUrl: "https://github.com/espnet/espnet", + docsUrl: "https://huggingface.co/docs/hub/espnet", + snippets: espnet, + }, + fairseq: { + btnLabel: "Fairseq", + repoName: "fairseq", + repoUrl: "https://github.com/pytorch/fairseq", + snippets: fairseq, + }, + flair: { + btnLabel: "Flair", + repoName: "Flair", + repoUrl: "https://github.com/flairNLP/flair", + docsUrl: "https://huggingface.co/docs/hub/flair", + snippets: flair, + }, + keras: { + btnLabel: "Keras", + repoName: "Keras", + repoUrl: "https://github.com/keras-team/keras", + docsUrl: "https://huggingface.co/docs/hub/keras", + snippets: keras, + },
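+	// (keys here are constrained to ModelLibraryKey values by the Record type above)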
nemo: { + btnLabel: "NeMo", + repoName: "NeMo", + repoUrl: "https://github.com/NVIDIA/NeMo", + snippets: nemo, + }, + open_clip: { + btnLabel: "OpenCLIP", + repoName: "OpenCLIP", + repoUrl: "https://github.com/mlfoundations/open_clip", + snippets: open_clip, + }, + paddlenlp: { + btnLabel: "paddlenlp", + repoName: "PaddleNLP", + repoUrl: "https://github.com/PaddlePaddle/PaddleNLP", + docsUrl: "https://huggingface.co/docs/hub/paddlenlp", + snippets: paddlenlp, + }, + peft: { + btnLabel: "PEFT", + repoName: "PEFT", + repoUrl: "https://github.com/huggingface/peft", + snippets: peft, + }, + "pyannote-audio": { + btnLabel: "pyannote.audio", + repoName: "pyannote-audio", + repoUrl: "https://github.com/pyannote/pyannote-audio", + snippets: pyannote_audio, + }, + "sentence-transformers": { + btnLabel: "sentence-transformers", + repoName: "sentence-transformers", + repoUrl: "https://github.com/UKPLab/sentence-transformers", + docsUrl: "https://huggingface.co/docs/hub/sentence-transformers", + snippets: sentenceTransformers, + }, + sklearn: { + btnLabel: "Scikit-learn", + repoName: "Scikit-learn", + repoUrl: "https://github.com/scikit-learn/scikit-learn", + snippets: sklearn, + }, + fastai: { + btnLabel: "fastai", + repoName: "fastai", + repoUrl: "https://github.com/fastai/fastai", + docsUrl: "https://huggingface.co/docs/hub/fastai", + snippets: fastai, + }, + spacy: { + btnLabel: "spaCy", + repoName: "spaCy", + repoUrl: "https://github.com/explosion/spaCy", + docsUrl: "https://huggingface.co/docs/hub/spacy", + snippets: spacy, + }, + "span-marker": { + btnLabel: "SpanMarker", + repoName: "SpanMarkerNER", + repoUrl: "https://github.com/tomaarsen/SpanMarkerNER", + docsUrl: "https://huggingface.co/docs/hub/span_marker", + snippets: span_marker, + }, + speechbrain: { + btnLabel: "speechbrain", + repoName: "speechbrain", + repoUrl: "https://github.com/speechbrain/speechbrain", + docsUrl: "https://huggingface.co/docs/hub/speechbrain", + snippets: speechbrain, + }, + stanza: { + btnLabel: "Stanza", + repoName: "stanza", + repoUrl: "https://github.com/stanfordnlp/stanza", + docsUrl: "https://huggingface.co/docs/hub/stanza", + snippets: stanza, + }, + tensorflowtts: { + btnLabel: "TensorFlowTTS", + repoName: "TensorFlowTTS", + repoUrl: "https://github.com/TensorSpeech/TensorFlowTTS", + snippets: tensorflowtts, + }, + timm: { + btnLabel: "timm", + repoName: "pytorch-image-models", + repoUrl: "https://github.com/rwightman/pytorch-image-models", + docsUrl: "https://huggingface.co/docs/hub/timm", + snippets: timm, + }, + transformers: { + btnLabel: "Transformers", + repoName: "🤗/transformers", + repoUrl: "https://github.com/huggingface/transformers", + docsUrl: "https://huggingface.co/docs/hub/transformers", + snippets: transformers, + }, + "transformers.js": { + btnLabel: "Transformers.js", + repoName: "transformers.js", + repoUrl: "https://github.com/xenova/transformers.js", + docsUrl: "https://huggingface.co/docs/hub/transformers-js", + snippets: transformersJS, + }, + fasttext: { + btnLabel: "fastText", + repoName: "fastText", + repoUrl: "https://fasttext.cc/", + snippets: fasttext, + }, + "sample-factory": { + btnLabel: "sample-factory", + repoName: "sample-factory", + repoUrl: "https://github.com/alex-petrenko/sample-factory", + docsUrl: "https://huggingface.co/docs/hub/sample-factory", + snippets: sampleFactory, + }, + "stable-baselines3": { + btnLabel: "stable-baselines3", + repoName: "stable-baselines3", + repoUrl: "https://github.com/huggingface/huggingface_sb3", + docsUrl: 
"https://huggingface.co/docs/hub/stable-baselines3", + snippets: stableBaselines3, + }, + "ml-agents": { + btnLabel: "ml-agents", + repoName: "ml-agents", + repoUrl: "https://github.com/huggingface/ml-agents", + docsUrl: "https://huggingface.co/docs/hub/ml-agents", + snippets: mlAgents, + }, + pythae: { + btnLabel: "pythae", + repoName: "pythae", + repoUrl: "https://github.com/clementchadebec/benchmark_VAE", + snippets: pythae, + }, +} as const; diff --git a/packages/widgets/src/lib/interfaces/LibrariesToTasks.ts b/packages/widgets/src/lib/interfaces/LibrariesToTasks.ts new file mode 100644 index 0000000000000000000000000000000000000000..780323f9f5514a3eb2d5dc3f6f4afb535c59352e --- /dev/null +++ b/packages/widgets/src/lib/interfaces/LibrariesToTasks.ts @@ -0,0 +1,46 @@ +import type { ModelLibraryKey, PipelineType } from "@huggingface/tasks"; + +/** + * Mapping from library name (excluding Transformers) to its supported tasks. + * Inference API should be disabled for all other (library, task) pairs beyond this mapping. + * As an exception, we assume Transformers supports all inference tasks. + * This mapping is generated automatically by "python-api-export-tasks" action in huggingface/api-inference-community repo upon merge. + * Ref: https://github.com/huggingface/api-inference-community/pull/158 + */ +export const LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS: Partial> = { + "adapter-transformers": ["question-answering", "text-classification", "token-classification"], + allennlp: ["question-answering"], + asteroid: [ + // "audio-source-separation", + "audio-to-audio", + ], + bertopic: ["text-classification"], + diffusers: ["image-to-image", "text-to-image"], + doctr: ["object-detection"], + espnet: ["text-to-speech", "automatic-speech-recognition"], + fairseq: ["text-to-speech", "audio-to-audio"], + fastai: ["image-classification"], + fasttext: ["feature-extraction", "text-classification"], + flair: ["token-classification"], + k2: ["automatic-speech-recognition"], + keras: ["image-classification"], + nemo: ["automatic-speech-recognition"], + open_clip: ["zero-shot-classification", "zero-shot-image-classification"], + paddlenlp: ["conversational", "fill-mask", "summarization", "zero-shot-classification"], + peft: ["text-generation"], + "pyannote-audio": ["automatic-speech-recognition"], + "sentence-transformers": ["feature-extraction", "sentence-similarity"], + sklearn: ["tabular-classification", "tabular-regression", "text-classification"], + spacy: ["token-classification", "text-classification", "sentence-similarity"], + "span-marker": ["token-classification"], + speechbrain: [ + "audio-classification", + "audio-to-audio", + "automatic-speech-recognition", + "text-to-speech", + "text2text-generation", + ], + stanza: ["token-classification"], + timm: ["image-classification"], + mindspore: ["image-classification"], +}; diff --git a/packages/widgets/src/lib/interfaces/README.md b/packages/widgets/src/lib/interfaces/README.md new file mode 100644 index 0000000000000000000000000000000000000000..d2de7746091e0410a6c941421dc3f1a484c14baf --- /dev/null +++ b/packages/widgets/src/lib/interfaces/README.md @@ -0,0 +1,13 @@ +# Interfaces + +This folder contains the definition files (written in Typescript) for the huggingface.co hub's: + +- **pipeline types** a.k.a. 
**task types** (used to determine which widget to display on the model page, and which inference API to run) +- **default widget inputs** (when they aren't provided in the model card) +- definitions and UI elements for **third party libraries**. + +Please add to any of those definitions by opening a PR. Thanks 🔥 + +⚠️ The hub's definitive doc is at https://huggingface.co/docs/hub. + +## Feedback (feature requests, bugs, etc.) is super welcome 💙💚💛💜♥️🧡 diff --git a/packages/widgets/src/lib/interfaces/Types.ts b/packages/widgets/src/lib/interfaces/Types.ts new file mode 100644 index 0000000000000000000000000000000000000000..7c9ef752fbf0e268aa7474c7fa96c4100df96a3f --- /dev/null +++ b/packages/widgets/src/lib/interfaces/Types.ts @@ -0,0 +1,111 @@ +import type { PipelineType } from "@huggingface/tasks"; +import type { WidgetExample } from "../components/InferenceWidget/shared/WidgetExample.js"; +import type { InferenceDisplayability } from "./InferenceDisplayability.js"; + +/** + * Public interface for model metadata + */ +export interface ModelData { + /** + * id of model (e.g. 'user/repo_name') + */ + id: string; + /** + * Kept for backward compatibility + */ + modelId?: string; + /** + * Whether or not to enable inference widget for this model + */ + inference: InferenceDisplayability; + /** + * is this model private? + */ + private?: boolean; + /** + * this dictionary has useful information about the model configuration + */ + config?: Record<string, unknown> & { + adapter_transformers?: { model_class?: string; model_name?: string }; + architectures?: string[]; + sklearn?: { + filename?: string; + model_format?: string; + }; + speechbrain?: { + interface?: string; + }; + peft?: { + base_model_name?: string; + task_type?: string; + }; + }; + /** + * all the model tags + */ + tags?: string[]; + /** + * transformers-specific info to display in the code sample. + */ + transformersInfo?: TransformersInfo; + /** + * Pipeline type + */ + pipeline_tag?: PipelineType | undefined; + /** + * for relevant models, get mask token + */ + mask_token?: string | undefined; + /** + * Example data that will be fed into the widget. + * + * can be set in the model card metadata (under `widget`), + * or by default in `DefaultWidget.ts` + */ + widgetData?: WidgetExample[] | undefined; + /** + * Parameters that will be used by the widget when calling Inference API + * https://huggingface.co/docs/api-inference/detailed_parameters + * + * can be set in the model card metadata (under `inference/parameters`) + * Example: + * inference: + * parameters: + * key: val + */ + cardData?: { + inference?: + | boolean + | { + parameters?: Record<string, unknown>; + }; + base_model?: string; + }; + /** + * Library name + * Example: transformers, SpeechBrain, Stanza, etc. + */ + library_name?: string; +} + +/** + * transformers-specific info to display in the code sample. + */ +export interface TransformersInfo { + /** + * e.g. AutoModelForSequenceClassification + */ + auto_model: string; + /** + * if set in config.json's auto_map + */ + custom_class?: string; + /** + * e.g. text-classification + */ + pipeline_tag?: PipelineType; + /** + * e.g. 
"AutoTokenizer" | "AutoFeatureExtractor" | "AutoProcessor" + */ + processor?: string; +} diff --git a/packages/widgets/src/lib/utils/ViewUtils.ts b/packages/widgets/src/lib/utils/ViewUtils.ts new file mode 100644 index 0000000000000000000000000000000000000000..86cdd0df1401714eb33a5034689f57a63016445c --- /dev/null +++ b/packages/widgets/src/lib/utils/ViewUtils.ts @@ -0,0 +1,138 @@ +import type { PipelineType } from "@huggingface/tasks"; + +const ESCAPED = { + '"': """, + "'": "'", + "&": "&", + "<": "<", + ">": ">", +}; + +/** + * Returns a function that clamps input value to range [min <= x <= max]. + */ +export function clamp(x: number, min: number, max: number): number { + return Math.max(min, Math.min(x, max)); +} + +/** + * Similar to lodash's uniqBy. In case of multiple items with the same key, + * only the first one is kept. + */ +export function uniqBy(items: T[], itemToKey: (item: T) => K): T[] { + const keys = new Set(items.map((item) => itemToKey(item))); + + return items.filter((item) => { + // Will return true if was in set - e.g. was the first item with its key. + return keys.delete(itemToKey(item)); + }); +} + +export function typedKeys(obj: { [k in K]: V }): K[] { + return Object.keys(obj) as K[]; +} + +/** + * HTML escapes the passed string + */ +export function escape(html: unknown): string { + return String(html).replace(/["'&<>]/g, (match) => ESCAPED[match as keyof typeof ESCAPED]); +} + +/** + * Returns a promise that will resolve after the specified time + * @param ms Number of ms to wait + */ +export function delay(ms: number): Promise { + return new Promise((resolve) => { + setTimeout(() => resolve(), ms); + }); +} + +/** + * "Real" modulo (always >= 0), not remainder. + */ +export function mod(a: number, n: number): number { + return ((a % n) + n) % n; +} + +/** + * Sum of elements in array + */ +export function sum(arr: number[]): number { + return arr.reduce((a, b) => a + b, 0); +} + +/** + * Return a random item from an array + */ +export function randomItem(arr: T[]): T { + return arr[Math.floor(Math.random() * arr.length)]; +} + +/** + * Safely parse JSON + */ +export function parseJSON(x: unknown): T | undefined { + if (!x || typeof x !== "string") { + return undefined; + } + try { + return JSON.parse(x); + } catch (e) { + if (e instanceof SyntaxError) { + console.error(e.name); + } else if (e instanceof Error) { + console.error(e.message); + } else { + console.error(e); + } + return undefined; + } +} + +/** + * Return true if an HTML element is scrolled all the way + */ +export function isFullyScrolled(elt: HTMLElement): boolean { + return elt.scrollHeight - Math.abs(elt.scrollTop) === elt.clientHeight; +} + +/** + * Smoothly scroll an element all the way + */ +export function scrollToMax(elt: HTMLElement, axis: "x" | "y" = "y"): void { + elt.scroll({ + behavior: "smooth", + left: axis === "x" ? elt.scrollWidth : undefined, + top: axis === "y" ? elt.scrollHeight : undefined, + }); +} + +/** + * Converts hex string to rgb array (i.e. 
[r,g,b]) + * original from https://stackoverflow.com/a/39077686/6558628 + */ +export function hexToRgb(hex: string): number[] { + return ( + hex + .replace(/^#?([a-f\d])([a-f\d])([a-f\d])$/i, (_, r, g, b) => "#" + r + r + g + g + b + b) + .substring(1) + .match(/.{2}/g) + ?.map((x) => parseInt(x, 16)) || [0, 0, 0] + ); +} + +// Get the Task id corresponding to the modelPipeline (should be === in 99% cases) +export function getPipelineTask(modelPipeline: PipelineType): PipelineType { + return modelPipeline === "text2text-generation" ? "text-generation" : modelPipeline; +} + +/** +* For Tailwind: +bg-blue-100 border-blue-100 dark:bg-blue-800 dark:border-blue-800 +bg-green-100 border-green-100 dark:bg-green-800 dark:border-green-800 +bg-yellow-100 border-yellow-100 dark:bg-yellow-800 dark:border-yellow-800 +bg-purple-100 border-purple-100 dark:bg-purple-800 dark:border-purple-800 +bg-red-100 border-red-100 dark:bg-red-800 dark:border-red-800 +*/ diff --git a/packages/widgets/src/routes/+layout.svelte b/packages/widgets/src/routes/+layout.svelte new file mode 100644 index 0000000000000000000000000000000000000000..fd328954f1a4f36c0cc264102f6d6c709ef9001f --- /dev/null +++ b/packages/widgets/src/routes/+layout.svelte @@ -0,0 +1,7 @@ + + +
+ +
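To make the rule stated in LibrariesToTasks.ts above concrete, here is a minimal sketch of the check that mapping enables. The helper name `isInferenceEnabled` and the `$lib` import path are assumptions for illustration, not part of this diff:

	import type { ModelLibraryKey, PipelineType } from "@huggingface/tasks";
	import { LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS } from "$lib/interfaces/LibrariesToTasks.js";

	// Hypothetical helper: Transformers is assumed to support every inference
	// task; any other library must list the (library, task) pair in the mapping
	// for the Inference API widget to be enabled.
	function isInferenceEnabled(library: ModelLibraryKey, task: PipelineType): boolean {
		if (library === "transformers") {
			return true;
		}
		return LIBRARY_TASK_MAPPING_EXCLUDING_TRANSFORMERS[library]?.includes(task) ?? false;
	}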
diff --git a/packages/widgets/src/routes/+page.svelte b/packages/widgets/src/routes/+page.svelte new file mode 100644 index 0000000000000000000000000000000000000000..ad261beddb855a1567fc8af8c6d5a88e1837cbd7 --- /dev/null +++ b/packages/widgets/src/routes/+page.svelte @@ -0,0 +1,581 @@ + + +
+ + + + +
+

Showcase of all types of inference widgets running

+
+ {#each models as model} + + {/each} +
+
+ +
+

Showcase of all types of disabled inference widgets

+
+ {#each modelsDisabled as model} + + {/each} +
+
+ +
+

Showcase of all types of disabled inference widgets with example outputs

+
+ {#each modelsDisabledWithExamples as model} + + {/each} +
+
+
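The showcase page above iterates over arrays of model metadata (the page's script block did not survive extraction); each entry conforms to the ModelData interface from Types.ts. A minimal sketch of one such entry: the model id and widget example are hypothetical, and the required `inference` field is cast because the InferenceDisplayability enum body is not shown in this excerpt.

	import type { ModelData } from "$lib/interfaces/Types.js";

	// Hypothetical entry for the "models" array; values are illustrative only.
	const exampleModel: ModelData = {
		id: "distilbert-base-uncased-finetuned-sst-2-english",
		// Cast: the InferenceDisplayability members are defined outside this diff.
		inference: "yes" as ModelData["inference"],
		pipeline_tag: "text-classification",
		widgetData: [{ text: "I like you. I love you" }],
	};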
diff --git a/packages/widgets/src/routes/[...model]/+page.svelte b/packages/widgets/src/routes/[...model]/+page.svelte new file mode 100644 index 0000000000000000000000000000000000000000..c63519c95561bc38307b9072bca688451d37d8b3 --- /dev/null +++ b/packages/widgets/src/routes/[...model]/+page.svelte @@ -0,0 +1,28 @@ + + +← Back to index + + +
+ {#if data.model} + + +
+			{JSON.stringify(data.model, null, 2)}
+		
+ {:else} +
Error. Probably non-existent model. {data.message}
+ {/if} +
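The load function in the next file intentionally swallows fetch failures and returns a `message` field for the template above to branch on; its `// todo: throw error() instead` comment points at SvelteKit's `error` helper. A minimal sketch of that variant, assuming the same endpoint and the SvelteKit 1.x `throw error(...)` style:

	import { error } from "@sveltejs/kit";
	import type { Load } from "@sveltejs/kit";

	// Sketch of the TODO below: fail the route with a proper 404 instead of
	// returning a { message } payload.
	export const load: Load = async ({ params, fetch }) => {
		const res = await fetch(`https://huggingface.co/api/models/${params.model}`);
		if (!res.ok) {
			throw error(404, `Model ${params.model} not found`);
		}
		return { model: await res.json() };
	};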
diff --git a/packages/widgets/src/routes/[...model]/+page.ts b/packages/widgets/src/routes/[...model]/+page.ts new file mode 100644 index 0000000000000000000000000000000000000000..ac2edfd7d99f2f072f6277ed6c759a4e4d074f7a --- /dev/null +++ b/packages/widgets/src/routes/[...model]/+page.ts @@ -0,0 +1,17 @@ +import type { WidgetProps } from "$lib/components/InferenceWidget/shared/types.js"; +import type { Load } from "@sveltejs/kit"; + +export const load: Load = async ({ params, fetch }): Promise<{ model?: WidgetProps["model"]; message?: string }> => { + const url = `https://huggingface.co/api/models/${params.model}`; + try { + const model = await (await fetch(url)).json(); + return { + model, + }; + } catch { + // todo: throw error() instead + return { + message: `Model ${params.model} not found (probably)`, + }; + } +}; diff --git a/packages/widgets/src/tailwind.css b/packages/widgets/src/tailwind.css new file mode 100644 index 0000000000000000000000000000000000000000..8f335baed31d5702c693a95e496c60549c208f8b --- /dev/null +++ b/packages/widgets/src/tailwind.css @@ -0,0 +1,226 @@ +@tailwind base; + +@layer base { + html.dark { + --scrollbarBG: #020011; + --thumbBG: #374151; + } + .dark *::-webkit-scrollbar { + width: 11px; + height: 11px; + } + .dark * { + scrollbar-width: thin; + scrollbar-color: var(--thumbBG) var(--scrollbarBG); + } + + .dark input, + .dark textarea, + .dark [contenteditable] { + caret-color: white !important; + } + + .dark *::-webkit-scrollbar-track { + background: var(--scrollbarBG); + } + .dark *::-webkit-scrollbar-thumb { + background-color: var(--thumbBG); + border-radius: 6px; + border: 3px solid var(--scrollbarBG); + } + /* .dark input:-internal-autofill-selected { + @apply bg-gray-925; + } */ + .dark .bg-white { + @apply bg-gray-950; + } + .dark .text-black { + @apply text-gray-200; + } + .dark .text-gray-900 { + @apply text-gray-200; + } + .dark .text-gray-800 { + @apply text-gray-300; + } + .dark .text-gray-700 { + @apply text-gray-300; + } + .dark .text-gray-600 { + @apply text-gray-350; + } + .dark .text-gray-500 { + @apply text-gray-400; + } + .dark .border-gray-200, + .dark .border-gray-100, + .dark .border, + .dark .border-b { + @apply border-gray-850; + } +} + +@tailwind components; + +@layer components { + .btn, + .btn-widget { + @apply inline-flex cursor-pointer select-none items-center justify-center whitespace-nowrap rounded-lg border bg-gradient-to-b px-3 py-1 focus:outline-none focus:ring; + } + .btn { + @apply border-gray-200 from-white to-gray-100 text-gray-800 hover:shadow-inner dark:border-gray-900 dark:from-gray-800 dark:to-gray-950 dark:text-gray-200 dark:hover:from-gray-700; + } + .btn-widget { + @apply h-8 from-gray-50 to-gray-200 hover:from-gray-100 dark:border-gray-900 dark:from-gray-800 dark:to-gray-950 dark:hover:from-gray-700; + } + .btn:disabled, + .btn-widget:disabled { + @apply cursor-not-allowed opacity-50; + } + .btn.btn-lg { + @apply px-4 py-1.5 font-semibold; + } + .overview-card-wrapper { + @apply from-gray-50-to-white rounded-lg border border-gray-100 bg-gradient-to-r via-white text-base shadow-sm hover:via-gray-50 hover:to-gray-100 dark:border-gray-900 dark:via-gray-950 dark:hover:from-gray-950 dark:hover:via-gray-925 dark:hover:to-gray-925; + } + .overview-card-wrapper.white { + @apply from-white to-white dark:from-gray-925 dark:to-gray-950; + } + .tab { + @apply -mb-px flex cursor-pointer select-none items-center border-r border-gray-200 px-4 text-center; + } + .tab:not(.active) { + @apply hover:text-gray-700; + } + 
.tab.active { + @apply flex items-center border-r border-gray-200 bg-white px-4 text-center font-semibold; + } + .tab-alternate { + @apply flex h-full items-center whitespace-nowrap border-b-2 border-transparent px-2.5 font-medium text-gray-600 dark:text-gray-400 sm:px-3.5; + } + .tab-alternate:not(.active) { + @apply hover:border-gray-200 dark:hover:border-gray-800; + } + .tab-alternate.active { + @apply border-gray-700 font-semibold text-gray-800 dark:border-gray-400; + } + .tag { + @apply mr-1 mb-1 inline-flex h-7 max-w-full flex-none items-center overflow-hidden truncate rounded-lg border border-transparent bg-gradient-to-b text-sm dark:border-gray-900 md:mr-1.5 md:mb-1.5; + } + .tag > span { + @apply px-2; + } + .tag.inactive { + @apply filter-grayscale opacity-50; + } + .tag-blue { + @apply from-blue-50 to-blue-50 text-blue-800 hover:to-blue-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950; + } + .tag-ghost { + @apply from-transparent to-transparent text-gray-400 hover:from-gray-100 hover:to-gray-100 hover:text-gray-600; + } + .tag-green { + @apply from-green-50 to-green-50 text-green-800 hover:to-green-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950; + } + .tag-indigo { + @apply from-indigo-50 to-indigo-50 text-indigo-800 hover:to-indigo-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950; + } + .tag-orange { + @apply from-orange-50 to-orange-50 text-orange-800 hover:to-orange-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950; + } + .tag-purple { + @apply from-purple-50 to-purple-50 text-purple-800 hover:to-purple-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950; + } + .tag-red { + @apply from-red-50 to-red-50 text-red-800 hover:to-red-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950; + } + .tag-yellow { + @apply from-yellow-50 text-yellow-800 hover:to-yellow-100 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950; + } + .tag-white { + @apply border-gray-100 from-white to-white text-gray-700 hover:to-gray-100 dark:border-gray-900 dark:from-gray-925 dark:to-gray-925 dark:text-gray-300 dark:hover:to-gray-950; + } + .tag-ico { + @apply flex h-7 w-8 flex-none items-center bg-gradient-to-t to-white pl-2 dark:to-gray-950; + } + .tag-ico-blue { + @apply from-blue-50 text-blue-500 dark:from-gray-925; + } + .tag-ico-green { + @apply from-green-50 text-green-500 dark:from-gray-925; + } + .tag-ico-indigo { + @apply from-indigo-50 text-indigo-500 dark:from-gray-925; + } + .tag-ico-orange { + @apply from-orange-50 text-orange-500 dark:from-gray-925; + } + .tag-ico-purple { + @apply from-purple-50 text-purple-500 dark:from-gray-925; + } + .tag-ico-red { + @apply from-red-50 text-red-500 dark:from-gray-925; + } + .tag-ico-yellow { + @apply from-yellow-50 text-yellow-500 dark:from-gray-925; + } + .form-input:not([type="checkbox"]) { + @apply border-2 border-gray-200 shadow-sm + focus:border-blue-300 focus:ring focus:ring-blue-200 + focus:ring-opacity-50 dark:border-gray-700 dark:bg-gray-950; + } + .form-input:not([type="radio"]):not([type="checkbox"]) { + @apply mt-1 block w-full rounded-md; + } + .form-input[type="radio"] { + @apply mt-2 mr-1.5 h-3.5 w-3.5; + } + .form-input[type="checkbox"] { + @apply rounded border-transparent bg-gray-200 text-blue-500 focus:border-transparent focus:ring-1 focus:ring-gray-200 focus:ring-offset-2; + } + .form-input[type="checkbox"]:checked { + 
@apply bg-blue-500; + } + .form-input:disabled { + @apply cursor-not-allowed opacity-50; + } + .form-input-alt { + @apply h-10 rounded-lg border border-gray-200 px-3 placeholder-gray-400 shadow-inner outline-none focus:shadow-inner focus:ring-1 focus:ring-inset focus:ring-indigo-200 dark:bg-gray-925 dark:focus:ring-indigo-50; + } +} + +@tailwind utilities; + +@layer utilities { + .filter-none { + filter: none; + } + .filter-grayscale { + filter: grayscale(100%); + } + .from-gray-50-to-white { + @apply from-gray-50 to-white dark:from-gray-925 dark:to-gray-950; + } + + .from-gray-100-to-white { + @apply from-gray-100 to-white dark:from-gray-925 dark:to-gray-925; + } + .min-h-main { + min-height: calc(100vh - theme(spacing.16) - 1px); + } +} +.alert { + @apply rounded-md border border-blue-100 bg-blue-50 py-2 px-3 text-blue-900 dark:border-blue-700 dark:bg-blue-800 dark:text-blue-200; +} +.alert a { + @apply underline; +} +.alert-error { + @apply border-red-100 bg-red-50 text-red-900 dark:border-red-700 dark:bg-red-800 dark:text-red-200; +} +.alert-success { + @apply border-green-100 bg-green-50 text-green-900; +} +.alert-warning { + @apply border-yellow-100 bg-yellow-50 text-yellow-900; +} diff --git a/packages/widgets/static/audioProcessor.js b/packages/widgets/static/audioProcessor.js new file mode 100644 index 0000000000000000000000000000000000000000..f1cde75a59d5cbfdc31c9704f30f0a8f66c65647 --- /dev/null +++ b/packages/widgets/static/audioProcessor.js @@ -0,0 +1,39 @@ +// for js/src/lib/components/InferenceWidget/shared/WidgetRealtimeRecorder/Recorder.ts +class AudioDataExtractor extends AudioWorkletProcessor { + _updateIntervalInMS; + _sampleInFrames; + _index; + _buffer; + + constructor() { + super(); + this._updateIntervalInMS = 50; + this._sampleInFrames = parseInt((this._updateIntervalInMS / 1000.0) * sampleRate); + this._index = 0; + this._buffer = new Float32Array(this._sampleInFrames); + } + + process(inputs, outputs, parameters) { + // Note that the input will be down-mixed to mono; however, if no inputs are + // connected then zero channels will be passed in. 
+ if (inputs.length > 0 && inputs[0].length > 0) { + const rest = this._buffer.length - this._index; + if (rest < inputs[0][0].length) { + this._buffer.set(inputs[0][0].slice(0, rest), this._index); + this.port.postMessage({ + buffer: this._buffer.slice(0), + sampling_rate: sampleRate, + }); + this._buffer.fill(0); + this._index = inputs[0][0].length - rest; + } else { + this._buffer.set(inputs[0][0], this._index); + this._index += inputs[0][0].length; + } + } + + return true; + } +} + +registerProcessor("AudioDataExtractor", AudioDataExtractor); diff --git a/packages/widgets/static/cats.jpg b/packages/widgets/static/cats.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e131e8ecdf32c3f751ab0f7b2e5f002683babda2 Binary files /dev/null and b/packages/widgets/static/cats.jpg differ diff --git a/packages/widgets/static/favicon.png b/packages/widgets/static/favicon.png new file mode 100644 index 0000000000000000000000000000000000000000..825b9e65af7c104cfb07089bb28659393b4f2097 Binary files /dev/null and b/packages/widgets/static/favicon.png differ diff --git a/packages/widgets/svelte.config.js b/packages/widgets/svelte.config.js new file mode 100644 index 0000000000000000000000000000000000000000..456afa84b8259c237e5904b2e5db71ff8f338b15 --- /dev/null +++ b/packages/widgets/svelte.config.js @@ -0,0 +1,18 @@ +import adapter from "@sveltejs/adapter-auto"; +import { vitePreprocess } from "@sveltejs/kit/vite"; + +/** @type {import('@sveltejs/kit').Config} */ +const config = { + // Consult https://kit.svelte.dev/docs/integrations#preprocessors + // for more information about preprocessors + preprocess: vitePreprocess(), + + kit: { + // adapter-auto only supports some environments, see https://kit.svelte.dev/docs/adapter-auto for a list. + // If your environment is not supported or you settled on a specific environment, switch out the adapter. + // See https://kit.svelte.dev/docs/adapters for more information about adapters. 
+ adapter: adapter(), + }, +}; + +export default config; diff --git a/packages/widgets/tailwind.config.cjs b/packages/widgets/tailwind.config.cjs new file mode 100644 index 0000000000000000000000000000000000000000..6853e61fadeefe1205b529a95bbf6a9065b3d464 --- /dev/null +++ b/packages/widgets/tailwind.config.cjs @@ -0,0 +1,43 @@ +// eslint-disable-next-line @typescript-eslint/no-var-requires +const defaultTheme = require("tailwindcss/defaultTheme"); +// eslint-disable-next-line @typescript-eslint/no-var-requires +const colors = require("tailwindcss/colors"); + +module.exports = { + content: ["./src/**/*.{html,js,svelte,ts}"], + darkMode: "class", + theme: { + container: { + center: true, + padding: { DEFAULT: "1rem" }, + }, + extend: { + colors: { + green: colors.emerald, + yellow: colors.amber, + purple: colors.violet, + gray: { + 350: "#b3bcc9", + // Dark blue + // 925: '#131f3d', + // 950: '#0a1226', + // Darker + 850: "#141c2e", + 925: "#101623", + 950: "#0b0f19", + // Darkest + // 925: '#081122', + // 950: '#000511', + }, + }, + gridTemplateRows: { + full: "100%", + }, + fontFamily: { + sans: ["Source Sans Pro", ...defaultTheme.fontFamily.sans], + mono: ["IBM Plex Mono", ...defaultTheme.fontFamily.mono], + }, + }, + }, + plugins: [require("@tailwindcss/forms")], +}; diff --git a/packages/widgets/tsconfig.json b/packages/widgets/tsconfig.json new file mode 100644 index 0000000000000000000000000000000000000000..6f788f1603f23e76c22eddfabc3c45a58471d27f --- /dev/null +++ b/packages/widgets/tsconfig.json @@ -0,0 +1,15 @@ +{ + "extends": "./.svelte-kit/tsconfig.json", + "compilerOptions": { + "allowJs": true, + "checkJs": true, + "esModuleInterop": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "skipLibCheck": true, + "sourceMap": true, + "strict": true, + "module": "NodeNext", + "moduleResolution": "NodeNext" + } +} diff --git a/packages/widgets/vite.config.ts b/packages/widgets/vite.config.ts new file mode 100644 index 0000000000000000000000000000000000000000..6b9eb5d390d85bd47250c16bd31672968cd3ca4b --- /dev/null +++ b/packages/widgets/vite.config.ts @@ -0,0 +1,6 @@ +import { sveltekit } from "@sveltejs/kit/vite"; +import { defineConfig } from "vite"; + +export default defineConfig({ + plugins: [sveltekit()], +}); diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml new file mode 100644 index 0000000000000000000000000000000000000000..26d255be45d5a42290df94e32b24093d97af1083 --- /dev/null +++ b/pnpm-lock.yaml @@ -0,0 +1,4252 @@ +lockfileVersion: '6.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +dependencies: + '@typescript-eslint/eslint-plugin': + specifier: ^5.51.0 + version: 5.62.0(@typescript-eslint/parser@5.62.0)(eslint@8.54.0)(typescript@5.3.2) + '@typescript-eslint/parser': + specifier: ^5.51.0 + version: 5.62.0(eslint@8.54.0)(typescript@5.3.2) + eslint: + specifier: ^8.35.0 + version: 8.54.0 + eslint-config-prettier: + specifier: ^9.0.0 + version: 9.0.0(eslint@8.54.0) + eslint-plugin-prettier: + specifier: ^4.2.1 + version: 4.2.1(eslint-config-prettier@9.0.0)(eslint@8.54.0)(prettier@3.1.0) + eslint-plugin-svelte: + specifier: ^2.30.0 + version: 2.35.1(eslint@8.54.0)(svelte@4.2.7)(ts-node@10.9.1) + prettier: + specifier: ^3.0.0 + version: 3.1.0 + prettier-plugin-svelte: + specifier: ^3.0.0 + version: 3.1.1(prettier@3.1.0)(svelte@4.2.7) + typescript: + specifier: ^5.0.0 + version: 5.3.2 + vite: + specifier: 4.1.4 + version: 4.1.4(@types/node@20.9.3) + +devDependencies: + '@vitest/browser': + specifier: ^0.29.7 + version: 
0.29.8(vitest@0.29.8) + semver: + specifier: ^7.5.0 + version: 7.5.4 + ts-node: + specifier: ^10.9.1 + version: 10.9.1(@types/node@20.9.3)(typescript@5.3.2) + tsup: + specifier: ^6.7.0 + version: 6.7.0(postcss@8.4.31)(ts-node@10.9.1)(typescript@5.3.2) + vitest: + specifier: ^0.29.4 + version: 0.29.8(@vitest/browser@0.29.8)(webdriverio@8.23.4) + webdriverio: + specifier: ^8.6.7 + version: 8.23.4(typescript@5.3.2) + +packages: + + /@aashutoshrathi/word-wrap@1.2.6: + resolution: {integrity: sha512-1Yjs2SvM8TflER/OD3cOjhWWOZb58A2t7wpE2S9XfBYTiIl+XFhQG2bjy4Pu1I+EAlCNUzRDYDdFwFYUKvXcIA==} + engines: {node: '>=0.10.0'} + dev: false + + /@ampproject/remapping@2.2.1: + resolution: {integrity: sha512-lFMjJTrFL3j7L9yBxwYfCq2k6qqwHyzuUl/XBnif78PWTJYyL/dfowQHWE3sp6U6ZzqWiiIZnpTMO96zhkjwtg==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/gen-mapping': 0.3.3 + '@jridgewell/trace-mapping': 0.3.20 + dev: false + + /@cspotcode/source-map-support@0.8.1: + resolution: {integrity: sha512-IchNf6dN4tHoMFIn/7OE8LWZ19Y6q/67Bmf6vnGREv8RSbBVb9LPJxEcnwrcwX6ixSvaiGoomAUvu4YSxXrVgw==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/trace-mapping': 0.3.9 + + /@esbuild/android-arm64@0.16.17: + resolution: {integrity: sha512-MIGl6p5sc3RDTLLkYL1MyL8BMRN4tLMRCn+yRJJmEDvYZ2M7tmAf80hx1kbNEUX2KJ50RRtxZ4JHLvCfuB6kBg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + requiresBuild: true + optional: true + + /@esbuild/android-arm64@0.17.19: + resolution: {integrity: sha512-KBMWvEZooR7+kzY0BtbTQn0OAYY7CsiydT63pVEaPtVYF0hXbUaOyZog37DKxK7NF3XacBJOpYT4adIJh+avxA==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/android-arm@0.16.17: + resolution: {integrity: sha512-N9x1CMXVhtWEAMS7pNNONyA14f71VPQN9Cnavj1XQh6T7bskqiLLrSca4O0Vr8Wdcga943eThxnVp3JLnBMYtw==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + requiresBuild: true + optional: true + + /@esbuild/android-arm@0.17.19: + resolution: {integrity: sha512-rIKddzqhmav7MSmoFCmDIb6e2W57geRsM94gV2l38fzhXMwq7hZoClug9USI2pFRGL06f4IOPHHpFNOkWieR8A==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/android-x64@0.16.17: + resolution: {integrity: sha512-a3kTv3m0Ghh4z1DaFEuEDfz3OLONKuFvI4Xqczqx4BqLyuFaFkuaG4j2MtA6fuWEFeC5x9IvqnX7drmRq/fyAQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + requiresBuild: true + optional: true + + /@esbuild/android-x64@0.17.19: + resolution: {integrity: sha512-uUTTc4xGNDT7YSArp/zbtmbhO0uEEK9/ETW29Wk1thYUJBz3IVnvgEiEwEa9IeLyvnpKrWK64Utw2bgUmDveww==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /@esbuild/darwin-arm64@0.16.17: + resolution: {integrity: sha512-/2agbUEfmxWHi9ARTX6OQ/KgXnOWfsNlTeLcoV7HSuSTv63E4DqtAc+2XqGw1KHxKMHGZgbVCZge7HXWX9Vn+w==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + optional: true + + /@esbuild/darwin-arm64@0.17.19: + resolution: {integrity: sha512-80wEoCfF/hFKM6WE1FyBHc9SfUblloAWx6FJkFWTWiCoht9Mc0ARGEM47e67W9rI09YoUxJL68WHfDRYEAvOhg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@esbuild/darwin-x64@0.16.17: + resolution: {integrity: sha512-2By45OBHulkd9Svy5IOCZt376Aa2oOkiE9QWUK9fe6Tb+WDr8hXL3dpqi+DeLiMed8tVXspzsTAvd0jUl96wmg==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + requiresBuild: true + optional: true + + /@esbuild/darwin-x64@0.17.19: + resolution: {integrity: 
sha512-IJM4JJsLhRYr9xdtLytPLSH9k/oxR3boaUIYiHkAawtwNOXKE8KoU8tMvryogdcT8AU+Bflmh81Xn6Q0vTZbQw==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /@esbuild/freebsd-arm64@0.16.17: + resolution: {integrity: sha512-mt+cxZe1tVx489VTb4mBAOo2aKSnJ33L9fr25JXpqQqzbUIw/yzIzi+NHwAXK2qYV1lEFp4OoVeThGjUbmWmdw==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + requiresBuild: true + optional: true + + /@esbuild/freebsd-arm64@0.17.19: + resolution: {integrity: sha512-pBwbc7DufluUeGdjSU5Si+P3SoMF5DQ/F/UmTSb8HXO80ZEAJmrykPyzo1IfNbAoaqw48YRpv8shwd1NoI0jcQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/freebsd-x64@0.16.17: + resolution: {integrity: sha512-8ScTdNJl5idAKjH8zGAsN7RuWcyHG3BAvMNpKOBaqqR7EbUhhVHOqXRdL7oZvz8WNHL2pr5+eIT5c65kA6NHug==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + optional: true + + /@esbuild/freebsd-x64@0.17.19: + resolution: {integrity: sha512-4lu+n8Wk0XlajEhbEffdy2xy53dpR06SlzvhGByyg36qJw6Kpfk7cp45DR/62aPH9mtJRmIyrXAS5UWBrJT6TQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-arm64@0.16.17: + resolution: {integrity: sha512-7S8gJnSlqKGVJunnMCrXHU9Q8Q/tQIxk/xL8BqAP64wchPCTzuM6W3Ra8cIa1HIflAvDnNOt2jaL17vaW+1V0g==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-arm64@0.17.19: + resolution: {integrity: sha512-ct1Tg3WGwd3P+oZYqic+YZF4snNl2bsnMKRkb3ozHmnM0dGWuxcPTTntAF6bOP0Sp4x0PjSF+4uHQ1xvxfRKqg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-arm@0.16.17: + resolution: {integrity: sha512-iihzrWbD4gIT7j3caMzKb/RsFFHCwqqbrbH9SqUSRrdXkXaygSZCZg1FybsZz57Ju7N/SHEgPyaR0LZ8Zbe9gQ==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-arm@0.17.19: + resolution: {integrity: sha512-cdmT3KxjlOQ/gZ2cjfrQOtmhG4HJs6hhvm3mWSRDPtZ/lP5oe8FWceS10JaSJC13GBd4eH/haHnqf7hhGNLerA==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-ia32@0.16.17: + resolution: {integrity: sha512-kiX69+wcPAdgl3Lonh1VI7MBr16nktEvOfViszBSxygRQqSpzv7BffMKRPMFwzeJGPxcio0pdD3kYQGpqQ2SSg==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-ia32@0.17.19: + resolution: {integrity: sha512-w4IRhSy1VbsNxHRQpeGCHEmibqdTUx61Vc38APcsRbuVgK0OPEnQ0YD39Brymn96mOx48Y2laBQGqgZ0j9w6SQ==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-loong64@0.14.54: + resolution: {integrity: sha512-bZBrLAIX1kpWelV0XemxBZllyRmM6vgFQQG2GdNb+r3Fkp0FOh1NJSvekXDs7jq70k4euu1cryLMfU+mTXlEpw==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-loong64@0.16.17: + resolution: {integrity: sha512-dTzNnQwembNDhd654cA4QhbS9uDdXC3TKqMJjgOWsC0yNCbpzfWoXdZvp0mY7HU6nzk5E0zpRGGx3qoQg8T2DQ==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-loong64@0.17.19: + resolution: {integrity: sha512-2iAngUbBPMq439a+z//gE+9WBldoMp1s5GWsUSgqHLzLJ9WoZLZhpwWuym0u0u/4XmZ3gpHmzV84PonE+9IIdQ==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + requiresBuild: true + dev: true + 
optional: true + + /@esbuild/linux-mips64el@0.16.17: + resolution: {integrity: sha512-ezbDkp2nDl0PfIUn0CsQ30kxfcLTlcx4Foz2kYv8qdC6ia2oX5Q3E/8m6lq84Dj/6b0FrkgD582fJMIfHhJfSw==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-mips64el@0.17.19: + resolution: {integrity: sha512-LKJltc4LVdMKHsrFe4MGNPp0hqDFA1Wpt3jE1gEyM3nKUvOiO//9PheZZHfYRfYl6AwdTH4aTcXSqBerX0ml4A==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-ppc64@0.16.17: + resolution: {integrity: sha512-dzS678gYD1lJsW73zrFhDApLVdM3cUF2MvAa1D8K8KtcSKdLBPP4zZSLy6LFZ0jYqQdQ29bjAHJDgz0rVbLB3g==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-ppc64@0.17.19: + resolution: {integrity: sha512-/c/DGybs95WXNS8y3Ti/ytqETiW7EU44MEKuCAcpPto3YjQbyK3IQVKfF6nbghD7EcLUGl0NbiL5Rt5DMhn5tg==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-riscv64@0.16.17: + resolution: {integrity: sha512-ylNlVsxuFjZK8DQtNUwiMskh6nT0vI7kYl/4fZgV1llP5d6+HIeL/vmmm3jpuoo8+NuXjQVZxmKuhDApK0/cKw==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-riscv64@0.17.19: + resolution: {integrity: sha512-FC3nUAWhvFoutlhAkgHf8f5HwFWUL6bYdvLc/TTuxKlvLi3+pPzdZiFKSWz/PF30TB1K19SuCxDTI5KcqASJqA==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-s390x@0.16.17: + resolution: {integrity: sha512-gzy7nUTO4UA4oZ2wAMXPNBGTzZFP7mss3aKR2hH+/4UUkCOyqmjXiKpzGrY2TlEUhbbejzXVKKGazYcQTZWA/w==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-s390x@0.17.19: + resolution: {integrity: sha512-IbFsFbxMWLuKEbH+7sTkKzL6NJmG2vRyy6K7JJo55w+8xDk7RElYn6xvXtDW8HCfoKBFK69f3pgBJSUSQPr+4Q==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/linux-x64@0.16.17: + resolution: {integrity: sha512-mdPjPxfnmoqhgpiEArqi4egmBAMYvaObgn4poorpUaqmvzzbvqbowRllQ+ZgzGVMGKaPkqUmPDOOFQRUFDmeUw==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + requiresBuild: true + optional: true + + /@esbuild/linux-x64@0.17.19: + resolution: {integrity: sha512-68ngA9lg2H6zkZcyp22tsVt38mlhWde8l3eJLWkyLrp4HwMUr3c1s/M2t7+kHIhvMjglIBrFpncX1SzMckomGw==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /@esbuild/netbsd-x64@0.16.17: + resolution: {integrity: sha512-/PzmzD/zyAeTUsduZa32bn0ORug+Jd1EGGAUJvqfeixoEISYpGnAezN6lnJoskauoai0Jrs+XSyvDhppCPoKOA==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + requiresBuild: true + optional: true + + /@esbuild/netbsd-x64@0.17.19: + resolution: {integrity: sha512-CwFq42rXCR8TYIjIfpXCbRX0rp1jo6cPIUPSaWwzbVI4aOfX96OXY8M6KNmtPcg7QjYeDmN+DD0Wp3LaBOLf4Q==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/openbsd-x64@0.16.17: + resolution: {integrity: sha512-2yaWJhvxGEz2RiftSk0UObqJa/b+rIAjnODJgv2GbGGpRwAfpgzyrg1WLK8rqA24mfZa9GvpjLcBBg8JHkoodg==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + requiresBuild: true + optional: true + + /@esbuild/openbsd-x64@0.17.19: + resolution: {integrity: sha512-cnq5brJYrSZ2CF6c35eCmviIN3k3RczmHz8eYaVlNasVqsNY+JKohZU5MKmaOI+KkllCdzOKKdPs762VCPC20g==} + engines: {node: '>=12'} 
+ cpu: [x64] + os: [openbsd] + requiresBuild: true + dev: true + optional: true + + /@esbuild/sunos-x64@0.16.17: + resolution: {integrity: sha512-xtVUiev38tN0R3g8VhRfN7Zl42YCJvyBhRKw1RJjwE1d2emWTVToPLNEQj/5Qxc6lVFATDiy6LjVHYhIPrLxzw==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + requiresBuild: true + optional: true + + /@esbuild/sunos-x64@0.17.19: + resolution: {integrity: sha512-vCRT7yP3zX+bKWFeP/zdS6SqdWB8OIpaRq/mbXQxTGHnIxspRtigpkUcDMlSCOejlHowLqII7K2JKevwyRP2rg==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-arm64@0.16.17: + resolution: {integrity: sha512-ga8+JqBDHY4b6fQAmOgtJJue36scANy4l/rL97W+0wYmijhxKetzZdKOJI7olaBaMhWt8Pac2McJdZLxXWUEQw==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + requiresBuild: true + optional: true + + /@esbuild/win32-arm64@0.17.19: + resolution: {integrity: sha512-yYx+8jwowUstVdorcMdNlzklLYhPxjniHWFKgRqH7IFlUEa0Umu3KuYplf1HUZZ422e3NU9F4LGb+4O0Kdcaag==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-ia32@0.16.17: + resolution: {integrity: sha512-WnsKaf46uSSF/sZhwnqE4L/F89AYNMiD4YtEcYekBt9Q7nj0DiId2XH2Ng2PHM54qi5oPrQ8luuzGszqi/veig==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + requiresBuild: true + optional: true + + /@esbuild/win32-ia32@0.17.19: + resolution: {integrity: sha512-eggDKanJszUtCdlVs0RB+h35wNlb5v4TWEkq4vZcmVt5u/HiDZrTXe2bWFQUez3RgNHwx/x4sk5++4NSSicKkw==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@esbuild/win32-x64@0.16.17: + resolution: {integrity: sha512-y+EHuSchhL7FjHgvQL/0fnnFmO4T1bhvWANX6gcnqTjtnKWbTvUMCpGnv2+t+31d7RzyEAYAd4u2fnIhHL6N/Q==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + requiresBuild: true + optional: true + + /@esbuild/win32-x64@0.17.19: + resolution: {integrity: sha512-lAhycmKnVOuRYNtRtatQR1LPQf2oYCkRGkSFnseDAKPl8lu5SOsK/e1sXe5a0Pc5kHIHe6P2I/ilntNv2xf3cA==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /@eslint-community/eslint-utils@4.4.0(eslint@8.54.0): + resolution: {integrity: sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || >=8.0.0 + dependencies: + eslint: 8.54.0 + eslint-visitor-keys: 3.4.3 + dev: false + + /@eslint-community/regexpp@4.10.0: + resolution: {integrity: sha512-Cu96Sd2By9mCNTx2iyKOmq10v22jUVQv0lQnlGNy16oE9589yE+QADPbrMGCkA51cKZSg3Pu/aTJVTGfL/qjUA==} + engines: {node: ^12.0.0 || ^14.0.0 || >=16.0.0} + dev: false + + /@eslint/eslintrc@2.1.3: + resolution: {integrity: sha512-yZzuIG+jnVu6hNSzFEN07e8BxF3uAzYtQb6uDkaYZLo6oYZDCq454c5kB8zxnzfCYyP4MIuyBn10L0DqwujTmA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + ajv: 6.12.6 + debug: 4.3.4 + espree: 9.6.1 + globals: 13.23.0 + ignore: 5.3.0 + import-fresh: 3.3.0 + js-yaml: 4.1.0 + minimatch: 3.1.2 + strip-json-comments: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: false + + /@eslint/js@8.54.0: + resolution: {integrity: sha512-ut5V+D+fOoWPgGGNj83GGjnntO39xDy6DWxO0wb7Jp3DcMX0TfIqdzHF85VTQkerdyGmuuMD9AKAo5KiNlf/AQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: false + + /@humanwhocodes/config-array@0.11.13: + resolution: {integrity: sha512-JSBDMiDKSzQVngfRjOdFXgFfklaXI4K9nLF49Auh21lmBWRLIK3+xTErTWD4KU54pb6coM6ESE7Awz/FNU3zgQ==} + engines: 
{node: '>=10.10.0'} + dependencies: + '@humanwhocodes/object-schema': 2.0.1 + debug: 4.3.4 + minimatch: 3.1.2 + transitivePeerDependencies: + - supports-color + dev: false + + /@humanwhocodes/module-importer@1.0.1: + resolution: {integrity: sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==} + engines: {node: '>=12.22'} + dev: false + + /@humanwhocodes/object-schema@2.0.1: + resolution: {integrity: sha512-dvuCeX5fC9dXgJn9t+X5atfmgQAzUOWqS1254Gh0m6i8wKd10ebXkfNKiRK+1GWi/yTvvLDHpoxLr0xxxeslWw==} + dev: false + + /@isaacs/cliui@8.0.2: + resolution: {integrity: sha512-O8jcjabXaleOG9DQ0+ARXWZBTfnP4WNAqzuiJK7ll44AmxGKv/J2M4TPjxjY3znBCfvBXFzucm1twdyFybFqEA==} + engines: {node: '>=12'} + dependencies: + string-width: 5.1.2 + string-width-cjs: /string-width@4.2.3 + strip-ansi: 7.1.0 + strip-ansi-cjs: /strip-ansi@6.0.1 + wrap-ansi: 8.1.0 + wrap-ansi-cjs: /wrap-ansi@7.0.0 + dev: true + + /@jridgewell/gen-mapping@0.3.3: + resolution: {integrity: sha512-HLhSWOLRi875zjjMG/r+Nv0oCW8umGb0BgEhyX3dDX3egwZtB8PqLnjz3yedt8R5StBrzcg4aBpnh8UA9D1BoQ==} + engines: {node: '>=6.0.0'} + dependencies: + '@jridgewell/set-array': 1.1.2 + '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/trace-mapping': 0.3.20 + + /@jridgewell/resolve-uri@3.1.1: + resolution: {integrity: sha512-dSYZh7HhCDtCKm4QakX0xFpsRDqjjtZf/kjI/v3T3Nwt5r8/qz/M19F9ySyOqU94SXBmeG9ttTul+YnR4LOxFA==} + engines: {node: '>=6.0.0'} + + /@jridgewell/set-array@1.1.2: + resolution: {integrity: sha512-xnkseuNADM0gt2bs+BvhO0p78Mk762YnZdsuzFV018NoG1Sj1SCQvpSqa7XUaTam5vAGasABV9qXASMKnFMwMw==} + engines: {node: '>=6.0.0'} + + /@jridgewell/sourcemap-codec@1.4.15: + resolution: {integrity: sha512-eF2rxCRulEKXHTRiDrDy6erMYWqNw4LPdQ8UQA4huuxaQsVeRPFl2oM8oDGxMFhJUWZf9McpLtJasDDZb/Bpeg==} + + /@jridgewell/trace-mapping@0.3.20: + resolution: {integrity: sha512-R8LcPeWZol2zR8mmH3JeKQ6QRCFb7XgUhV9ZlGhHLGyg4wpPiPZNQOOWhFZhxKw8u//yTbNGI42Bx/3paXEQ+Q==} + dependencies: + '@jridgewell/resolve-uri': 3.1.1 + '@jridgewell/sourcemap-codec': 1.4.15 + + /@jridgewell/trace-mapping@0.3.9: + resolution: {integrity: sha512-3Belt6tdc8bPgAtbcmdtNJlirVoTmEb5e2gC94PnkwEW9jI6CAHUeoG85tjWP5WquqfavoMtMwiG4P926ZKKuQ==} + dependencies: + '@jridgewell/resolve-uri': 3.1.1 + '@jridgewell/sourcemap-codec': 1.4.15 + + /@jspm/core@2.0.0-beta.24: + resolution: {integrity: sha512-a4Bo/80Z6CoJNor5ldgs6002utmmbttP4JYd/FJ0Ob2fVdf6O6ha5SORBCqrnDnBvMc1TlrHY7dCfat5+H0a6A==} + dev: true + + /@nodelib/fs.scandir@2.1.5: + resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + run-parallel: 1.2.0 + + /@nodelib/fs.stat@2.0.5: + resolution: {integrity: sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==} + engines: {node: '>= 8'} + + /@nodelib/fs.walk@1.2.8: + resolution: {integrity: sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==} + engines: {node: '>= 8'} + dependencies: + '@nodelib/fs.scandir': 2.1.5 + fastq: 1.15.0 + + /@pkgjs/parseargs@0.11.0: + resolution: {integrity: sha512-+1VkjdD0QBLPodGrJUeqarH8VAIvQODIbwh9XpP5Syisf7YoQgsJKPNFoqqLQlu+VQ/tVSshMR6loPMn8U+dPg==} + engines: {node: '>=14'} + requiresBuild: true + dev: true + optional: true + + /@polka/url@1.0.0-next.23: + resolution: {integrity: sha512-C16M+IYz0rgRhWZdCmK+h58JMv8vijAA61gmz2rspCSwKwzBebpdcsiUmwrtJRdphuY30i6BSLEOP8ppbNLyLg==} + dev: true + + 
/@puppeteer/browsers@1.4.6(typescript@5.3.2): + resolution: {integrity: sha512-x4BEjr2SjOPowNeiguzjozQbsc6h437ovD/wu+JpaenxVLm3jkgzHY2xOslMTp50HoTvQreMjiexiGQw1sqZlQ==} + engines: {node: '>=16.3.0'} + hasBin: true + peerDependencies: + typescript: '>= 4.7.4' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + debug: 4.3.4 + extract-zip: 2.0.1 + progress: 2.0.3 + proxy-agent: 6.3.0 + tar-fs: 3.0.4 + typescript: 5.3.2 + unbzip2-stream: 1.4.3 + yargs: 17.7.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@puppeteer/browsers@1.8.0: + resolution: {integrity: sha512-TkRHIV6k2D8OlUe8RtG+5jgOF/H98Myx0M6AOafC8DdNVOFiBSFa5cpRDtpm8LXOa9sVwe0+e6Q3FC56X/DZfg==} + engines: {node: '>=16.3.0'} + hasBin: true + dependencies: + debug: 4.3.4 + extract-zip: 2.0.1 + progress: 2.0.3 + proxy-agent: 6.3.1 + tar-fs: 3.0.4 + unbzip2-stream: 1.4.3 + yargs: 17.7.2 + transitivePeerDependencies: + - supports-color + dev: true + + /@rollup/plugin-inject@4.0.4(rollup@2.79.1): + resolution: {integrity: sha512-4pbcU4J/nS+zuHk+c+OL3WtmEQhqxlZ9uqfjQMQDOHOPld7PsCd8k5LWs8h5wjwJN7MgnAn768F2sDxEP4eNFQ==} + peerDependencies: + rollup: ^1.20.0 || ^2.0.0 + dependencies: + '@rollup/pluginutils': 3.1.0(rollup@2.79.1) + estree-walker: 2.0.2 + magic-string: 0.25.9 + rollup: 2.79.1 + dev: true + + /@rollup/pluginutils@3.1.0(rollup@2.79.1): + resolution: {integrity: sha512-GksZ6pr6TpIjHm8h9lSQ8pi8BE9VeubNT0OMJ3B5uZJ8pz73NPiqOtCog/x2/QzM1ENChPKxMDhiQuRHsqc+lg==} + engines: {node: '>= 8.0.0'} + peerDependencies: + rollup: ^1.20.0||^2.0.0 + dependencies: + '@types/estree': 0.0.39 + estree-walker: 1.0.1 + picomatch: 2.3.1 + rollup: 2.79.1 + dev: true + + /@sindresorhus/is@5.6.0: + resolution: {integrity: sha512-TV7t8GKYaJWsn00tFDqBw8+Uqmr8A0fRU1tvTQhyZzGv0sJCGRQL3JGMI3ucuKo3XIZdUP+Lx7/gh2t3lewy7g==} + engines: {node: '>=14.16'} + dev: true + + /@szmarczak/http-timer@5.0.1: + resolution: {integrity: sha512-+PmQX0PiAYPMeVYe237LJAYvOMYW1j2rH5YROyS3b4CTVJum34HfRvKvAzozHAQG0TnHNdUfY9nCeUyRAs//cw==} + engines: {node: '>=14.16'} + dependencies: + defer-to-connect: 2.0.1 + dev: true + + /@tootallnate/quickjs-emscripten@0.23.0: + resolution: {integrity: sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==} + dev: true + + /@tsconfig/node10@1.0.9: + resolution: {integrity: sha512-jNsYVVxU8v5g43Erja32laIDHXeoNvFEpX33OK4d6hljo3jDhCBDhx5dhCCTMWUojscpAagGiRkBKxpdl9fxqA==} + + /@tsconfig/node12@1.0.11: + resolution: {integrity: sha512-cqefuRsh12pWyGsIoBKJA9luFu3mRxCA+ORZvA4ktLSzIuCUtWVxGIuXigEwO5/ywWFMZ2QEGKWvkZG1zDMTag==} + + /@tsconfig/node14@1.0.3: + resolution: {integrity: sha512-ysT8mhdixWK6Hw3i1V2AeRqZ5WfXg1G43mqoYlM2nc6388Fq5jcXyr5mRsqViLx/GJYdoL0bfXD8nmF+Zn/Iow==} + + /@tsconfig/node16@1.0.4: + resolution: {integrity: sha512-vxhUy4J8lyeyinH7Azl1pdd43GJhZH/tP2weN8TntQblOY+A0XbT8DJk1/oCPuOOyg/Ja757rG0CgHcWC8OfMA==} + + /@types/chai-subset@1.3.5: + resolution: {integrity: sha512-c2mPnw+xHtXDoHmdtcCXGwyLMiauiAyxWMzhGpqHC4nqI/Y5G2XhTampslK2rb59kpcuHon03UH8W6iYUzw88A==} + dependencies: + '@types/chai': 4.3.11 + dev: true + + /@types/chai@4.3.11: + resolution: {integrity: sha512-qQR1dr2rGIHYlJulmr8Ioq3De0Le9E4MJ5AiaeAETJJpndT1uUNHsGFK3L/UIu+rbkQSdj8J/w2bCsBZc/Y5fQ==} + dev: true + + /@types/estree@0.0.39: + resolution: {integrity: sha512-EYNwp3bU+98cpU4lAWYYL7Zz+2gryWH1qbdDTidVd6hkiR6weksdbMadyXKXNPEkQFhXM+hVO9ZygomHXp+AIw==} + dev: true + + /@types/estree@1.0.5: + resolution: {integrity: 
sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==} + dev: false + + /@types/http-cache-semantics@4.0.4: + resolution: {integrity: sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA==} + dev: true + + /@types/json-schema@7.0.15: + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + dev: false + + /@types/node@20.9.3: + resolution: {integrity: sha512-nk5wXLAXGBKfrhLB0cyHGbSqopS+nz0BUgZkUQqSHSSgdee0kssp1IAqlQOu333bW+gMNs2QREx7iynm19Abxw==} + dependencies: + undici-types: 5.26.5 + + /@types/semver@7.5.6: + resolution: {integrity: sha512-dn1l8LaMea/IjDoHNd9J52uBbInB796CDffS6VdIxvqYCPSG0V0DzHp76GpaWnlhg88uYyPbXCDIowa86ybd5A==} + dev: false + + /@types/which@2.0.2: + resolution: {integrity: sha512-113D3mDkZDjo+EeUEHCFy0qniNc1ZpecGiAU7WSo7YDoSzolZIQKpYFHrPpjkB2nuyahcKfrmLXeQlh7gqJYdw==} + dev: true + + /@types/ws@8.5.10: + resolution: {integrity: sha512-vmQSUcfalpIq0R9q7uTo2lXs6eGIpt9wtnLdMv9LVpIjCA/+ufZRozlVoVelIYixx1ugCBKDhn89vnsEGOCx9A==} + dependencies: + '@types/node': 20.9.3 + dev: true + + /@types/yauzl@2.10.3: + resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} + requiresBuild: true + dependencies: + '@types/node': 20.9.3 + dev: true + optional: true + + /@typescript-eslint/eslint-plugin@5.62.0(@typescript-eslint/parser@5.62.0)(eslint@8.54.0)(typescript@5.3.2): + resolution: {integrity: sha512-TiZzBSJja/LbhNPvk6yc0JrX9XqhQ0hdh6M2svYfsHGejaKFIAGd9MQ+ERIMzLGlN/kZoYIgdxFV0PuljTKXag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + '@typescript-eslint/parser': ^5.0.0 + eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@eslint-community/regexpp': 4.10.0 + '@typescript-eslint/parser': 5.62.0(eslint@8.54.0)(typescript@5.3.2) + '@typescript-eslint/scope-manager': 5.62.0 + '@typescript-eslint/type-utils': 5.62.0(eslint@8.54.0)(typescript@5.3.2) + '@typescript-eslint/utils': 5.62.0(eslint@8.54.0)(typescript@5.3.2) + debug: 4.3.4 + eslint: 8.54.0 + graphemer: 1.4.0 + ignore: 5.3.0 + natural-compare-lite: 1.4.0 + semver: 7.5.4 + tsutils: 3.21.0(typescript@5.3.2) + typescript: 5.3.2 + transitivePeerDependencies: + - supports-color + dev: false + + /@typescript-eslint/parser@5.62.0(eslint@8.54.0)(typescript@5.3.2): + resolution: {integrity: sha512-VlJEV0fOQ7BExOsHYAGrgbEiZoi8D+Bl2+f6V2RrXerRSylnp+ZBHmPvaIa8cz0Ajx7WO7Z5RqfgYg7ED1nRhA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/scope-manager': 5.62.0 + '@typescript-eslint/types': 5.62.0 + '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.3.2) + debug: 4.3.4 + eslint: 8.54.0 + typescript: 5.3.2 + transitivePeerDependencies: + - supports-color + dev: false + + /@typescript-eslint/scope-manager@5.62.0: + resolution: {integrity: sha512-VXuvVvZeQCQb5Zgf4HAxc04q5j+WrNAtNh9OwCsCgpKqESMTu3tF/jhZ3xG6T4NZwWl65Bg8KuS2uEvhSfLl0w==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + '@typescript-eslint/types': 5.62.0 + '@typescript-eslint/visitor-keys': 5.62.0 + dev: false + + /@typescript-eslint/type-utils@5.62.0(eslint@8.54.0)(typescript@5.3.2): + resolution: {integrity: 
sha512-xsSQreu+VnfbqQpW5vnCJdq1Z3Q0U31qiWmRhr98ONQmcp/yhiPJFPq8MXiJVLiksmOKSjIldZzkebzHuCGzew==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: '*' + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.3.2) + '@typescript-eslint/utils': 5.62.0(eslint@8.54.0)(typescript@5.3.2) + debug: 4.3.4 + eslint: 8.54.0 + tsutils: 3.21.0(typescript@5.3.2) + typescript: 5.3.2 + transitivePeerDependencies: + - supports-color + dev: false + + /@typescript-eslint/types@5.62.0: + resolution: {integrity: sha512-87NVngcbVXUahrRTqIK27gD2t5Cu1yuCXxbLcFtCzZGlfyVWWh8mLHkoxzjsB6DDNnvdL+fW8MiwPEJyGJQDgQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: false + + /@typescript-eslint/typescript-estree@5.62.0(typescript@5.3.2): + resolution: {integrity: sha512-CmcQ6uY7b9y694lKdRB8FEel7JbU/40iSAPomu++SjLMntB+2Leay2LO6i8VnJk58MtE9/nQSFIH6jpyRWyYzA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + typescript: '*' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@typescript-eslint/types': 5.62.0 + '@typescript-eslint/visitor-keys': 5.62.0 + debug: 4.3.4 + globby: 11.1.0 + is-glob: 4.0.3 + semver: 7.5.4 + tsutils: 3.21.0(typescript@5.3.2) + typescript: 5.3.2 + transitivePeerDependencies: + - supports-color + dev: false + + /@typescript-eslint/utils@5.62.0(eslint@8.54.0)(typescript@5.3.2): + resolution: {integrity: sha512-n8oxjeb5aIbPFEtmQxQYOLI0i9n5ySBEY/ZEHHZqKQSFnxio1rv6dthascc9dLuwrL0RC5mPCxB7vnAVGAYWAQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^6.0.0 || ^7.0.0 || ^8.0.0 + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.54.0) + '@types/json-schema': 7.0.15 + '@types/semver': 7.5.6 + '@typescript-eslint/scope-manager': 5.62.0 + '@typescript-eslint/types': 5.62.0 + '@typescript-eslint/typescript-estree': 5.62.0(typescript@5.3.2) + eslint: 8.54.0 + eslint-scope: 5.1.1 + semver: 7.5.4 + transitivePeerDependencies: + - supports-color + - typescript + dev: false + + /@typescript-eslint/visitor-keys@5.62.0: + resolution: {integrity: sha512-07ny+LHRzQXepkGg6w0mFY41fVUNBrL2Roj/++7V1txKugfjm/Ci/qSND03r2RhlJhJYMcTn9AhhSSqQp0Ysyw==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + '@typescript-eslint/types': 5.62.0 + eslint-visitor-keys: 3.4.3 + dev: false + + /@ungap/structured-clone@1.2.0: + resolution: {integrity: sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==} + dev: false + + /@vitest/browser@0.29.8(vitest@0.29.8): + resolution: {integrity: sha512-EEZV9GOOVrNaSE1eg41e3mUFzRVnfvemJl2aDIkVHqvFWuueMe9MFel+KipXyLUGTxmq40q++3wTkEX58rhVHA==} + peerDependencies: + vitest: '>=0.29.4' + dependencies: + '@vitest/runner': 0.29.8 + local-pkg: 0.4.3 + mlly: 1.4.2 + modern-node-polyfills: 0.1.0 + rollup-plugin-node-polyfills: 0.2.1 + sirv: 2.0.3 + vitest: 0.29.8(@vitest/browser@0.29.8)(webdriverio@8.23.4) + dev: true + + /@vitest/expect@0.29.8: + resolution: {integrity: sha512-xlcVXn5I5oTq6NiZSY3ykyWixBxr5mG8HYtjvpgg6KaqHm0mvhX18xuwl5YGxIRNt/A5jidd7CWcNHrSvgaQqQ==} + dependencies: + '@vitest/spy': 0.29.8 + '@vitest/utils': 0.29.8 + chai: 4.3.10 + dev: true + + /@vitest/runner@0.29.8: + resolution: {integrity: sha512-FzdhnRDwEr/A3Oo1jtIk/B952BBvP32n1ObMEb23oEJNO+qO5cBet6M2XWIDQmA7BDKGKvmhUf2naXyp/2JEwQ==} + dependencies: + '@vitest/utils': 0.29.8 + p-limit: 4.0.0 + pathe: 1.1.1 + dev: true + + /@vitest/spy@0.29.8: + 
resolution: {integrity: sha512-VdjBe9w34vOMl5I5mYEzNX8inTxrZ+tYUVk9jxaZJmHFwmDFC/GV3KBFTA/JKswr3XHvZL+FE/yq5EVhb6pSAw==} + dependencies: + tinyspy: 1.1.1 + dev: true + + /@vitest/utils@0.29.8: + resolution: {integrity: sha512-qGzuf3vrTbnoY+RjjVVIBYfuWMjn3UMUqyQtdGNZ6ZIIyte7B37exj6LaVkrZiUTvzSadVvO/tJm8AEgbGCBPg==} + dependencies: + cli-truncate: 3.1.0 + diff: 5.1.0 + loupe: 2.3.7 + pretty-format: 27.5.1 + dev: true + + /@wdio/config@8.23.1: + resolution: {integrity: sha512-MljMBvMr+QYoy4/FytFHWorFE3CrBdEWuroOaGzC/0gkVOcHRO4nOy2rKahdcPXJAuxFwJNqqHhBPj+4tWiz9w==} + engines: {node: ^16.13 || >=18} + dependencies: + '@wdio/logger': 8.16.17 + '@wdio/types': 8.23.1 + '@wdio/utils': 8.23.1 + decamelize: 6.0.0 + deepmerge-ts: 5.1.0 + glob: 10.3.10 + import-meta-resolve: 3.1.1 + transitivePeerDependencies: + - supports-color + dev: true + + /@wdio/logger@8.16.17: + resolution: {integrity: sha512-zeQ41z3T+b4IsrriZZipayXxLNDuGsm7TdExaviNGojPVrIsQUCSd/FvlLHM32b7ZrMyInHenu/zx1cjAZO71g==} + engines: {node: ^16.13 || >=18} + dependencies: + chalk: 5.3.0 + loglevel: 1.8.1 + loglevel-plugin-prefix: 0.8.4 + strip-ansi: 7.1.0 + dev: true + + /@wdio/protocols@8.23.0: + resolution: {integrity: sha512-2XTzD+lqQP3g8BWn+Bn5BTFzjHqzZNwq7DjlYrb27Bq8nOA+1DEcj3WzQ6V6CktTnKI/LAYKA1IFAF//Azrp/Q==} + dev: true + + /@wdio/repl@8.23.1: + resolution: {integrity: sha512-u6zG2cgBm67V5/WlQzadWqLGXs3moH8MOsgoljULQncelSBBZGZ5DyLB4p7jKcUAsKtMjgmFQmIvpQoqmyvdfg==} + engines: {node: ^16.13 || >=18} + dependencies: + '@types/node': 20.9.3 + dev: true + + /@wdio/types@8.23.1: + resolution: {integrity: sha512-ym3tWSUGvmKwQ9vNPQfcKvJwGNK/Fh3e5WloNj3zoaUTKgD0aJeFQ0+Dz6KGlNowA0j5VkcqTTXo+UZ3l4Cx9A==} + engines: {node: ^16.13 || >=18} + dependencies: + '@types/node': 20.9.3 + dev: true + + /@wdio/utils@8.23.1: + resolution: {integrity: sha512-VA47MOpt+7svHj3W9r+DUl3t73tJbjF7+ZXL0Lk7QLe79xevd+mPk+YmuTEepn+0MljJWAuqRCEKFG/HK77RNw==} + engines: {node: ^16.13 || >=18} + dependencies: + '@puppeteer/browsers': 1.8.0 + '@wdio/logger': 8.16.17 + '@wdio/types': 8.23.1 + decamelize: 6.0.0 + deepmerge-ts: 5.1.0 + edgedriver: 5.3.8 + geckodriver: 4.2.1 + get-port: 7.0.0 + got: 13.0.0 + import-meta-resolve: 3.1.1 + locate-app: 2.1.0 + safaridriver: 0.1.0 + split2: 4.2.0 + wait-port: 1.1.0 + transitivePeerDependencies: + - supports-color + dev: true + + /acorn-jsx@5.3.2(acorn@8.11.2): + resolution: {integrity: sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==} + peerDependencies: + acorn: ^6.0.0 || ^7.0.0 || ^8.0.0 + dependencies: + acorn: 8.11.2 + dev: false + + /acorn-walk@8.3.0: + resolution: {integrity: sha512-FS7hV565M5l1R08MXqo8odwMTB02C2UqzB17RVgu9EyuYFBqJZ3/ZY97sQD5FewVu1UyDFc1yztUDrAwT0EypA==} + engines: {node: '>=0.4.0'} + + /acorn@8.11.2: + resolution: {integrity: sha512-nc0Axzp/0FILLEVsm4fNwLCwMttvhEI263QtVPQcbpfZZ3ts0hLsZGOpE6czNlid7CJ9MlyH8reXkpsf3YUY4w==} + engines: {node: '>=0.4.0'} + hasBin: true + + /agent-base@7.1.0: + resolution: {integrity: sha512-o/zjMZRhJxny7OyEF+Op8X+efiELC7k7yOjMzgfzVqOzXqkBkWI79YoTdOtsuWd5BWhAGAuOY/Xa6xpiaWXiNg==} + engines: {node: '>= 14'} + dependencies: + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + dev: true + + /ajv@6.12.6: + resolution: {integrity: sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==} + dependencies: + fast-deep-equal: 3.1.3 + fast-json-stable-stringify: 2.1.0 + json-schema-traverse: 0.4.1 + uri-js: 4.4.1 + dev: false + + /ansi-regex@5.0.1: + resolution: {integrity: 
sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + /ansi-regex@6.0.1: + resolution: {integrity: sha512-n5M855fKb2SsfMIiFFoVrABHJC8QtHwVx+mHWP3QcEqBHYienj5dHSgjbxtC0WEZXYt4wcD6zrQElDPhFuZgfA==} + engines: {node: '>=12'} + dev: true + + /ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + dependencies: + color-convert: 2.0.1 + + /ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + dev: true + + /ansi-styles@6.2.1: + resolution: {integrity: sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==} + engines: {node: '>=12'} + dev: true + + /any-promise@1.3.0: + resolution: {integrity: sha512-7UvmKalWRt1wgjL1RrGxoSJW/0QZFIegpeGvZG9kjp8vrRu55XTHbwnqq2GpXm9uLbcuhxm3IqX9OB4MZR1b2A==} + dev: true + + /anymatch@3.1.3: + resolution: {integrity: sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==} + engines: {node: '>= 8'} + dependencies: + normalize-path: 3.0.0 + picomatch: 2.3.1 + dev: true + + /archiver-utils@4.0.1: + resolution: {integrity: sha512-Q4Q99idbvzmgCTEAAhi32BkOyq8iVI5EwdO0PmBDSGIzzjYNdcFn7Q7k3OzbLy4kLUPXfJtG6fO2RjftXbobBg==} + engines: {node: '>= 12.0.0'} + dependencies: + glob: 8.1.0 + graceful-fs: 4.2.11 + lazystream: 1.0.1 + lodash: 4.17.21 + normalize-path: 3.0.0 + readable-stream: 3.6.2 + dev: true + + /archiver@6.0.1: + resolution: {integrity: sha512-CXGy4poOLBKptiZH//VlWdFuUC1RESbdZjGjILwBuZ73P7WkAUN0htfSfBq/7k6FRFlpu7bg4JOkj1vU9G6jcQ==} + engines: {node: '>= 12.0.0'} + dependencies: + archiver-utils: 4.0.1 + async: 3.2.5 + buffer-crc32: 0.2.13 + readable-stream: 3.6.2 + readdir-glob: 1.1.3 + tar-stream: 3.1.6 + zip-stream: 5.0.1 + dev: true + + /arg@4.1.3: + resolution: {integrity: sha512-58S9QDqG0Xx27YwPSt9fJxivjYl432YCwfDMfZ+71RAqUrZef7LrKQZ3LHLOwCS4FLNBplP533Zx895SeOCHvA==} + + /argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + dev: false + + /aria-query@5.3.0: + resolution: {integrity: sha512-b0P0sZPKtyu8HkeRAfCq0IfURZK+SuwMjY1UXGBU27wpAiTwQAIlq56IbIO+ytk/JjS1fMR14ee5WBBfKi5J6A==} + dependencies: + dequal: 2.0.3 + + /array-union@2.1.0: + resolution: {integrity: sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==} + engines: {node: '>=8'} + + /assertion-error@1.1.0: + resolution: {integrity: sha512-jgsaNduz+ndvGyFt3uSuWqvy4lCnIJiovtouQN5JZHOKCS2QuhEdbcQHFhVksz2N2U9hXJo8odG7ETyWlEeuDw==} + dev: true + + /ast-types@0.13.4: + resolution: {integrity: sha512-x1FCFnFifvYDDzTaLII71vG5uvDwgtmDTEVWAxrgeiR8VjMONcCXJx7E+USjDtHlwFmt9MysbqgF9b9Vjr6w+w==} + engines: {node: '>=4'} + dependencies: + tslib: 2.6.2 + dev: true + + /async@3.2.5: + resolution: {integrity: sha512-baNZyqaaLhyLVKm/DlvdW051MSgO6b8eVfIezl9E5PqWxFgzLm/wQntEW4zOytVburDEr0JlALEpdOFwvErLsg==} + dev: true + + /axobject-query@3.2.1: + resolution: {integrity: sha512-jsyHu61e6N4Vbz/v18DHwWYKK0bSWLqn47eeDSKPB7m8tqMHF9YJ+mhIk2lVteyZrY8tnSj/jHOv4YiTCuCJgg==} + dependencies: + dequal: 2.0.3 + dev: false + + /b4a@1.6.4: + resolution: {integrity: sha512-fpWrvyVHEKyeEvbKZTVOeZF3VSKKWtJxFIxX/jaVPf+cLbGUSitjb49pHLqPV2BUNNZ0LcoeEGfE/YCpyDYHIw==} + dev: true + + /balanced-match@1.0.2: + resolution: {integrity: 
sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==} + + /base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + dev: true + + /basic-ftp@5.0.3: + resolution: {integrity: sha512-QHX8HLlncOLpy54mh+k/sWIFd0ThmRqwe9ZjELybGZK+tZ8rUb9VO0saKJUROTbE+KhzDUT7xziGpGrW8Kmd+g==} + engines: {node: '>=10.0.0'} + dev: true + + /big-integer@1.6.52: + resolution: {integrity: sha512-QxD8cf2eVqJOOz63z6JIN9BzvVs/dlySa5HGSBH5xtR8dPteIRQnBxxKqkNTiT6jbDTF6jAfrd4oMcND9RGbQg==} + engines: {node: '>=0.6'} + dev: true + + /binary-extensions@2.2.0: + resolution: {integrity: sha512-jDctJ/IVQbZoJykoeHbhXpOlNBqGNcwXJKJog42E5HDPUwQTSdjCHdihjj0DlnheQ7blbT6dHOafNAiS8ooQKA==} + engines: {node: '>=8'} + dev: true + + /binary@0.3.0: + resolution: {integrity: sha512-D4H1y5KYwpJgK8wk1Cue5LLPgmwHKYSChkbspQg5JtVuR5ulGckxfR62H3AE9UDkdMC8yyXlqYihuz3Aqg2XZg==} + dependencies: + buffers: 0.1.1 + chainsaw: 0.1.0 + dev: true + + /bluebird@3.4.7: + resolution: {integrity: sha512-iD3898SR7sWVRHbiQv+sHUtHnMvC1o3nW5rAcqnq3uOn07DSAppZYUkIGslDz6gXC7HfunPe7YVBgoEJASPcHA==} + dev: true + + /brace-expansion@1.1.11: + resolution: {integrity: sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==} + dependencies: + balanced-match: 1.0.2 + concat-map: 0.0.1 + + /brace-expansion@2.0.1: + resolution: {integrity: sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==} + dependencies: + balanced-match: 1.0.2 + dev: true + + /braces@3.0.2: + resolution: {integrity: sha512-b8um+L1RzM3WDSzvhm6gIz1yfTbBt6YTlcEKAvsmqCZZFw46z626lVj9j1yEPW33H5H+lBQpZMP1k8l+78Ha0A==} + engines: {node: '>=8'} + dependencies: + fill-range: 7.0.1 + + /buffer-crc32@0.2.13: + resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==} + dev: true + + /buffer-indexof-polyfill@1.0.2: + resolution: {integrity: sha512-I7wzHwA3t1/lwXQh+A5PbNvJxgfo5r3xulgpYDB5zckTu/Z9oUK9biouBKQUjEqzaz3HnAT6TYoovmE+GqSf7A==} + engines: {node: '>=0.10'} + dev: true + + /buffer@5.7.1: + resolution: {integrity: sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==} + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + dev: true + + /buffers@0.1.1: + resolution: {integrity: sha512-9q/rDEGSb/Qsvv2qvzIzdluL5k7AaJOTrw23z9reQthrbF7is4CtlT0DXyO1oei2DCp4uojjzQ7igaSHp1kAEQ==} + engines: {node: '>=0.2.0'} + dev: true + + /bundle-require@4.0.2(esbuild@0.17.19): + resolution: {integrity: sha512-jwzPOChofl67PSTW2SGubV9HBQAhhR2i6nskiOThauo9dzwDUgOWQScFVaJkjEfYX+UXiD+LEx8EblQMc2wIag==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + peerDependencies: + esbuild: '>=0.17' + dependencies: + esbuild: 0.17.19 + load-tsconfig: 0.2.5 + dev: true + + /cac@6.7.14: + resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} + engines: {node: '>=8'} + dev: true + + /cacheable-lookup@7.0.0: + resolution: {integrity: sha512-+qJyx4xiKra8mZrcwhjMRMUhD5NR1R8esPkzIYxX96JiecFoxAXFuz/GpR3+ev4PE1WamHip78wV0vcmPQtp8w==} + engines: {node: '>=14.16'} + dev: true + + /cacheable-request@10.2.14: + resolution: {integrity: sha512-zkDT5WAF4hSSoUgyfg5tFIxz8XQK+25W/TLVojJTMKBaxevLBBtLxgqguAuVQB8PVW79FVjHcU+GJ9tVbDZ9mQ==} + engines: {node: '>=14.16'} + dependencies: + '@types/http-cache-semantics': 4.0.4 + get-stream: 6.0.1 + http-cache-semantics: 4.1.1 + keyv: 4.5.4 + 
mimic-response: 4.0.0 + normalize-url: 8.0.0 + responselike: 3.0.0 + dev: true + + /callsites@3.1.0: + resolution: {integrity: sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==} + engines: {node: '>=6'} + dev: false + + /chai@4.3.10: + resolution: {integrity: sha512-0UXG04VuVbruMUYbJ6JctvH0YnC/4q3/AkT18q4NaITo91CUm0liMS9VqzT9vZhVQ/1eqPanMWjBM+Juhfb/9g==} + engines: {node: '>=4'} + dependencies: + assertion-error: 1.1.0 + check-error: 1.0.3 + deep-eql: 4.1.3 + get-func-name: 2.0.2 + loupe: 2.3.7 + pathval: 1.1.1 + type-detect: 4.0.8 + dev: true + + /chainsaw@0.1.0: + resolution: {integrity: sha512-75kWfWt6MEKNC8xYXIdRpDehRYY/tNSgwKaJq+dbbDcxORuVrrQ+SEHoWsniVn9XPYfP4gmdWIeDk/4YNp1rNQ==} + dependencies: + traverse: 0.3.9 + dev: true + + /chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + /chalk@5.3.0: + resolution: {integrity: sha512-dLitG79d+GV1Nb/VYcCDFivJeK1hiukt9QjRNVOsUtTy1rR1YJsmpGGTZ3qJos+uw7WmWF4wUwBd9jxjocFC2w==} + engines: {node: ^12.17.0 || ^14.13 || >=16.0.0} + dev: true + + /check-error@1.0.3: + resolution: {integrity: sha512-iKEoDYaRmd1mxM90a2OEfWhjsjPpYPuQ+lMYsoxB126+t8fw7ySEO48nmDg5COTjxDI65/Y2OWpeEHk3ZOe8zg==} + dependencies: + get-func-name: 2.0.2 + dev: true + + /chokidar@3.5.3: + resolution: {integrity: sha512-Dr3sfKRP6oTcjf2JmUmFJfeVMvXBdegxB0iVQ5eb2V10uFJUCAS8OByZdVAyVb8xXNz3GjjTgj9kLWsZTqE6kw==} + engines: {node: '>= 8.10.0'} + dependencies: + anymatch: 3.1.3 + braces: 3.0.2 + glob-parent: 5.1.2 + is-binary-path: 2.1.0 + is-glob: 4.0.3 + normalize-path: 3.0.0 + readdirp: 3.6.0 + optionalDependencies: + fsevents: 2.3.3 + dev: true + + /chromium-bidi@0.4.16(devtools-protocol@0.0.1147663): + resolution: {integrity: sha512-7ZbXdWERxRxSwo3txsBjjmc/NLxqb1Bk30mRb0BMS4YIaiV6zvKZqL/UAH+DdqcDYayDWk2n/y8klkBDODrPvA==} + peerDependencies: + devtools-protocol: '*' + dependencies: + devtools-protocol: 0.0.1147663 + mitt: 3.0.0 + dev: true + + /cli-truncate@3.1.0: + resolution: {integrity: sha512-wfOBkjXteqSnI59oPcJkcPl/ZmwvMMOj340qUIY1SKZCv0B9Cf4D4fAucRkIKQmsIuYK3x1rrgU7MeGRruiuiA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + slice-ansi: 5.0.0 + string-width: 5.1.2 + dev: true + + /cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + dev: true + + /code-red@1.0.4: + resolution: {integrity: sha512-7qJWqItLA8/VPVlKJlFXU+NBlo/qyfs39aJcuMT/2ere32ZqvF5OSxgdM5xOfJJ7O429gg2HM47y8v9P+9wrNw==} + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + '@types/estree': 1.0.5 + acorn: 8.11.2 + estree-walker: 3.0.3 + periscopic: 3.1.0 + dev: false + + /color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + dependencies: + color-name: 1.1.4 + + /color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + /commander@4.1.1: + resolution: {integrity: sha512-NOKm8xhkzAjzFx8B2v5OAHT+u5pRQc2UCa2Vq9jYL/31o2wi9mxBA7LIFs3sV5VSC49z6pEhfbMULvShKj26WA==} + engines: {node: '>= 6'} + dev: true + + /commander@9.5.0: + resolution: {integrity: 
sha512-KRs7WVDKg86PWiuAqhDrAQnTXZKraVcCc6vFdL14qrZ/DcWwuRo7VoiYXalXO7S5GKpqYiVEwCbgFDfxNHKJBQ==} + engines: {node: ^12.20.0 || >=14} + dev: true + + /compress-commons@5.0.1: + resolution: {integrity: sha512-MPh//1cERdLtqwO3pOFLeXtpuai0Y2WCd5AhtKxznqM7WtaMYaOEMSgn45d9D10sIHSfIKE603HlOp8OPGrvag==} + engines: {node: '>= 12.0.0'} + dependencies: + crc-32: 1.2.2 + crc32-stream: 5.0.0 + normalize-path: 3.0.0 + readable-stream: 3.6.2 + dev: true + + /concat-map@0.0.1: + resolution: {integrity: sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==} + + /core-util-is@1.0.3: + resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + dev: true + + /crc-32@1.2.2: + resolution: {integrity: sha512-ROmzCKrTnOwybPcJApAA6WBWij23HVfGVNKqqrZpuyZOHqK2CwHSvpGuyt/UNNvaIjEd8X5IFGp4Mh+Ie1IHJQ==} + engines: {node: '>=0.8'} + hasBin: true + dev: true + + /crc32-stream@5.0.0: + resolution: {integrity: sha512-B0EPa1UK+qnpBZpG+7FgPCu0J2ETLpXq09o9BkLkEAhdB6Z61Qo4pJ3JYu0c+Qi+/SAL7QThqnzS06pmSSyZaw==} + engines: {node: '>= 12.0.0'} + dependencies: + crc-32: 1.2.2 + readable-stream: 3.6.2 + dev: true + + /create-require@1.1.1: + resolution: {integrity: sha512-dcKFX3jn0MpIaXjisoRvexIJVEKzaq7z2rZKxf+MSr9TkdmHmsU4m2lcLojrj/FHl8mk5VxMmYA+ftRkP/3oKQ==} + + /cross-fetch@4.0.0: + resolution: {integrity: sha512-e4a5N8lVvuLgAWgnCrLr2PP0YyDOTHa9H/Rj54dirp61qXnNq46m82bRhNqIA5VccJtWBvPTFRV3TtvHUKPB1g==} + dependencies: + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + dev: true + + /cross-spawn@7.0.3: + resolution: {integrity: sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==} + engines: {node: '>= 8'} + dependencies: + path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + /css-shorthand-properties@1.1.1: + resolution: {integrity: sha512-Md+Juc7M3uOdbAFwOYlTrccIZ7oCFuzrhKYQjdeUEW/sE1hv17Jp/Bws+ReOPpGVBTYCBoYo+G17V5Qo8QQ75A==} + dev: true + + /css-tree@2.3.1: + resolution: {integrity: sha512-6Fv1DV/TYw//QF5IzQdqsNDjx/wc8TrMBZsqjL9eW01tWb7R7k/mq+/VXfJCl7SoD5emsJop9cOByJZfs8hYIw==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0} + dependencies: + mdn-data: 2.0.30 + source-map-js: 1.0.2 + dev: false + + /css-value@0.0.1: + resolution: {integrity: sha512-FUV3xaJ63buRLgHrLQVlVgQnQdR4yqdLGaDu7g8CQcWjInDfM9plBTPI9FRfpahju1UBSaMckeb2/46ApS/V1Q==} + dev: true + + /cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + dev: false + + /data-uri-to-buffer@4.0.1: + resolution: {integrity: sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==} + engines: {node: '>= 12'} + dev: true + + /data-uri-to-buffer@6.0.1: + resolution: {integrity: sha512-MZd3VlchQkp8rdend6vrx7MmVDJzSNTBvghvKjirLkD+WTChA3KUf0jkE68Q4UyctNqI11zZO9/x2Yx+ub5Cvg==} + engines: {node: '>= 14'} + dev: true + + /debug@4.3.4: + resolution: {integrity: sha512-PRWFHuSU3eDtQJPvnNY7Jcket1j0t5OuOsFzPPzsekD52Zl8qUfFIPEiswXqIvHWGVHOgX+7G/vCNNhehwxfkQ==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + dependencies: + ms: 2.1.2 + + /decamelize@6.0.0: + resolution: {integrity: sha512-Fv96DCsdOgB6mdGl67MT5JaTNKRzrzill5OH5s8bjYJXVlcXyPYGyPsUkWyGV5p1TXI5esYIYMMeDJL0hEIwaA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dev: true + + /decompress-response@6.0.0: + 
resolution: {integrity: sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==} + engines: {node: '>=10'} + dependencies: + mimic-response: 3.1.0 + dev: true + + /deep-eql@4.1.3: + resolution: {integrity: sha512-WaEtAOpRA1MQ0eohqZjpGD8zdI0Ovsm8mmFhaDN8dvDZzyoUMcYDnf5Y6iu7HTXxf8JDS23qWa4a+hKCDyOPzw==} + engines: {node: '>=6'} + dependencies: + type-detect: 4.0.8 + dev: true + + /deep-is@0.1.4: + resolution: {integrity: sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==} + dev: false + + /deepmerge-ts@5.1.0: + resolution: {integrity: sha512-eS8dRJOckyo9maw9Tu5O5RUi/4inFLrnoLkBe3cPfDMx3WZioXtmOew4TXQaxq7Rhl4xjDtR7c6x8nNTxOvbFw==} + engines: {node: '>=16.0.0'} + dev: true + + /defer-to-connect@2.0.1: + resolution: {integrity: sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==} + engines: {node: '>=10'} + dev: true + + /degenerator@5.0.1: + resolution: {integrity: sha512-TllpMR/t0M5sqCXfj85i4XaAzxmS5tVA16dqvdkMwGmzI+dXLXnw3J+3Vdv7VKw+ThlTMboK6i9rnZ6Nntj5CQ==} + engines: {node: '>= 14'} + dependencies: + ast-types: 0.13.4 + escodegen: 2.1.0 + esprima: 4.0.1 + dev: true + + /dequal@2.0.3: + resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} + engines: {node: '>=6'} + + /devtools-protocol@0.0.1147663: + resolution: {integrity: sha512-hyWmRrexdhbZ1tcJUGpO95ivbRhWXz++F4Ko+n21AY5PNln2ovoJw+8ZMNDTtip+CNFQfrtLVh/w4009dXO/eQ==} + dev: true + + /devtools-protocol@0.0.1213968: + resolution: {integrity: sha512-o4n/beY+3CcZwFctYapjGelKptR4AuQT5gXS1Kvgbig+ArwkxK7f8wDVuD1wsoswiJWCwV6OK+Qb7vhNzNmABQ==} + dev: true + + /diff@4.0.2: + resolution: {integrity: sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==} + engines: {node: '>=0.3.1'} + + /diff@5.1.0: + resolution: {integrity: sha512-D+mk+qE8VC/PAUrlAU34N+VfXev0ghe5ywmpqrawphmVZc1bEfn56uo9qpyGp1p4xpzOHkSW4ztBd6L7Xx4ACw==} + engines: {node: '>=0.3.1'} + dev: true + + /dir-glob@3.0.1: + resolution: {integrity: sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==} + engines: {node: '>=8'} + dependencies: + path-type: 4.0.0 + + /doctrine@3.0.0: + resolution: {integrity: sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==} + engines: {node: '>=6.0.0'} + dependencies: + esutils: 2.0.3 + dev: false + + /duplexer2@0.1.4: + resolution: {integrity: sha512-asLFVfWWtJ90ZyOUHMqk7/S2w2guQKxUI2itj3d92ADHhxUSbCMGi1f1cBcJ7xM1To+pE/Khbwo1yuNbMEPKeA==} + dependencies: + readable-stream: 2.3.8 + dev: true + + /eastasianwidth@0.2.0: + resolution: {integrity: sha512-I88TYZWc9XiYHRQ4/3c5rjjfgkjhLyW2luGIheGERbNQ6OY7yTybanSpDXZa8y7VUP9YmDcYa+eyq4ca7iLqWA==} + dev: true + + /edge-paths@3.0.5: + resolution: {integrity: sha512-sB7vSrDnFa4ezWQk9nZ/n0FdpdUuC6R1EOrlU3DL+bovcNFK28rqu2emmAUjujYEJTWIgQGqgVVWUZXMnc8iWg==} + engines: {node: '>=14.0.0'} + dependencies: + '@types/which': 2.0.2 + which: 2.0.2 + dev: true + + /edgedriver@5.3.8: + resolution: {integrity: sha512-FWLPDuwJDeGGgtmlqTXb4lQi/HV9yylLo1F9O1g9TLqSemA5T6xH28seUIfyleVirLFtDQyKNUxKsMhMT4IfnA==} + hasBin: true + requiresBuild: true + dependencies: + '@wdio/logger': 8.16.17 + decamelize: 6.0.0 + edge-paths: 3.0.5 + node-fetch: 3.3.2 + unzipper: 0.10.14 + which: 4.0.0 + dev: true + + /emoji-regex@8.0.0: + resolution: {integrity: 
sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + dev: true + + /emoji-regex@9.2.2: + resolution: {integrity: sha512-L18DaJsXSUk2+42pv8mLs5jJT2hqFkFE4j21wOmgbUqsZ2hL72NsUU785g9RXgo3s0ZNgVl42TiHp3ZtOv/Vyg==} + dev: true + + /end-of-stream@1.4.4: + resolution: {integrity: sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==} + dependencies: + once: 1.4.0 + dev: true + + /esbuild-android-64@0.14.54: + resolution: {integrity: sha512-Tz2++Aqqz0rJ7kYBfz+iqyE3QMycD4vk7LBRyWaAVFgFtQ/O8EJOnVmTOiDWYZ/uYzB4kvP+bqejYdVKzE5lAQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /esbuild-android-arm64@0.14.54: + resolution: {integrity: sha512-F9E+/QDi9sSkLaClO8SOV6etqPd+5DgJje1F9lOWoNncDdOBL2YF59IhsWATSt0TLZbYCf3pNlTHvVV5VfHdvg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + requiresBuild: true + dev: true + optional: true + + /esbuild-darwin-64@0.14.54: + resolution: {integrity: sha512-jtdKWV3nBviOd5v4hOpkVmpxsBy90CGzebpbO9beiqUYVMBtSc0AL9zGftFuBon7PNDcdvNCEuQqw2x0wP9yug==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /esbuild-darwin-arm64@0.14.54: + resolution: {integrity: sha512-OPafJHD2oUPyvJMrsCvDGkRrVCar5aVyHfWGQzY1dWnzErjrDuSETxwA2HSsyg2jORLY8yBfzc1MIpUkXlctmw==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + requiresBuild: true + dev: true + optional: true + + /esbuild-freebsd-64@0.14.54: + resolution: {integrity: sha512-OKwd4gmwHqOTp4mOGZKe/XUlbDJ4Q9TjX0hMPIDBUWWu/kwhBAudJdBoxnjNf9ocIB6GN6CPowYpR/hRCbSYAg==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /esbuild-freebsd-arm64@0.14.54: + resolution: {integrity: sha512-sFwueGr7OvIFiQT6WeG0jRLjkjdqWWSrfbVwZp8iMP+8UHEHRBvlaxL6IuKNDwAozNUmbb8nIMXa7oAOARGs1Q==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + requiresBuild: true + dev: true + optional: true + + /esbuild-linux-32@0.14.54: + resolution: {integrity: sha512-1ZuY+JDI//WmklKlBgJnglpUL1owm2OX+8E1syCD6UAxcMM/XoWd76OHSjl/0MR0LisSAXDqgjT3uJqT67O3qw==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /esbuild-linux-64@0.14.54: + resolution: {integrity: sha512-EgjAgH5HwTbtNsTqQOXWApBaPVdDn7XcK+/PtJwZLT1UmpLoznPd8c5CxqsH2dQK3j05YsB3L17T8vE7cp4cCg==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /esbuild-linux-arm64@0.14.54: + resolution: {integrity: sha512-WL71L+0Rwv+Gv/HTmxTEmpv0UgmxYa5ftZILVi2QmZBgX3q7+tDeOQNqGtdXSdsL8TQi1vIaVFHUPDe0O0kdig==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /esbuild-linux-arm@0.14.54: + resolution: {integrity: sha512-qqz/SjemQhVMTnvcLGoLOdFpCYbz4v4fUo+TfsWG+1aOu70/80RV6bgNpR2JCrppV2moUQkww+6bWxXRL9YMGw==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /esbuild-linux-mips64le@0.14.54: + resolution: {integrity: sha512-qTHGQB8D1etd0u1+sB6p0ikLKRVuCWhYQhAHRPkO+OF3I/iSlTKNNS0Lh2Oc0g0UFGguaFZZiPJdJey3AGpAlw==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /esbuild-linux-ppc64le@0.14.54: + resolution: {integrity: sha512-j3OMlzHiqwZBDPRCDFKcx595XVfOfOnv68Ax3U4UKZ3MTYQB5Yz3X1mn5GnodEVYzhtZgxEBidLWeIs8FDSfrQ==} + engines: {node: '>=12'} + cpu: [ppc64] + 
os: [linux] + requiresBuild: true + dev: true + optional: true + + /esbuild-linux-riscv64@0.14.54: + resolution: {integrity: sha512-y7Vt7Wl9dkOGZjxQZnDAqqn+XOqFD7IMWiewY5SPlNlzMX39ocPQlOaoxvT4FllA5viyV26/QzHtvTjVNOxHZg==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /esbuild-linux-s390x@0.14.54: + resolution: {integrity: sha512-zaHpW9dziAsi7lRcyV4r8dhfG1qBidQWUXweUjnw+lliChJqQr+6XD71K41oEIC3Mx1KStovEmlzm+MkGZHnHA==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + requiresBuild: true + dev: true + optional: true + + /esbuild-netbsd-64@0.14.54: + resolution: {integrity: sha512-PR01lmIMnfJTgeU9VJTDY9ZerDWVFIUzAtJuDHwwceppW7cQWjBBqP48NdeRtoP04/AtO9a7w3viI+PIDr6d+w==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + requiresBuild: true + dev: true + optional: true + + /esbuild-openbsd-64@0.14.54: + resolution: {integrity: sha512-Qyk7ikT2o7Wu76UsvvDS5q0amJvmRzDyVlL0qf5VLsLchjCa1+IAvd8kTBgUxD7VBUUVgItLkk609ZHUc1oCaw==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + requiresBuild: true + dev: true + optional: true + + /esbuild-sunos-64@0.14.54: + resolution: {integrity: sha512-28GZ24KmMSeKi5ueWzMcco6EBHStL3B6ubM7M51RmPwXQGLe0teBGJocmWhgwccA1GeFXqxzILIxXpHbl9Q/Kw==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + requiresBuild: true + dev: true + optional: true + + /esbuild-windows-32@0.14.54: + resolution: {integrity: sha512-T+rdZW19ql9MjS7pixmZYVObd9G7kcaZo+sETqNH4RCkuuYSuv9AGHUVnPoP9hhuE1WM1ZimHz1CIBHBboLU7w==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /esbuild-windows-64@0.14.54: + resolution: {integrity: sha512-AoHTRBUuYwXtZhjXZbA1pGfTo8cJo3vZIcWGLiUcTNgHpJJMC1rVA44ZereBHMJtotyN71S8Qw0npiCIkW96cQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /esbuild-windows-arm64@0.14.54: + resolution: {integrity: sha512-M0kuUvXhot1zOISQGXwWn6YtS+Y/1RT9WrVIOywZnJHo3jCDyewAc79aKNQWFCQm+xNHVTq9h8dZKvygoXQQRg==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + requiresBuild: true + dev: true + optional: true + + /esbuild@0.14.54: + resolution: {integrity: sha512-Cy9llcy8DvET5uznocPyqL3BFRrFXSVqbgpMJ9Wz8oVjZlh/zUSNbPRbov0VX7VxN2JH1Oa0uNxZ7eLRb62pJA==} + engines: {node: '>=12'} + hasBin: true + requiresBuild: true + optionalDependencies: + '@esbuild/linux-loong64': 0.14.54 + esbuild-android-64: 0.14.54 + esbuild-android-arm64: 0.14.54 + esbuild-darwin-64: 0.14.54 + esbuild-darwin-arm64: 0.14.54 + esbuild-freebsd-64: 0.14.54 + esbuild-freebsd-arm64: 0.14.54 + esbuild-linux-32: 0.14.54 + esbuild-linux-64: 0.14.54 + esbuild-linux-arm: 0.14.54 + esbuild-linux-arm64: 0.14.54 + esbuild-linux-mips64le: 0.14.54 + esbuild-linux-ppc64le: 0.14.54 + esbuild-linux-riscv64: 0.14.54 + esbuild-linux-s390x: 0.14.54 + esbuild-netbsd-64: 0.14.54 + esbuild-openbsd-64: 0.14.54 + esbuild-sunos-64: 0.14.54 + esbuild-windows-32: 0.14.54 + esbuild-windows-64: 0.14.54 + esbuild-windows-arm64: 0.14.54 + dev: true + + /esbuild@0.16.17: + resolution: {integrity: sha512-G8LEkV0XzDMNwXKgM0Jwu3nY3lSTwSGY6XbxM9cr9+s0T/qSV1q1JVPBGzm3dcjhCic9+emZDmMffkwgPeOeLg==} + engines: {node: '>=12'} + hasBin: true + requiresBuild: true + optionalDependencies: + '@esbuild/android-arm': 0.16.17 + '@esbuild/android-arm64': 0.16.17 + '@esbuild/android-x64': 0.16.17 + '@esbuild/darwin-arm64': 0.16.17 + '@esbuild/darwin-x64': 0.16.17 + '@esbuild/freebsd-arm64': 0.16.17 + '@esbuild/freebsd-x64': 0.16.17 + '@esbuild/linux-arm': 
0.16.17 + '@esbuild/linux-arm64': 0.16.17 + '@esbuild/linux-ia32': 0.16.17 + '@esbuild/linux-loong64': 0.16.17 + '@esbuild/linux-mips64el': 0.16.17 + '@esbuild/linux-ppc64': 0.16.17 + '@esbuild/linux-riscv64': 0.16.17 + '@esbuild/linux-s390x': 0.16.17 + '@esbuild/linux-x64': 0.16.17 + '@esbuild/netbsd-x64': 0.16.17 + '@esbuild/openbsd-x64': 0.16.17 + '@esbuild/sunos-x64': 0.16.17 + '@esbuild/win32-arm64': 0.16.17 + '@esbuild/win32-ia32': 0.16.17 + '@esbuild/win32-x64': 0.16.17 + + /esbuild@0.17.19: + resolution: {integrity: sha512-XQ0jAPFkK/u3LcVRcvVHQcTIqD6E2H1fvZMA5dQPSOWb3suUbWbfbRf94pjc0bNzRYLfIrDRQXr7X+LHIm5oHw==} + engines: {node: '>=12'} + hasBin: true + requiresBuild: true + optionalDependencies: + '@esbuild/android-arm': 0.17.19 + '@esbuild/android-arm64': 0.17.19 + '@esbuild/android-x64': 0.17.19 + '@esbuild/darwin-arm64': 0.17.19 + '@esbuild/darwin-x64': 0.17.19 + '@esbuild/freebsd-arm64': 0.17.19 + '@esbuild/freebsd-x64': 0.17.19 + '@esbuild/linux-arm': 0.17.19 + '@esbuild/linux-arm64': 0.17.19 + '@esbuild/linux-ia32': 0.17.19 + '@esbuild/linux-loong64': 0.17.19 + '@esbuild/linux-mips64el': 0.17.19 + '@esbuild/linux-ppc64': 0.17.19 + '@esbuild/linux-riscv64': 0.17.19 + '@esbuild/linux-s390x': 0.17.19 + '@esbuild/linux-x64': 0.17.19 + '@esbuild/netbsd-x64': 0.17.19 + '@esbuild/openbsd-x64': 0.17.19 + '@esbuild/sunos-x64': 0.17.19 + '@esbuild/win32-arm64': 0.17.19 + '@esbuild/win32-ia32': 0.17.19 + '@esbuild/win32-x64': 0.17.19 + dev: true + + /escalade@3.1.1: + resolution: {integrity: sha512-k0er2gUkLf8O0zKJiAhmkTnJlTvINGv7ygDNPbeIsX/TJjGJZHuh9B2UxbsaEkmlEo9MfhrSzmhIlhRlI2GXnw==} + engines: {node: '>=6'} + dev: true + + /escape-string-regexp@4.0.0: + resolution: {integrity: sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==} + engines: {node: '>=10'} + dev: false + + /escodegen@2.1.0: + resolution: {integrity: sha512-2NlIDTwUWJN0mRPQOdtQBzbUHvdGY2P1VXSyU83Q3xKxM7WHX2Ql8dKq782Q9TgQUNOLEzEYu9bzLNj1q88I5w==} + engines: {node: '>=6.0'} + hasBin: true + dependencies: + esprima: 4.0.1 + estraverse: 5.3.0 + esutils: 2.0.3 + optionalDependencies: + source-map: 0.6.1 + dev: true + + /eslint-compat-utils@0.1.2(eslint@8.54.0): + resolution: {integrity: sha512-Jia4JDldWnFNIru1Ehx1H5s9/yxiRHY/TimCuUc0jNexew3cF1gI6CYZil1ociakfWO3rRqFjl1mskBblB3RYg==} + engines: {node: '>=12'} + peerDependencies: + eslint: '>=6.0.0' + dependencies: + eslint: 8.54.0 + dev: false + + /eslint-config-prettier@9.0.0(eslint@8.54.0): + resolution: {integrity: sha512-IcJsTkJae2S35pRsRAwoCE+925rJJStOdkKnLVgtE+tEpqU0EVVM7OqrwxqgptKdX29NUwC82I5pXsGFIgSevw==} + hasBin: true + peerDependencies: + eslint: '>=7.0.0' + dependencies: + eslint: 8.54.0 + dev: false + + /eslint-plugin-prettier@4.2.1(eslint-config-prettier@9.0.0)(eslint@8.54.0)(prettier@3.1.0): + resolution: {integrity: sha512-f/0rXLXUt0oFYs8ra4w49wYZBG5GKZpAYsJSm6rnYL5uVDjd+zowwMwVZHnAjf4edNrKpCDYfXDgmRE/Ak7QyQ==} + engines: {node: '>=12.0.0'} + peerDependencies: + eslint: '>=7.28.0' + eslint-config-prettier: '*' + prettier: '>=2.0.0' + peerDependenciesMeta: + eslint-config-prettier: + optional: true + dependencies: + eslint: 8.54.0 + eslint-config-prettier: 9.0.0(eslint@8.54.0) + prettier: 3.1.0 + prettier-linter-helpers: 1.0.0 + dev: false + + /eslint-plugin-svelte@2.35.1(eslint@8.54.0)(svelte@4.2.7)(ts-node@10.9.1): + resolution: {integrity: sha512-IF8TpLnROSGy98Z3NrsKXWDSCbNY2ReHDcrYTuXZMbfX7VmESISR78TWgO9zdg4Dht1X8coub5jKwHzP0ExRug==} + engines: {node: ^14.17.0 || >=16.0.0} + peerDependencies: + eslint: ^7.0.0 || 
^8.0.0-0 + svelte: ^3.37.0 || ^4.0.0 + peerDependenciesMeta: + svelte: + optional: true + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.54.0) + '@jridgewell/sourcemap-codec': 1.4.15 + debug: 4.3.4 + eslint: 8.54.0 + eslint-compat-utils: 0.1.2(eslint@8.54.0) + esutils: 2.0.3 + known-css-properties: 0.29.0 + postcss: 8.4.31 + postcss-load-config: 3.1.4(postcss@8.4.31)(ts-node@10.9.1) + postcss-safe-parser: 6.0.0(postcss@8.4.31) + postcss-selector-parser: 6.0.13 + semver: 7.5.4 + svelte: 4.2.7 + svelte-eslint-parser: 0.33.1(svelte@4.2.7) + transitivePeerDependencies: + - supports-color + - ts-node + dev: false + + /eslint-scope@5.1.1: + resolution: {integrity: sha512-2NxwbF/hZ0KpepYN0cNbo+FN6XoK7GaHlQhgx/hIZl6Va0bF45RQOOwhLIy8lQDbuCiadSLCBnH2CFYquit5bw==} + engines: {node: '>=8.0.0'} + dependencies: + esrecurse: 4.3.0 + estraverse: 4.3.0 + dev: false + + /eslint-scope@7.2.2: + resolution: {integrity: sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + esrecurse: 4.3.0 + estraverse: 5.3.0 + dev: false + + /eslint-visitor-keys@3.4.3: + resolution: {integrity: sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dev: false + + /eslint@8.54.0: + resolution: {integrity: sha512-NY0DfAkM8BIZDVl6PgSa1ttZbx3xHgJzSNJKYcQglem6CppHyMhRIQkBVSSMaSRnLhig3jsDbEzOjwCVt4AmmA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + hasBin: true + dependencies: + '@eslint-community/eslint-utils': 4.4.0(eslint@8.54.0) + '@eslint-community/regexpp': 4.10.0 + '@eslint/eslintrc': 2.1.3 + '@eslint/js': 8.54.0 + '@humanwhocodes/config-array': 0.11.13 + '@humanwhocodes/module-importer': 1.0.1 + '@nodelib/fs.walk': 1.2.8 + '@ungap/structured-clone': 1.2.0 + ajv: 6.12.6 + chalk: 4.1.2 + cross-spawn: 7.0.3 + debug: 4.3.4 + doctrine: 3.0.0 + escape-string-regexp: 4.0.0 + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + esquery: 1.5.0 + esutils: 2.0.3 + fast-deep-equal: 3.1.3 + file-entry-cache: 6.0.1 + find-up: 5.0.0 + glob-parent: 6.0.2 + globals: 13.23.0 + graphemer: 1.4.0 + ignore: 5.3.0 + imurmurhash: 0.1.4 + is-glob: 4.0.3 + is-path-inside: 3.0.3 + js-yaml: 4.1.0 + json-stable-stringify-without-jsonify: 1.0.1 + levn: 0.4.1 + lodash.merge: 4.6.2 + minimatch: 3.1.2 + natural-compare: 1.4.0 + optionator: 0.9.3 + strip-ansi: 6.0.1 + text-table: 0.2.0 + transitivePeerDependencies: + - supports-color + dev: false + + /espree@9.6.1: + resolution: {integrity: sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + dependencies: + acorn: 8.11.2 + acorn-jsx: 5.3.2(acorn@8.11.2) + eslint-visitor-keys: 3.4.3 + dev: false + + /esprima@4.0.1: + resolution: {integrity: sha512-eGuFFw7Upda+g4p+QHvnW0RyTX/SVeJBDM/gCtMARO0cLuT2HcEKnTPvhjV6aGeqrCB/sbNop0Kszm0jsaWU4A==} + engines: {node: '>=4'} + hasBin: true + dev: true + + /esquery@1.5.0: + resolution: {integrity: sha512-YQLXUplAwJgCydQ78IMJywZCceoqk1oH01OERdSAJc/7U2AylwjhSCLDEtqwg811idIS/9fIU5GjG73IgjKMVg==} + engines: {node: '>=0.10'} + dependencies: + estraverse: 5.3.0 + dev: false + + /esrecurse@4.3.0: + resolution: {integrity: sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==} + engines: {node: '>=4.0'} + dependencies: + estraverse: 5.3.0 + dev: false + + /estraverse@4.3.0: + resolution: 
{integrity: sha512-39nnKffWz8xN1BU/2c79n9nB9HDzo0niYUqx6xyqUnyoAnQyyWpOTdZEeiCch8BBu515t4wp9ZmgVfVhn9EBpw==} + engines: {node: '>=4.0'} + dev: false + + /estraverse@5.3.0: + resolution: {integrity: sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==} + engines: {node: '>=4.0'} + + /estree-walker@0.6.1: + resolution: {integrity: sha512-SqmZANLWS0mnatqbSfRP5g8OXZC12Fgg1IwNtLsyHDzJizORW4khDfjPqJZsemPWBB2uqykUah5YpQ6epsqC/w==} + dev: true + + /estree-walker@1.0.1: + resolution: {integrity: sha512-1fMXF3YP4pZZVozF8j/ZLfvnR8NSIljt56UhbZ5PeeDmmGHpgpdwQt7ITlGvYaQukCvuBRMLEiKiYC+oeIg4cg==} + dev: true + + /estree-walker@2.0.2: + resolution: {integrity: sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==} + dev: true + + /estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + dependencies: + '@types/estree': 1.0.5 + dev: false + + /esutils@2.0.3: + resolution: {integrity: sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==} + engines: {node: '>=0.10.0'} + + /execa@5.1.1: + resolution: {integrity: sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg==} + engines: {node: '>=10'} + dependencies: + cross-spawn: 7.0.3 + get-stream: 6.0.1 + human-signals: 2.1.0 + is-stream: 2.0.1 + merge-stream: 2.0.0 + npm-run-path: 4.0.1 + onetime: 5.1.2 + signal-exit: 3.0.7 + strip-final-newline: 2.0.0 + dev: true + + /extract-zip@2.0.1: + resolution: {integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==} + engines: {node: '>= 10.17.0'} + hasBin: true + dependencies: + debug: 4.3.4 + get-stream: 5.2.0 + yauzl: 2.10.0 + optionalDependencies: + '@types/yauzl': 2.10.3 + transitivePeerDependencies: + - supports-color + dev: true + + /fast-deep-equal@2.0.1: + resolution: {integrity: sha512-bCK/2Z4zLidyB4ReuIsvALH6w31YfAQDmXMqMx6FyfHqvBxtjC0eRumeSu4Bs3XtXwpyIywtSTrVT99BxY1f9w==} + dev: true + + /fast-deep-equal@3.1.3: + resolution: {integrity: sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==} + dev: false + + /fast-diff@1.3.0: + resolution: {integrity: sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==} + dev: false + + /fast-fifo@1.3.2: + resolution: {integrity: sha512-/d9sfos4yxzpwkDkuN7k2SqFKtYNmCTzgfEpz82x34IM9/zc8KGxQoXg1liNC/izpRM/MBdt44Nmx41ZWqk+FQ==} + dev: true + + /fast-glob@3.3.2: + resolution: {integrity: sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==} + engines: {node: '>=8.6.0'} + dependencies: + '@nodelib/fs.stat': 2.0.5 + '@nodelib/fs.walk': 1.2.8 + glob-parent: 5.1.2 + merge2: 1.4.1 + micromatch: 4.0.5 + + /fast-json-stable-stringify@2.1.0: + resolution: {integrity: sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==} + dev: false + + /fast-levenshtein@2.0.6: + resolution: {integrity: sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==} + dev: false + + /fastq@1.15.0: + resolution: {integrity: sha512-wBrocU2LCXXa+lWBt8RoIRD89Fi8OdABODa/kEnyeyjS5aZO5/GNvI5sEINADqP/h8M29UHTHUb53sUu5Ihqdw==} + dependencies: + reusify: 1.0.4 + + /fd-slicer@1.1.0: + resolution: {integrity: sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==} + 
dependencies: + pend: 1.2.0 + dev: true + + /fetch-blob@3.2.0: + resolution: {integrity: sha512-7yAQpD2UMJzLi1Dqv7qFYnPbaPx7ZfFK6PiIxQ4PfkGPyNyl2Ugx+a/umUonmKqjhM4DnfbMvdX6otXq83soQQ==} + engines: {node: ^12.20 || >= 14.13} + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 3.2.1 + dev: true + + /file-entry-cache@6.0.1: + resolution: {integrity: sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flat-cache: 3.2.0 + dev: false + + /fill-range@7.0.1: + resolution: {integrity: sha512-qOo9F+dMUmC2Lcb4BbVvnKJxTPjCm+RRpe4gDuGrzkL7mEVl/djYSu2OdQ2Pa302N4oqkSg9ir6jaLWJ2USVpQ==} + engines: {node: '>=8'} + dependencies: + to-regex-range: 5.0.1 + + /find-up@5.0.0: + resolution: {integrity: sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==} + engines: {node: '>=10'} + dependencies: + locate-path: 6.0.0 + path-exists: 4.0.0 + dev: false + + /flat-cache@3.2.0: + resolution: {integrity: sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==} + engines: {node: ^10.12.0 || >=12.0.0} + dependencies: + flatted: 3.2.9 + keyv: 4.5.4 + rimraf: 3.0.2 + dev: false + + /flatted@3.2.9: + resolution: {integrity: sha512-36yxDn5H7OFZQla0/jFJmbIKTdZAQHngCedGxiMmpNfEZM0sdEeT+WczLQrjK6D7o2aiyLYDnkw0R3JK0Qv1RQ==} + dev: false + + /foreground-child@3.1.1: + resolution: {integrity: sha512-TMKDUnIte6bfb5nWv7V/caI169OHgvwjb7V4WkeUvbQQdjr5rWKqHFiKWb/fcOwB+CzBT+qbWjvj+DVwRskpIg==} + engines: {node: '>=14'} + dependencies: + cross-spawn: 7.0.3 + signal-exit: 4.1.0 + dev: true + + /form-data-encoder@2.1.4: + resolution: {integrity: sha512-yDYSgNMraqvnxiEXO4hi88+YZxaHC6QKzb5N84iRCTDeRO7ZALpir/lVmf/uXUhnwUr2O4HU8s/n6x+yNjQkHw==} + engines: {node: '>= 14.17'} + dev: true + + /formdata-polyfill@4.0.10: + resolution: {integrity: sha512-buewHzMvYL29jdeQTVILecSaZKnt/RJWjoZCF5OW60Z67/GmSLBkOFM7qh1PI3zFNtJbaZL5eQu1vLfazOwj4g==} + engines: {node: '>=12.20.0'} + dependencies: + fetch-blob: 3.2.0 + dev: true + + /fs-extra@8.1.0: + resolution: {integrity: sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==} + engines: {node: '>=6 <7 || >=8'} + dependencies: + graceful-fs: 4.2.11 + jsonfile: 4.0.0 + universalify: 0.1.2 + dev: true + + /fs.realpath@1.0.0: + resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} + + /fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + requiresBuild: true + optional: true + + /fstream@1.0.12: + resolution: {integrity: sha512-WvJ193OHa0GHPEL+AycEJgxvBEwyfRkN1vhjca23OaPVMCaLCXTd5qAu82AjTcgP1UJmytkOKb63Ypde7raDIg==} + engines: {node: '>=0.6'} + dependencies: + graceful-fs: 4.2.11 + inherits: 2.0.4 + mkdirp: 0.5.6 + rimraf: 2.7.1 + dev: true + + /function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + /geckodriver@4.2.1: + resolution: {integrity: sha512-4m/CRk0OI8MaANRuFIahvOxYTSjlNAO2p9JmE14zxueknq6cdtB5M9UGRQ8R9aMV0bLGNVHHDnDXmoXdOwJfWg==} + engines: {node: ^16.13 || >=18 || >=20} + hasBin: true + requiresBuild: true + dependencies: + '@wdio/logger': 8.16.17 + decamelize: 6.0.0 + http-proxy-agent: 7.0.0 + https-proxy-agent: 7.0.2 + node-fetch: 3.3.2 + tar-fs: 
3.0.4 + unzipper: 0.10.14 + which: 4.0.0 + transitivePeerDependencies: + - supports-color + dev: true + + /get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + dev: true + + /get-func-name@2.0.2: + resolution: {integrity: sha512-8vXOvuE167CtIc3OyItco7N/dpRtBbYOsPsXCz7X/PMnlGjYjSGuZJgM1Y7mmew7BKf9BqvLX2tnOVy1BBUsxQ==} + dev: true + + /get-port@7.0.0: + resolution: {integrity: sha512-mDHFgApoQd+azgMdwylJrv2DX47ywGq1i5VFJE7fZ0dttNq3iQMfsU4IvEgBHojA3KqEudyu7Vq+oN8kNaNkWw==} + engines: {node: '>=16'} + dev: true + + /get-stream@5.2.0: + resolution: {integrity: sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==} + engines: {node: '>=8'} + dependencies: + pump: 3.0.0 + dev: true + + /get-stream@6.0.1: + resolution: {integrity: sha512-ts6Wi+2j3jQjqi70w5AlN8DFnkSwC+MqmxEzdEALB2qXZYV3X/b1CTfgPLGJNMeAWxdPfU8FO1ms3NUfaHCPYg==} + engines: {node: '>=10'} + dev: true + + /get-uri@6.0.2: + resolution: {integrity: sha512-5KLucCJobh8vBY1K07EFV4+cPZH3mrV9YeAruUseCQKHB58SGjjT2l9/eA9LD082IiuMjSlFJEcdJ27TXvbZNw==} + engines: {node: '>= 14'} + dependencies: + basic-ftp: 5.0.3 + data-uri-to-buffer: 6.0.1 + debug: 4.3.4 + fs-extra: 8.1.0 + transitivePeerDependencies: + - supports-color + dev: true + + /glob-parent@5.1.2: + resolution: {integrity: sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==} + engines: {node: '>= 6'} + dependencies: + is-glob: 4.0.3 + + /glob-parent@6.0.2: + resolution: {integrity: sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==} + engines: {node: '>=10.13.0'} + dependencies: + is-glob: 4.0.3 + dev: false + + /glob@10.3.10: + resolution: {integrity: sha512-fa46+tv1Ak0UPK1TOy/pZrIybNNt4HCv7SDzwyfiOZkvZLEbjsZkJBPtDHVshZjbecAoAGSC20MjLDG/qr679g==} + engines: {node: '>=16 || 14 >=14.17'} + hasBin: true + dependencies: + foreground-child: 3.1.1 + jackspeak: 2.3.6 + minimatch: 9.0.3 + minipass: 7.0.4 + path-scurry: 1.10.1 + dev: true + + /glob@7.1.6: + resolution: {integrity: sha512-LwaxwyZ72Lk7vZINtNNrywX0ZuLyStrdDtabefZKAY5ZGJhVtgdznluResxNmPitE0SAO+O26sWTHeKSI2wMBA==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + dev: true + + /glob@7.2.3: + resolution: {integrity: sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 3.1.2 + once: 1.4.0 + path-is-absolute: 1.0.1 + + /glob@8.1.0: + resolution: {integrity: sha512-r8hpEjiQEYlF2QU0df3dS+nxxSIreXQS1qRhMJM0Q5NDdR386C7jb7Hwwod8Fgiuex+k0GFjgft18yvxm5XoCQ==} + engines: {node: '>=12'} + dependencies: + fs.realpath: 1.0.0 + inflight: 1.0.6 + inherits: 2.0.4 + minimatch: 5.1.6 + once: 1.4.0 + dev: true + + /globals@13.23.0: + resolution: {integrity: sha512-XAmF0RjlrjY23MA51q3HltdlGxUpXPvg0GioKiD9X6HD28iMjo2dKC8Vqwm7lne4GNr78+RHTfliktR6ZH09wA==} + engines: {node: '>=8'} + dependencies: + type-fest: 0.20.2 + dev: false + + /globby@11.1.0: + resolution: {integrity: sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==} + engines: {node: '>=10'} + dependencies: + array-union: 2.1.0 + dir-glob: 3.0.1 + fast-glob: 3.3.2 + ignore: 5.3.0 + merge2: 1.4.1 + slash: 3.0.0 + + /got@12.6.1: + resolution: {integrity: 
sha512-mThBblvlAF1d4O5oqyvN+ZxLAYwIJK7bpMxgYqPD9okW0C3qm5FFn7k811QrcuEBwaogR3ngOFoCfs6mRv7teQ==} + engines: {node: '>=14.16'} + dependencies: + '@sindresorhus/is': 5.6.0 + '@szmarczak/http-timer': 5.0.1 + cacheable-lookup: 7.0.0 + cacheable-request: 10.2.14 + decompress-response: 6.0.0 + form-data-encoder: 2.1.4 + get-stream: 6.0.1 + http2-wrapper: 2.2.1 + lowercase-keys: 3.0.0 + p-cancelable: 3.0.0 + responselike: 3.0.0 + dev: true + + /got@13.0.0: + resolution: {integrity: sha512-XfBk1CxOOScDcMr9O1yKkNaQyy865NbYs+F7dr4H0LZMVgCj2Le59k6PqbNHoL5ToeaEQUYh6c6yMfVcc6SJxA==} + engines: {node: '>=16'} + dependencies: + '@sindresorhus/is': 5.6.0 + '@szmarczak/http-timer': 5.0.1 + cacheable-lookup: 7.0.0 + cacheable-request: 10.2.14 + decompress-response: 6.0.0 + form-data-encoder: 2.1.4 + get-stream: 6.0.1 + http2-wrapper: 2.2.1 + lowercase-keys: 3.0.0 + p-cancelable: 3.0.0 + responselike: 3.0.0 + dev: true + + /graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + dev: true + + /grapheme-splitter@1.0.4: + resolution: {integrity: sha512-bzh50DW9kTPM00T8y4o8vQg89Di9oLJVLW/KaOGIXJWP/iqCN6WKYkbNOF04vFLJhwcpYUh9ydh/+5vpOqV4YQ==} + dev: true + + /graphemer@1.4.0: + resolution: {integrity: sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==} + dev: false + + /has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + /hasown@2.0.0: + resolution: {integrity: sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA==} + engines: {node: '>= 0.4'} + dependencies: + function-bind: 1.1.2 + + /http-cache-semantics@4.1.1: + resolution: {integrity: sha512-er295DKPVsV82j5kw1Gjt+ADA/XYHsajl82cGNQG2eyoPkvgUhX+nDIyelzhIWbbsXP39EHcI6l5tYs2FYqYXQ==} + dev: true + + /http-proxy-agent@7.0.0: + resolution: {integrity: sha512-+ZT+iBxVUQ1asugqnD6oWoRiS25AkjNfG085dKJGtGxkdwLQrMKU5wJr2bOOFAXzKcTuqq+7fZlTMgG3SRfIYQ==} + engines: {node: '>= 14'} + dependencies: + agent-base: 7.1.0 + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + dev: true + + /http2-wrapper@2.2.1: + resolution: {integrity: sha512-V5nVw1PAOgfI3Lmeaj2Exmeg7fenjhRUgz1lPSezy1CuhPYbgQtbQj4jZfEAEMlaL+vupsvhjqCyjzob0yxsmQ==} + engines: {node: '>=10.19.0'} + dependencies: + quick-lru: 5.1.1 + resolve-alpn: 1.2.1 + dev: true + + /https-proxy-agent@7.0.2: + resolution: {integrity: sha512-NmLNjm6ucYwtcUmL7JQC1ZQ57LmHP4lT15FQ8D61nak1rO6DH+fz5qNK2Ap5UN4ZapYICE3/0KodcLYSPsPbaA==} + engines: {node: '>= 14'} + dependencies: + agent-base: 7.1.0 + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + dev: true + + /human-signals@2.1.0: + resolution: {integrity: sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} + engines: {node: '>=10.17.0'} + dev: true + + /ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} + dev: true + + /ignore@5.3.0: + resolution: {integrity: sha512-g7dmpshy+gD7mh88OC9NwSGTKoc3kyLAZQRU1mt53Aw/vnvfXnbC+F/7F7QoYVKbV+KNvJx8wArewKy1vXMtlg==} + engines: {node: '>= 4'} + + /import-fresh@3.3.0: + resolution: {integrity: sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==} + engines: {node: '>=6'} + dependencies: + parent-module: 1.0.1 + resolve-from: 4.0.0 + dev: false + + 
/import-meta-resolve@3.1.1: + resolution: {integrity: sha512-qeywsE/KC3w9Fd2ORrRDUw6nS/nLwZpXgfrOc2IILvZYnCaEMd+D56Vfg9k4G29gIeVi3XKql1RQatME8iYsiw==} + dev: true + + /imurmurhash@0.1.4: + resolution: {integrity: sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==} + engines: {node: '>=0.8.19'} + dev: false + + /inflight@1.0.6: + resolution: {integrity: sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==} + dependencies: + once: 1.4.0 + wrappy: 1.0.2 + + /inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + /ip@1.1.8: + resolution: {integrity: sha512-PuExPYUiu6qMBQb4l06ecm6T6ujzhmh+MeJcW9wa89PoAz5pvd4zPgN5WJV104mb6S2T1AwNIAaB70JNrLQWhg==} + dev: true + + /ip@2.0.0: + resolution: {integrity: sha512-WKa+XuLG1A1R0UWhl2+1XQSi+fZWMsYKffMZTTYsiZaUD8k2yDAj5atimTUD2TZkyCkNEeYE5NhFZmupOGtjYQ==} + dev: true + + /is-binary-path@2.1.0: + resolution: {integrity: sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==} + engines: {node: '>=8'} + dependencies: + binary-extensions: 2.2.0 + dev: true + + /is-core-module@2.13.1: + resolution: {integrity: sha512-hHrIjvZsftOsvKSn2TRYl63zvxsgE0K+0mYMoH6gD4omR5IWB2KynivBQczo3+wF1cCkjzvptnI9Q0sPU66ilw==} + dependencies: + hasown: 2.0.0 + + /is-extglob@2.1.1: + resolution: {integrity: sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==} + engines: {node: '>=0.10.0'} + + /is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + dev: true + + /is-fullwidth-code-point@4.0.0: + resolution: {integrity: sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==} + engines: {node: '>=12'} + dev: true + + /is-glob@4.0.3: + resolution: {integrity: sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==} + engines: {node: '>=0.10.0'} + dependencies: + is-extglob: 2.1.1 + + /is-number@7.0.0: + resolution: {integrity: sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==} + engines: {node: '>=0.12.0'} + + /is-path-inside@3.0.3: + resolution: {integrity: sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==} + engines: {node: '>=8'} + dev: false + + /is-plain-obj@4.1.0: + resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==} + engines: {node: '>=12'} + dev: true + + /is-reference@3.0.2: + resolution: {integrity: sha512-v3rht/LgVcsdZa3O2Nqs+NMowLOxeOm7Ay9+/ARQ2F+qEoANRcqrjAZKGN0v8ymUetZGgkp26LTnGT7H0Qo9Pg==} + dependencies: + '@types/estree': 1.0.5 + dev: false + + /is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + dev: true + + /isarray@1.0.0: + resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} + dev: true + + /isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + /isexe@3.1.1: + resolution: {integrity: sha512-LpB/54B+/2J5hqQ7imZHfdU31OlgQqx7ZicVlkm9kzg9/w8GKLEcFfJl/t7DCEDueOyBAD6zCCwTO6Fzs0NoEQ==} + 
engines: {node: '>=16'} + dev: true + + /jackspeak@2.3.6: + resolution: {integrity: sha512-N3yCS/NegsOBokc8GAdM8UcmfsKiSS8cipheD/nivzr700H+nsMOxJjQnvwOcRYVuFkdH0wGUvW2WbXGmrZGbQ==} + engines: {node: '>=14'} + dependencies: + '@isaacs/cliui': 8.0.2 + optionalDependencies: + '@pkgjs/parseargs': 0.11.0 + dev: true + + /joycon@3.1.1: + resolution: {integrity: sha512-34wB/Y7MW7bzjKRjUKTa46I2Z7eV62Rkhva+KkopW7Qvv/OSWBqvkSY7vusOPrNuZcUG3tApvdVgNB8POj3SPw==} + engines: {node: '>=10'} + dev: true + + /js-yaml@4.1.0: + resolution: {integrity: sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==} + hasBin: true + dependencies: + argparse: 2.0.1 + dev: false + + /json-buffer@3.0.1: + resolution: {integrity: sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==} + + /json-schema-traverse@0.4.1: + resolution: {integrity: sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==} + dev: false + + /json-stable-stringify-without-jsonify@1.0.1: + resolution: {integrity: sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==} + dev: false + + /jsonc-parser@3.2.0: + resolution: {integrity: sha512-gfFQZrcTc8CnKXp6Y4/CBT3fTc0OVuDofpre4aEeEpSBPV5X5v4+Vmx+8snU7RLPrNHPKSgLxGo9YuQzz20o+w==} + dev: true + + /jsonfile@4.0.0: + resolution: {integrity: sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==} + optionalDependencies: + graceful-fs: 4.2.11 + dev: true + + /keyv@4.5.4: + resolution: {integrity: sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==} + dependencies: + json-buffer: 3.0.1 + + /known-css-properties@0.29.0: + resolution: {integrity: sha512-Ne7wqW7/9Cz54PDt4I3tcV+hAyat8ypyOGzYRJQfdxnnjeWsTxt1cy8pjvvKeI5kfXuyvULyeeAvwvvtAX3ayQ==} + dev: false + + /ky@0.33.3: + resolution: {integrity: sha512-CasD9OCEQSFIam2U8efFK81Yeg8vNMTBUqtMOHlrcWQHqUX3HeCl9Dr31u4toV7emlH8Mymk5+9p0lL6mKb/Xw==} + engines: {node: '>=14.16'} + dev: true + + /lazystream@1.0.1: + resolution: {integrity: sha512-b94GiNHQNy6JNTrt5w6zNyffMrNkXZb3KTkCZJb2V1xaEGCk093vkZ2jk3tpaeP33/OiXC+WvK9AxUebnf5nbw==} + engines: {node: '>= 0.6.3'} + dependencies: + readable-stream: 2.3.8 + dev: true + + /levn@0.4.1: + resolution: {integrity: sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: false + + /lilconfig@2.1.0: + resolution: {integrity: sha512-utWOt/GHzuUxnLKxB6dk81RoOeoNeHgbrXiuGk4yyF5qlRz+iIVWu56E2fqGHFrXz0QNUhLB/8nKqvRH66JKGQ==} + engines: {node: '>=10'} + + /lines-and-columns@1.2.4: + resolution: {integrity: sha512-7ylylesZQ/PV29jhEDl3Ufjo6ZX7gCqJr5F7PKrqc93v7fzSymt1BpwEU8nAUXs8qzzvqhbjhK5QZg6Mt/HkBg==} + dev: true + + /listenercount@1.0.1: + resolution: {integrity: sha512-3mk/Zag0+IJxeDrxSgaDPy4zZ3w05PRZeJNnlWhzFz5OkX49J4krc+A8X2d2M69vGMBEX0uyl8M+W+8gH+kBqQ==} + dev: true + + /load-tsconfig@0.2.5: + resolution: {integrity: sha512-IXO6OCs9yg8tMKzfPZ1YmheJbZCiEsnBdcB03l0OcfK9prKnJb96siuHCr5Fl37/yo9DnKU+TLpxzTUspw9shg==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dev: true + + /local-pkg@0.4.3: + resolution: {integrity: sha512-SFppqq5p42fe2qcZQqqEOiVRXl+WCP1MdT6k7BDEW1j++sp5fIY+/fdRQitvKgB5BrBcmrs5m/L0v2FrU5MY1g==} + engines: {node: '>=14'} + dev: true + + /locate-app@2.1.0: + resolution: {integrity: 
sha512-rcVo/iLUxrd9d0lrmregK/Z5Y5NCpSwf9KlMbPpOHmKmdxdQY1Fj8NDQ5QymJTryCsBLqwmniFv2f3JKbk9Bvg==} + dependencies: + n12: 0.4.0 + type-fest: 2.13.0 + userhome: 1.0.0 + dev: true + + /locate-character@3.0.0: + resolution: {integrity: sha512-SW13ws7BjaeJ6p7Q6CO2nchbYEc3X3J6WrmTTDto7yMPqVSZTUyY5Tjbid+Ab8gLnATtygYtiDIJGQRRn2ZOiA==} + dev: false + + /locate-path@6.0.0: + resolution: {integrity: sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==} + engines: {node: '>=10'} + dependencies: + p-locate: 5.0.0 + dev: false + + /lodash.clonedeep@4.5.0: + resolution: {integrity: sha512-H5ZhCF25riFd9uB5UCkVKo61m3S/xZk1x4wA6yp/L3RFP6Z/eHH1ymQcGLo7J3GMPfm0V/7m1tryHuGVxpqEBQ==} + dev: true + + /lodash.merge@4.6.2: + resolution: {integrity: sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==} + dev: false + + /lodash.sortby@4.7.0: + resolution: {integrity: sha512-HDWXG8isMntAyRF5vZ7xKuEvOhT4AhlRt/3czTSjvGUxjYCBVRQY48ViDHyfYz9VIoBkW4TMGQNapx+l3RUwdA==} + dev: true + + /lodash.zip@4.2.0: + resolution: {integrity: sha512-C7IOaBBK/0gMORRBd8OETNx3kmOkgIWIPvyDpZSCTwUrpYmgZwJkjZeOD8ww4xbOUOs4/attY+pciKvadNfFbg==} + dev: true + + /lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + dev: true + + /loglevel-plugin-prefix@0.8.4: + resolution: {integrity: sha512-WpG9CcFAOjz/FtNht+QJeGpvVl/cdR6P0z6OcXSkr8wFJOsV2GRj2j10JLfjuA4aYkcKCNIEqRGCyTife9R8/g==} + dev: true + + /loglevel@1.8.1: + resolution: {integrity: sha512-tCRIJM51SHjAayKwC+QAg8hT8vg6z7GSgLJKGvzuPb1Wc+hLzqtuVLxp6/HzSPOozuK+8ErAhy7U/sVzw8Dgfg==} + engines: {node: '>= 0.6.0'} + dev: true + + /loupe@2.3.7: + resolution: {integrity: sha512-zSMINGVYkdpYSOBmLi0D1Uo7JU9nVdQKrHxC8eYlV+9YKK9WePqAlL7lSlorG/U2Fw1w0hTBmaa/jrQ3UbPHtA==} + dependencies: + get-func-name: 2.0.2 + dev: true + + /lowercase-keys@3.0.0: + resolution: {integrity: sha512-ozCC6gdQ+glXOQsveKD0YsDy8DSQFjDTz4zyzEHNV5+JP5D62LmfDZ6o1cycFx9ouG940M5dE8C8CTewdj2YWQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dev: true + + /lru-cache@10.0.3: + resolution: {integrity: sha512-B7gr+F6MkqB3uzINHXNctGieGsRTMwIBgxkp0yq/5BwcuDzD4A8wQpHQW6vDAm1uKSLQghmRdD9sKqf2vJ1cEg==} + engines: {node: 14 || >=16.14} + dev: true + + /lru-cache@6.0.0: + resolution: {integrity: sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==} + engines: {node: '>=10'} + dependencies: + yallist: 4.0.0 + + /lru-cache@7.18.3: + resolution: {integrity: sha512-jumlc0BIUrS3qJGgIkWZsyfAM7NCWiBcCDhnd+3NNM5KbBmLTgHVfWBcg6W+rLUsIpzpERPsvwUP7CckAQSOoA==} + engines: {node: '>=12'} + dev: true + + /magic-string@0.25.9: + resolution: {integrity: sha512-RmF0AsMzgt25qzqqLc1+MbHmhdx0ojF2Fvs4XnOqz2ZOBXzzkEwc/dJQZCYHAn7v1jbVOjAZfK8msRn4BxO4VQ==} + dependencies: + sourcemap-codec: 1.4.8 + dev: true + + /magic-string@0.30.5: + resolution: {integrity: sha512-7xlpfBaQaP/T6Vh8MO/EqXSW5En6INHEvEXQiuff7Gku0PWjU3uf6w/j9o7O+SpB5fOAkrI5HeoNgwjEO0pFsA==} + engines: {node: '>=12'} + dependencies: + '@jridgewell/sourcemap-codec': 1.4.15 + dev: false + + /make-error@1.3.6: + resolution: {integrity: sha512-s8UhlNe7vPKomQhC1qFelMokr/Sc3AgNbso3n74mVPA5LTZwkB9NlXf4XPamLxJE8h0gh73rM94xvwRT2CVInw==} + + /mdn-data@2.0.30: + resolution: {integrity: sha512-GaqWWShW4kv/G9IEucWScBx9G1/vsFZZJUO+tD26M8J8z3Kw5RDQjaoZe03YAClgeS/SWPOcb4nkFBTEi5DUEA==} + dev: false + + /merge-stream@2.0.0: + resolution: {integrity: 
sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} + dev: true + + /merge2@1.4.1: + resolution: {integrity: sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==} + engines: {node: '>= 8'} + + /micromatch@4.0.5: + resolution: {integrity: sha512-DMy+ERcEW2q8Z2Po+WNXuw3c5YaUSFjAO5GsJqfEl7UjvtIuFKO6ZrKvcItdy98dwFI2N1tg3zNIdKaQT+aNdA==} + engines: {node: '>=8.6'} + dependencies: + braces: 3.0.2 + picomatch: 2.3.1 + + /mimic-fn@2.1.0: + resolution: {integrity: sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg==} + engines: {node: '>=6'} + dev: true + + /mimic-response@3.1.0: + resolution: {integrity: sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==} + engines: {node: '>=10'} + dev: true + + /mimic-response@4.0.0: + resolution: {integrity: sha512-e5ISH9xMYU0DzrT+jl8q2ze9D6eWBto+I8CNpe+VI+K2J/F/k3PdkdTdz4wvGVH4NTpo+NRYTVIuMQEMMcsLqg==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dev: true + + /minimatch@3.1.2: + resolution: {integrity: sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==} + dependencies: + brace-expansion: 1.1.11 + + /minimatch@5.1.6: + resolution: {integrity: sha512-lKwV/1brpG6mBUFHtb7NUmtABCb2WZZmm2wNiOA5hAb8VdCS4B3dtMWyvcoViccwAW/COERjXLt0zP1zXUN26g==} + engines: {node: '>=10'} + dependencies: + brace-expansion: 2.0.1 + dev: true + + /minimatch@9.0.3: + resolution: {integrity: sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==} + engines: {node: '>=16 || 14 >=14.17'} + dependencies: + brace-expansion: 2.0.1 + dev: true + + /minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + dev: true + + /minipass@7.0.4: + resolution: {integrity: sha512-jYofLM5Dam9279rdkWzqHozUo4ybjdZmCsDHePy5V/PbBcVMiSZR97gmAy45aqi8CK1lG2ECd356FU86avfwUQ==} + engines: {node: '>=16 || 14 >=14.17'} + dev: true + + /mitt@3.0.0: + resolution: {integrity: sha512-7dX2/10ITVyqh4aOSVI9gdape+t9l2/8QxHrFmUXu4EEUpdlxl6RudZUPZoc+zuY2hk1j7XxVroIVIan/pD/SQ==} + dev: true + + /mkdirp-classic@0.5.3: + resolution: {integrity: sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==} + dev: true + + /mkdirp@0.5.6: + resolution: {integrity: sha512-FP+p8RB8OWpF3YZBCrP5gtADmtXApB5AMLn+vdyA+PyxCjrCs00mjyUozssO33cwDeT3wNGdLxJ5M//YqtHAJw==} + hasBin: true + dependencies: + minimist: 1.2.8 + dev: true + + /mlly@1.4.2: + resolution: {integrity: sha512-i/Ykufi2t1EZ6NaPLdfnZk2AX8cs0d+mTzVKuPfqPKPatxLApaBoxJQ9x1/uckXtrS/U5oisPMDkNs0yQTaBRg==} + dependencies: + acorn: 8.11.2 + pathe: 1.1.1 + pkg-types: 1.0.3 + ufo: 1.3.2 + dev: true + + /modern-node-polyfills@0.1.0: + resolution: {integrity: sha512-/Z9mlC56KBxjLZvdNSLqSEFw9jSav43dsUxhLYLN3bZgcSX5VFdixat+QGjb/4NxaGCwW09ABJhZA5oHFj4W4A==} + dependencies: + '@jspm/core': 2.0.0-beta.24 + '@rollup/plugin-inject': 4.0.4(rollup@2.79.1) + acorn: 8.11.2 + esbuild: 0.14.54 + local-pkg: 0.4.3 + rollup: 2.79.1 + dev: true + + /mrmime@1.0.1: + resolution: {integrity: sha512-hzzEagAgDyoU1Q6yg5uI+AorQgdvMCur3FcKf7NhMKWsaYg+RnbTyHRa/9IlLF9rf455MOCtcqqrQQ83pPP7Uw==} + engines: {node: '>=10'} + dev: true + + /ms@2.1.2: + resolution: {integrity: sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==} + + /mz@2.7.0: + resolution: {integrity: 
sha512-z81GNO7nnYMEhrGh9LeymoE4+Yr0Wn5McHIZMK5cfQCl+NDX08sCZgUc9/6MHni9IWuFLm1Z3HTCXu2z9fN62Q==} + dependencies: + any-promise: 1.3.0 + object-assign: 4.1.1 + thenify-all: 1.6.0 + dev: true + + /n12@0.4.0: + resolution: {integrity: sha512-p/hj4zQ8d3pbbFLQuN1K9honUxiDDhueOWyFLw/XgBv+wZCE44bcLH4CIcsolOceJQduh4Jf7m/LfaTxyGmGtQ==} + dev: true + + /nanoid@3.3.7: + resolution: {integrity: sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + /natural-compare-lite@1.4.0: + resolution: {integrity: sha512-Tj+HTDSJJKaZnfiuw+iaF9skdPpTo2GtEly5JHnWV/hfv2Qj/9RKsGISQtLh2ox3l5EAGw487hnBee0sIJ6v2g==} + dev: false + + /natural-compare@1.4.0: + resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} + dev: false + + /netmask@2.0.2: + resolution: {integrity: sha512-dBpDMdxv9Irdq66304OLfEmQ9tbNRFnFTuZiLo+bD+r332bBmMJ8GBLXklIXXgxd3+v9+KUnZaUR5PJMa75Gsg==} + engines: {node: '>= 0.4.0'} + dev: true + + /node-domexception@1.0.0: + resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} + engines: {node: '>=10.5.0'} + dev: true + + /node-fetch@2.7.0: + resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} + engines: {node: 4.x || >=6.0.0} + peerDependencies: + encoding: ^0.1.0 + peerDependenciesMeta: + encoding: + optional: true + dependencies: + whatwg-url: 5.0.0 + dev: true + + /node-fetch@3.3.2: + resolution: {integrity: sha512-dRB78srN/l6gqWulah9SrxeYnxeddIG30+GOqK/9OlLVyLg3HPnr6SqOWTWOXKRwC2eGYCkZ59NNuSgvSrpgOA==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + data-uri-to-buffer: 4.0.1 + fetch-blob: 3.2.0 + formdata-polyfill: 4.0.10 + dev: true + + /normalize-path@3.0.0: + resolution: {integrity: sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==} + engines: {node: '>=0.10.0'} + dev: true + + /normalize-url@8.0.0: + resolution: {integrity: sha512-uVFpKhj5MheNBJRTiMZ9pE/7hD1QTeEvugSJW/OmLzAp78PB5O6adfMNTvmfKhXBkvCzC+rqifWcVYpGFwTjnw==} + engines: {node: '>=14.16'} + dev: true + + /npm-run-path@4.0.1: + resolution: {integrity: sha512-S48WzZW777zhNIrn7gxOlISNAqi9ZC/uQFnRdbeIHhZhCA6UqpkOT8T1G7BvfdgP4Er8gF4sUbaS0i7QvIfCWw==} + engines: {node: '>=8'} + dependencies: + path-key: 3.1.1 + dev: true + + /object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + dev: true + + /once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + dependencies: + wrappy: 1.0.2 + + /onetime@5.1.2: + resolution: {integrity: sha512-kbpaSSGJTWdAY5KPVeMOKXSrPtr8C8C7wodJbcsd51jRnmD+GZu8Y0VoU6Dm5Z4vWr0Ig/1NKuWRKf7j5aaYSg==} + engines: {node: '>=6'} + dependencies: + mimic-fn: 2.1.0 + dev: true + + /optionator@0.9.3: + resolution: {integrity: sha512-JjCoypp+jKn1ttEFExxhetCKeJt9zhAgAve5FXHixTvFDW/5aEktX9bufBKLRRMdU7bNtpLfcGu94B3cdEJgjg==} + engines: {node: '>= 0.8.0'} + dependencies: + '@aashutoshrathi/word-wrap': 1.2.6 + deep-is: 0.1.4 + fast-levenshtein: 2.0.6 + levn: 0.4.1 + prelude-ls: 1.2.1 + type-check: 0.4.0 + dev: false + + /p-cancelable@3.0.0: + resolution: {integrity: 
sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw==} + engines: {node: '>=12.20'} + dev: true + + /p-limit@3.1.0: + resolution: {integrity: sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==} + engines: {node: '>=10'} + dependencies: + yocto-queue: 0.1.0 + dev: false + + /p-limit@4.0.0: + resolution: {integrity: sha512-5b0R4txpzjPWVw/cXXUResoD4hb6U/x9BH08L7nw+GN1sezDzPdxeRvpc9c433fZhBan/wusjbCsqwqm4EIBIQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + dependencies: + yocto-queue: 1.0.0 + dev: true + + /p-locate@5.0.0: + resolution: {integrity: sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==} + engines: {node: '>=10'} + dependencies: + p-limit: 3.1.0 + dev: false + + /pac-proxy-agent@7.0.1: + resolution: {integrity: sha512-ASV8yU4LLKBAjqIPMbrgtaKIvxQri/yh2OpI+S6hVa9JRkUI3Y3NPFbfngDtY7oFtSMD3w31Xns89mDa3Feo5A==} + engines: {node: '>= 14'} + dependencies: + '@tootallnate/quickjs-emscripten': 0.23.0 + agent-base: 7.1.0 + debug: 4.3.4 + get-uri: 6.0.2 + http-proxy-agent: 7.0.0 + https-proxy-agent: 7.0.2 + pac-resolver: 7.0.0 + socks-proxy-agent: 8.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /pac-resolver@7.0.0: + resolution: {integrity: sha512-Fd9lT9vJbHYRACT8OhCbZBbxr6KRSawSovFpy8nDGshaK99S/EBhVIHp9+crhxrsZOuvLpgL1n23iyPg6Rl2hg==} + engines: {node: '>= 14'} + dependencies: + degenerator: 5.0.1 + ip: 1.1.8 + netmask: 2.0.2 + dev: true + + /parent-module@1.0.1: + resolution: {integrity: sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==} + engines: {node: '>=6'} + dependencies: + callsites: 3.1.0 + dev: false + + /path-exists@4.0.0: + resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} + engines: {node: '>=8'} + dev: false + + /path-is-absolute@1.0.1: + resolution: {integrity: sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==} + engines: {node: '>=0.10.0'} + + /path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + /path-parse@1.0.7: + resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} + + /path-scurry@1.10.1: + resolution: {integrity: sha512-MkhCqzzBEpPvxxQ71Md0b1Kk51W01lrYvlMzSUaIzNsODdd7mqhiimSZlr+VegAz5Z6Vzt9Xg2ttE//XBhH3EQ==} + engines: {node: '>=16 || 14 >=14.17'} + dependencies: + lru-cache: 10.0.3 + minipass: 7.0.4 + dev: true + + /path-type@4.0.0: + resolution: {integrity: sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==} + engines: {node: '>=8'} + + /pathe@1.1.1: + resolution: {integrity: sha512-d+RQGp0MAYTIaDBIMmOfMwz3E+LOZnxx1HZd5R18mmCZY0QBlK0LDZfPc8FW8Ed2DlvsuE6PRjroDY+wg4+j/Q==} + dev: true + + /pathval@1.1.1: + resolution: {integrity: sha512-Dp6zGqpTdETdR63lehJYPeIOqpiNBNtc7BpWSLrOje7UaIsE5aY92r/AunQA7rsXvet3lrJ3JnZX29UPTKXyKQ==} + dev: true + + /pend@1.2.0: + resolution: {integrity: sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==} + dev: true + + /periscopic@3.1.0: + resolution: {integrity: sha512-vKiQ8RRtkl9P+r/+oefh25C3fhybptkHKCZSPlcXiJux2tJF55GnEj3BVn4A5gKfq9NWWXXrxkHBwVPUfH0opw==} + dependencies: + '@types/estree': 1.0.5 + estree-walker: 3.0.3 + is-reference: 3.0.2 + dev: 
false + + /picocolors@1.0.0: + resolution: {integrity: sha512-1fygroTLlHu66zi26VoTDv8yRgm0Fccecssto+MhsZ0D/DGW2sm8E8AjW7NU5VVTRt5GxbeZ5qBuJr+HyLYkjQ==} + + /picomatch@2.3.1: + resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==} + engines: {node: '>=8.6'} + + /pirates@4.0.6: + resolution: {integrity: sha512-saLsH7WeYYPiD25LDuLRRY/i+6HaPYr6G1OUlN39otzkSTxKnubR9RTxS3/Kk50s1g2JTgFwWQDQyplC5/SHZg==} + engines: {node: '>= 6'} + dev: true + + /pkg-types@1.0.3: + resolution: {integrity: sha512-nN7pYi0AQqJnoLPC9eHFQ8AcyaixBUOwvqc5TDnIKCMEE6I0y8P7OKA7fPexsXGCGxQDl/cmrLAp26LhcwxZ4A==} + dependencies: + jsonc-parser: 3.2.0 + mlly: 1.4.2 + pathe: 1.1.1 + dev: true + + /postcss-load-config@3.1.4(postcss@8.4.31)(ts-node@10.9.1): + resolution: {integrity: sha512-6DiM4E7v4coTE4uzA8U//WhtPwyhiim3eyjEMFCnUpzbrkK9wJHgKDT2mR+HbtSrd/NubVaYTOpSpjUl8NQeRg==} + engines: {node: '>= 10'} + peerDependencies: + postcss: '>=8.0.9' + ts-node: '>=9.0.0' + peerDependenciesMeta: + postcss: + optional: true + ts-node: + optional: true + dependencies: + lilconfig: 2.1.0 + postcss: 8.4.31 + ts-node: 10.9.1(@types/node@20.9.3)(typescript@5.3.2) + yaml: 1.10.2 + + /postcss-safe-parser@6.0.0(postcss@8.4.31): + resolution: {integrity: sha512-FARHN8pwH+WiS2OPCxJI8FuRJpTVnn6ZNFiqAM2aeW2LwTHWWmWgIyKC6cUo0L8aeKiF/14MNvnpls6R2PBeMQ==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.3.3 + dependencies: + postcss: 8.4.31 + dev: false + + /postcss-scss@4.0.9(postcss@8.4.31): + resolution: {integrity: sha512-AjKOeiwAitL/MXxQW2DliT28EKukvvbEWx3LBmJIRN8KfBGZbRTxNYW0kSqi1COiTZ57nZ9NW06S6ux//N1c9A==} + engines: {node: '>=12.0'} + peerDependencies: + postcss: ^8.4.29 + dependencies: + postcss: 8.4.31 + dev: false + + /postcss-selector-parser@6.0.13: + resolution: {integrity: sha512-EaV1Gl4mUEV4ddhDnv/xtj7sxwrwxdetHdWUGnT4VJQf+4d05v6lHYZr8N573k5Z0BViss7BDhfWtKS3+sfAqQ==} + engines: {node: '>=4'} + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + dev: false + + /postcss@8.4.31: + resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==} + engines: {node: ^10 || ^12 || >=14} + dependencies: + nanoid: 3.3.7 + picocolors: 1.0.0 + source-map-js: 1.0.2 + + /prelude-ls@1.2.1: + resolution: {integrity: sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==} + engines: {node: '>= 0.8.0'} + dev: false + + /prettier-linter-helpers@1.0.0: + resolution: {integrity: sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==} + engines: {node: '>=6.0.0'} + dependencies: + fast-diff: 1.3.0 + dev: false + + /prettier-plugin-svelte@3.1.1(prettier@3.1.0)(svelte@4.2.7): + resolution: {integrity: sha512-jLzaHfToav527/I5h2BMQfN3G5gylrJm54zFFyoXvUtfscI47877ftacUb+Eyse/3bXrhY+MtkyiuvruiHc+kg==} + peerDependencies: + prettier: ^3.0.0 + svelte: ^3.2.0 || ^4.0.0-next.0 || ^5.0.0-next.0 + dependencies: + prettier: 3.1.0 + svelte: 4.2.7 + dev: false + + /prettier@3.1.0: + resolution: {integrity: sha512-TQLvXjq5IAibjh8EpBIkNKxO749UEWABoiIZehEPiY4GNpVdhaFKqSTu+QrlU6D2dPAfubRmtJTi4K4YkQ5eXw==} + engines: {node: '>=14'} + hasBin: true + dev: false + + /pretty-format@27.5.1: + resolution: {integrity: sha512-Qb1gy5OrP5+zDf2Bvnzdl3jsTf1qXVMazbvCoKhtKqVs4/YK4ozX4gKQJJVyNe+cajNPn0KoC0MC3FUmaHWEmQ==} + engines: {node: ^10.13.0 || ^12.13.0 || ^14.15.0 || >=15.0.0} + dependencies: + ansi-regex: 5.0.1 + ansi-styles: 5.2.0 + react-is: 17.0.2 + dev: true + + 
/process-nextick-args@2.0.1: + resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} + dev: true + + /progress@2.0.3: + resolution: {integrity: sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==} + engines: {node: '>=0.4.0'} + dev: true + + /proxy-agent@6.3.0: + resolution: {integrity: sha512-0LdR757eTj/JfuU7TL2YCuAZnxWXu3tkJbg4Oq3geW/qFNT/32T0sp2HnZ9O0lMR4q3vwAt0+xCA8SR0WAD0og==} + engines: {node: '>= 14'} + dependencies: + agent-base: 7.1.0 + debug: 4.3.4 + http-proxy-agent: 7.0.0 + https-proxy-agent: 7.0.2 + lru-cache: 7.18.3 + pac-proxy-agent: 7.0.1 + proxy-from-env: 1.1.0 + socks-proxy-agent: 8.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /proxy-agent@6.3.1: + resolution: {integrity: sha512-Rb5RVBy1iyqOtNl15Cw/llpeLH8bsb37gM1FUfKQ+Wck6xHlbAhWGUFiTRHtkjqGTA5pSHz6+0hrPW/oECihPQ==} + engines: {node: '>= 14'} + dependencies: + agent-base: 7.1.0 + debug: 4.3.4 + http-proxy-agent: 7.0.0 + https-proxy-agent: 7.0.2 + lru-cache: 7.18.3 + pac-proxy-agent: 7.0.1 + proxy-from-env: 1.1.0 + socks-proxy-agent: 8.0.2 + transitivePeerDependencies: + - supports-color + dev: true + + /proxy-from-env@1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + dev: true + + /pump@3.0.0: + resolution: {integrity: sha512-LwZy+p3SFs1Pytd/jYct4wpv49HiYCqd9Rlc5ZVdk0V+8Yzv6jR5Blk3TRmPL1ft69TxP0IMZGJ+WPFU2BFhww==} + dependencies: + end-of-stream: 1.4.4 + once: 1.4.0 + dev: true + + /punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + /puppeteer-core@20.9.0(typescript@5.3.2): + resolution: {integrity: sha512-H9fYZQzMTRrkboEfPmf7m3CLDN6JvbxXA3qTtS+dFt27tR+CsFHzPsT6pzp6lYL6bJbAPaR0HaPO6uSi+F94Pg==} + engines: {node: '>=16.3.0'} + peerDependencies: + typescript: '>= 4.7.4' + peerDependenciesMeta: + typescript: + optional: true + dependencies: + '@puppeteer/browsers': 1.4.6(typescript@5.3.2) + chromium-bidi: 0.4.16(devtools-protocol@0.0.1147663) + cross-fetch: 4.0.0 + debug: 4.3.4 + devtools-protocol: 0.0.1147663 + typescript: 5.3.2 + ws: 8.13.0 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - utf-8-validate + dev: true + + /query-selector-shadow-dom@1.0.1: + resolution: {integrity: sha512-lT5yCqEBgfoMYpf3F2xQRK7zEr1rhIIZuceDK6+xRkJQ4NMbHTwXqk4NkwDwQMNqXgG9r9fyHnzwNVs6zV5KRw==} + dev: true + + /queue-microtask@1.2.3: + resolution: {integrity: sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==} + + /queue-tick@1.0.1: + resolution: {integrity: sha512-kJt5qhMxoszgU/62PLP1CJytzd2NKetjSRnyuj31fDd3Rlcz3fzlFdFLD1SItunPwyqEOkca6GbV612BWfaBag==} + dev: true + + /quick-lru@5.1.1: + resolution: {integrity: sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==} + engines: {node: '>=10'} + dev: true + + /react-is@17.0.2: + resolution: {integrity: sha512-w2GsyukL62IJnlaff/nRegPQR94C/XXamvMWmSHRJ4y7Ts/4ocGRmTHvOs8PSE6pB3dWOrD/nueuU5sduBsQ4w==} + dev: true + + /readable-stream@2.3.8: + resolution: {integrity: sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==} + dependencies: + core-util-is: 1.0.3 + inherits: 2.0.4 + isarray: 1.0.0 + process-nextick-args: 2.0.1 + safe-buffer: 5.1.2 + string_decoder: 1.1.1 + util-deprecate: 1.0.2 + dev: true + + 
/readable-stream@3.6.2: + resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} + engines: {node: '>= 6'} + dependencies: + inherits: 2.0.4 + string_decoder: 1.3.0 + util-deprecate: 1.0.2 + dev: true + + /readdir-glob@1.1.3: + resolution: {integrity: sha512-v05I2k7xN8zXvPD9N+z/uhXPaj0sUFCe2rcWZIpBsqxfP7xXFQ0tipAd/wjj1YxWyWtUS5IDJpOG82JKt2EAVA==} + dependencies: + minimatch: 5.1.6 + dev: true + + /readdirp@3.6.0: + resolution: {integrity: sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==} + engines: {node: '>=8.10.0'} + dependencies: + picomatch: 2.3.1 + dev: true + + /require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + dev: true + + /resolve-alpn@1.2.1: + resolution: {integrity: sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g==} + dev: true + + /resolve-from@4.0.0: + resolution: {integrity: sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==} + engines: {node: '>=4'} + dev: false + + /resolve-from@5.0.0: + resolution: {integrity: sha512-qYg9KP24dD5qka9J47d0aVky0N+b4fTU89LN9iDnjB5waksiC49rvMB0PrUJQGoTmH50XPiqOvAjDfaijGxYZw==} + engines: {node: '>=8'} + dev: true + + /resolve@1.22.8: + resolution: {integrity: sha512-oKWePCxqpd6FlLvGV1VU0x7bkPmmCNolxzjMf4NczoDnQcIWrAF+cPtZn5i6n+RfD2d9i0tzpKnG6Yk168yIyw==} + hasBin: true + dependencies: + is-core-module: 2.13.1 + path-parse: 1.0.7 + supports-preserve-symlinks-flag: 1.0.0 + + /responselike@3.0.0: + resolution: {integrity: sha512-40yHxbNcl2+rzXvZuVkrYohathsSJlMTXKryG5y8uciHv1+xDLHQpgjG64JUO9nrEq2jGLH6IZ8BcZyw3wrweg==} + engines: {node: '>=14.16'} + dependencies: + lowercase-keys: 3.0.0 + dev: true + + /resq@1.11.0: + resolution: {integrity: sha512-G10EBz+zAAy3zUd/CDoBbXRL6ia9kOo3xRHrMDsHljI0GDkhYlyjwoCx5+3eCC4swi1uCoZQhskuJkj7Gp57Bw==} + dependencies: + fast-deep-equal: 2.0.1 + dev: true + + /reusify@1.0.4: + resolution: {integrity: sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==} + engines: {iojs: '>=1.0.0', node: '>=0.10.0'} + + /rgb2hex@0.2.5: + resolution: {integrity: sha512-22MOP1Rh7sAo1BZpDG6R5RFYzR2lYEgwq7HEmyW2qcsOqR2lQKmn+O//xV3YG/0rrhMC6KVX2hU+ZXuaw9a5bw==} + dev: true + + /rimraf@2.7.1: + resolution: {integrity: sha512-uWjbaKIK3T1OSVptzX7Nl6PvQ3qAGtKEtVRjRuazjfL3Bx5eI409VZSqgND+4UNnmzLVdPj9FqFJNPqBZFve4w==} + hasBin: true + dependencies: + glob: 7.2.3 + dev: true + + /rimraf@3.0.2: + resolution: {integrity: sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==} + hasBin: true + dependencies: + glob: 7.2.3 + dev: false + + /rollup-plugin-inject@3.0.2: + resolution: {integrity: sha512-ptg9PQwzs3orn4jkgXJ74bfs5vYz1NCZlSQMBUA0wKcGp5i5pA1AO3fOUEte8enhGUC+iapTCzEWw2jEFFUO/w==} + deprecated: This package has been deprecated and is no longer maintained. Please use @rollup/plugin-inject. 
+ dependencies: + estree-walker: 0.6.1 + magic-string: 0.25.9 + rollup-pluginutils: 2.8.2 + dev: true + + /rollup-plugin-node-polyfills@0.2.1: + resolution: {integrity: sha512-4kCrKPTJ6sK4/gLL/U5QzVT8cxJcofO0OU74tnB19F40cmuAKSzH5/siithxlofFEjwvw1YAhPmbvGNA6jEroA==} + dependencies: + rollup-plugin-inject: 3.0.2 + dev: true + + /rollup-pluginutils@2.8.2: + resolution: {integrity: sha512-EEp9NhnUkwY8aif6bxgovPHMoMoNr2FulJziTndpt5H9RdwC47GSGuII9XxpSdzVGM0GWrNPHV6ie1LTNJPaLQ==} + dependencies: + estree-walker: 0.6.1 + dev: true + + /rollup@2.79.1: + resolution: {integrity: sha512-uKxbd0IhMZOhjAiD5oAFp7BqvkA4Dv47qpOCtaNvng4HBwdbWtdOh8f5nZNuk2rp51PMGk3bzfWu5oayNEuYnw==} + engines: {node: '>=10.0.0'} + hasBin: true + optionalDependencies: + fsevents: 2.3.3 + dev: true + + /rollup@3.29.4: + resolution: {integrity: sha512-oWzmBZwvYrU0iJHtDmhsm662rC15FRXmcjCk1xD771dFDx5jJ02ufAQQTn0etB2emNk4J9EZg/yWKpsn9BWGRw==} + engines: {node: '>=14.18.0', npm: '>=8.0.0'} + hasBin: true + optionalDependencies: + fsevents: 2.3.3 + + /run-parallel@1.2.0: + resolution: {integrity: sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==} + dependencies: + queue-microtask: 1.2.3 + + /safaridriver@0.1.0: + resolution: {integrity: sha512-azzzIP3gR1TB9bVPv7QO4Zjw0rR1BWEU/s2aFdUMN48gxDjxEB13grAEuXDmkKPgE74cObymDxmAmZnL3clj4w==} + dev: true + + /safe-buffer@5.1.2: + resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==} + dev: true + + /safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + dev: true + + /semver@7.5.4: + resolution: {integrity: sha512-1bCSESV6Pv+i21Hvpxp3Dx+pSD8lIPt8uVjRrxAUt/nbswYc+tK6Y2btiULjd4+fnq15PX+nqQDC7Oft7WkwcA==} + engines: {node: '>=10'} + hasBin: true + dependencies: + lru-cache: 6.0.0 + + /serialize-error@11.0.3: + resolution: {integrity: sha512-2G2y++21dhj2R7iHAdd0FIzjGwuKZld+7Pl/bTU6YIkrC2ZMbVUjm+luj6A6V34Rv9XfKJDKpTWu9W4Gse1D9g==} + engines: {node: '>=14.16'} + dependencies: + type-fest: 2.19.0 + dev: true + + /setimmediate@1.0.5: + resolution: {integrity: sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==} + dev: true + + /shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + dependencies: + shebang-regex: 3.0.0 + + /shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + /siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + dev: true + + /signal-exit@3.0.7: + resolution: {integrity: sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==} + dev: true + + /signal-exit@4.1.0: + resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} + engines: {node: '>=14'} + dev: true + + /sirv@2.0.3: + resolution: {integrity: sha512-O9jm9BsID1P+0HOi81VpXPoDxYP374pkOLzACAoyUQ/3OUVndNpsz6wMnY2z+yOxzbllCKZrM+9QrWsv4THnyA==} + engines: {node: '>= 10'} + dependencies: + '@polka/url': 1.0.0-next.23 + mrmime: 1.0.1 + totalist: 3.0.1 + dev: true + + /slash@3.0.0: + resolution: {integrity: 
sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==} + engines: {node: '>=8'} + + /slice-ansi@5.0.0: + resolution: {integrity: sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==} + engines: {node: '>=12'} + dependencies: + ansi-styles: 6.2.1 + is-fullwidth-code-point: 4.0.0 + dev: true + + /smart-buffer@4.2.0: + resolution: {integrity: sha512-94hK0Hh8rPqQl2xXc3HsaBoOXKV20MToPkcXvwbISWLEs+64sBq5kFgn2kJDHb1Pry9yrP0dxrCI9RRci7RXKg==} + engines: {node: '>= 6.0.0', npm: '>= 3.0.0'} + dev: true + + /socks-proxy-agent@8.0.2: + resolution: {integrity: sha512-8zuqoLv1aP/66PHF5TqwJ7Czm3Yv32urJQHrVyhD7mmA6d61Zv8cIXQYPTWwmg6qlupnPvs/QKDmfa4P/qct2g==} + engines: {node: '>= 14'} + dependencies: + agent-base: 7.1.0 + debug: 4.3.4 + socks: 2.7.1 + transitivePeerDependencies: + - supports-color + dev: true + + /socks@2.7.1: + resolution: {integrity: sha512-7maUZy1N7uo6+WVEX6psASxtNlKaNVMlGQKkG/63nEDdLOWNbiUMoLK7X4uYoLhQstau72mLgfEWcXcwsaHbYQ==} + engines: {node: '>= 10.13.0', npm: '>= 3.0.0'} + dependencies: + ip: 2.0.0 + smart-buffer: 4.2.0 + dev: true + + /source-map-js@1.0.2: + resolution: {integrity: sha512-R0XvVJ9WusLiqTCEiGCmICCMplcCkIwwR11mOSD9CR5u+IXYdiseeEuXCVAjS54zqwkLcPNnmU4OeJ6tUrWhDw==} + engines: {node: '>=0.10.0'} + + /source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + dev: true + + /source-map@0.8.0-beta.0: + resolution: {integrity: sha512-2ymg6oRBpebeZi9UUNsgQ89bhx01TcTkmNTGnNO88imTmbSgy4nfujrgVEFKWpMTEGA11EDkTt7mqObTPdigIA==} + engines: {node: '>= 8'} + dependencies: + whatwg-url: 7.1.0 + dev: true + + /sourcemap-codec@1.4.8: + resolution: {integrity: sha512-9NykojV5Uih4lgo5So5dtw+f0JgJX30KCNI8gwhz2J9A15wD0Ml6tjHKwf6fTSa6fAdVBdZeNOs9eJ71qCk8vA==} + deprecated: Please use @jridgewell/sourcemap-codec instead + dev: true + + /split2@4.2.0: + resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} + engines: {node: '>= 10.x'} + dev: true + + /stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + dev: true + + /std-env@3.5.0: + resolution: {integrity: sha512-JGUEaALvL0Mf6JCfYnJOTcobY+Nc7sG/TemDRBqCA0wEr4DER7zDchaaixTlmOxAjG1uRJmX82EQcxwTQTkqVA==} + dev: true + + /streamx@2.15.5: + resolution: {integrity: sha512-9thPGMkKC2GctCzyCUjME3yR03x2xNo0GPKGkRw2UMYN+gqWa9uqpyNWhmsNCutU5zHmkUum0LsCRQTXUgUCAg==} + dependencies: + fast-fifo: 1.3.2 + queue-tick: 1.0.1 + dev: true + + /string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + dev: true + + /string-width@5.1.2: + resolution: {integrity: sha512-HnLOCR3vjcY8beoNLtcjZ5/nxn2afmME6lhrDrebokqMap+XbeW8n9TXpPDOqdGK5qcI3oT0GKTW6wC7EMiVqA==} + engines: {node: '>=12'} + dependencies: + eastasianwidth: 0.2.0 + emoji-regex: 9.2.2 + strip-ansi: 7.1.0 + dev: true + + /string_decoder@1.1.1: + resolution: {integrity: sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==} + dependencies: + safe-buffer: 5.1.2 + dev: true + + /string_decoder@1.3.0: + resolution: {integrity: 
sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + dependencies: + safe-buffer: 5.2.1 + dev: true + + /strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + dependencies: + ansi-regex: 5.0.1 + + /strip-ansi@7.1.0: + resolution: {integrity: sha512-iq6eVVI64nQQTRYq2KtEg2d2uU7LElhTJwsH4YzIHZshxlgZms/wIc4VoDQTlG/IvVIrBKG06CrZnp0qv7hkcQ==} + engines: {node: '>=12'} + dependencies: + ansi-regex: 6.0.1 + dev: true + + /strip-final-newline@2.0.0: + resolution: {integrity: sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA==} + engines: {node: '>=6'} + dev: true + + /strip-json-comments@3.1.1: + resolution: {integrity: sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==} + engines: {node: '>=8'} + dev: false + + /strip-literal@1.3.0: + resolution: {integrity: sha512-PugKzOsyXpArk0yWmUwqOZecSO0GH0bPoctLcqNDH9J04pVW3lflYE0ujElBGTloevcxF5MofAOZ7C5l2b+wLg==} + dependencies: + acorn: 8.11.2 + dev: true + + /sucrase@3.34.0: + resolution: {integrity: sha512-70/LQEZ07TEcxiU2dz51FKaE6hCTWC6vr7FOk3Gr0U60C3shtAN+H+BFr9XlYe5xqf3RA8nrc+VIwzCfnxuXJw==} + engines: {node: '>=8'} + hasBin: true + dependencies: + '@jridgewell/gen-mapping': 0.3.3 + commander: 4.1.1 + glob: 7.1.6 + lines-and-columns: 1.2.4 + mz: 2.7.0 + pirates: 4.0.6 + ts-interface-checker: 0.1.13 + dev: true + + /supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + dependencies: + has-flag: 4.0.0 + + /supports-preserve-symlinks-flag@1.0.0: + resolution: {integrity: sha512-ot0WnXS9fgdkgIcePe6RHNk1WA8+muPa6cSjeR3V8K27q9BB1rTE3R1p7Hv0z1ZyAc8s6Vvv8DIyWf681MAt0w==} + engines: {node: '>= 0.4'} + + /svelte-eslint-parser@0.33.1(svelte@4.2.7): + resolution: {integrity: sha512-vo7xPGTlKBGdLH8T5L64FipvTrqv3OQRx9d2z5X05KKZDlF4rQk8KViZO4flKERY+5BiVdOh7zZ7JGJWo5P0uA==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + peerDependencies: + svelte: ^3.37.0 || ^4.0.0 + peerDependenciesMeta: + svelte: + optional: true + dependencies: + eslint-scope: 7.2.2 + eslint-visitor-keys: 3.4.3 + espree: 9.6.1 + postcss: 8.4.31 + postcss-scss: 4.0.9(postcss@8.4.31) + svelte: 4.2.7 + dev: false + + /svelte@4.2.7: + resolution: {integrity: sha512-UExR1KS7raTdycsUrKLtStayu4hpdV3VZQgM0akX8XbXgLBlosdE/Sf3crOgyh9xIjqSYB3UEBuUlIQKRQX2hg==} + engines: {node: '>=16'} + dependencies: + '@ampproject/remapping': 2.2.1 + '@jridgewell/sourcemap-codec': 1.4.15 + '@jridgewell/trace-mapping': 0.3.20 + acorn: 8.11.2 + aria-query: 5.3.0 + axobject-query: 3.2.1 + code-red: 1.0.4 + css-tree: 2.3.1 + estree-walker: 3.0.3 + is-reference: 3.0.2 + locate-character: 3.0.0 + magic-string: 0.30.5 + periscopic: 3.1.0 + dev: false + + /tar-fs@3.0.4: + resolution: {integrity: sha512-5AFQU8b9qLfZCX9zp2duONhPmZv0hGYiBPJsyUdqMjzq/mqVpy/rEUSeHk1+YitmxugaptgBh5oDGU3VsAJq4w==} + dependencies: + mkdirp-classic: 0.5.3 + pump: 3.0.0 + tar-stream: 3.1.6 + dev: true + + /tar-stream@3.1.6: + resolution: {integrity: sha512-B/UyjYwPpMBv+PaFSWAmtYjwdrlEaZQEhMIBFNC5oEG8lpiW8XjcSdmEaClj28ArfKScKHs2nshz3k2le6crsg==} + dependencies: + b4a: 1.6.4 + fast-fifo: 1.3.2 + streamx: 2.15.5 + dev: true + + /text-table@0.2.0: + resolution: {integrity: sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==} + dev: false + + 
/thenify-all@1.6.0: + resolution: {integrity: sha512-RNxQH/qI8/t3thXJDwcstUO4zeqo64+Uy/+sNVRBx4Xn2OX+OZ9oP+iJnNFqplFra2ZUVeKCSa2oVWi3T4uVmA==} + engines: {node: '>=0.8'} + dependencies: + thenify: 3.3.1 + dev: true + + /thenify@3.3.1: + resolution: {integrity: sha512-RVZSIV5IG10Hk3enotrhvz0T9em6cyHBLkH/YAZuKqd8hRkKhSfCGIcP2KUY0EPxndzANBmNllzWPwak+bheSw==} + dependencies: + any-promise: 1.3.0 + dev: true + + /through@2.3.8: + resolution: {integrity: sha512-w89qg7PI8wAdvX60bMDP+bFoD5Dvhm9oLheFp5O4a2QF0cSBGsBX4qZmadPMvVqlLJBBci+WqGGOAPvcDeNSVg==} + dev: true + + /tinybench@2.5.1: + resolution: {integrity: sha512-65NKvSuAVDP/n4CqH+a9w2kTlLReS9vhsAP06MWx+/89nMinJyB2icyl58RIcqCmIggpojIGeuJGhjU1aGMBSg==} + dev: true + + /tinypool@0.4.0: + resolution: {integrity: sha512-2ksntHOKf893wSAH4z/+JbPpi92esw8Gn9N2deXX+B0EO92hexAVI9GIZZPx7P5aYo5KULfeOSt3kMOmSOy6uA==} + engines: {node: '>=14.0.0'} + dev: true + + /tinyspy@1.1.1: + resolution: {integrity: sha512-UVq5AXt/gQlti7oxoIg5oi/9r0WpF7DGEVwXgqWSMmyN16+e3tl5lIvTaOpJ3TAtu5xFzWccFRM4R5NaWHF+4g==} + engines: {node: '>=14.0.0'} + dev: true + + /to-regex-range@5.0.1: + resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} + engines: {node: '>=8.0'} + dependencies: + is-number: 7.0.0 + + /totalist@3.0.1: + resolution: {integrity: sha512-sf4i37nQ2LBx4m3wB74y+ubopq6W/dIzXg0FDGjsYnZHVa1Da8FH853wlL2gtUhg+xJXjfk3kUZS3BRoQeoQBQ==} + engines: {node: '>=6'} + dev: true + + /tr46@0.0.3: + resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + dev: true + + /tr46@1.0.1: + resolution: {integrity: sha512-dTpowEjclQ7Kgx5SdBkqRzVhERQXov8/l9Ft9dVM9fmg0W0KQSVaXX9T4i6twCPNtYiZM53lpSSUAwJbFPOHxA==} + dependencies: + punycode: 2.3.1 + dev: true + + /traverse@0.3.9: + resolution: {integrity: sha512-iawgk0hLP3SxGKDfnDJf8wTz4p2qImnyihM5Hh/sGvQ3K37dPi/w8sRhdNIxYA1TwFwc5mDhIJq+O0RsvXBKdQ==} + dev: true + + /tree-kill@1.2.2: + resolution: {integrity: sha512-L0Orpi8qGpRG//Nd+H90vFB+3iHnue1zSSGmNOOCh1GLJ7rUKVwV2HvijphGQS2UmhUZewS9VgvxYIdgr+fG1A==} + hasBin: true + dev: true + + /ts-interface-checker@0.1.13: + resolution: {integrity: sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA==} + dev: true + + /ts-node@10.9.1(@types/node@20.9.3)(typescript@5.3.2): + resolution: {integrity: sha512-NtVysVPkxxrwFGUUxGYhfux8k78pQB3JqYBXlLRZgdGUqTO5wU/UyHop5p70iEbGhB7q5KmiZiU0Y3KlJrScEw==} + hasBin: true + peerDependencies: + '@swc/core': '>=1.2.50' + '@swc/wasm': '>=1.2.50' + '@types/node': '*' + typescript: '>=2.7' + peerDependenciesMeta: + '@swc/core': + optional: true + '@swc/wasm': + optional: true + dependencies: + '@cspotcode/source-map-support': 0.8.1 + '@tsconfig/node10': 1.0.9 + '@tsconfig/node12': 1.0.11 + '@tsconfig/node14': 1.0.3 + '@tsconfig/node16': 1.0.4 + '@types/node': 20.9.3 + acorn: 8.11.2 + acorn-walk: 8.3.0 + arg: 4.1.3 + create-require: 1.1.1 + diff: 4.0.2 + make-error: 1.3.6 + typescript: 5.3.2 + v8-compile-cache-lib: 3.0.1 + yn: 3.1.1 + + /tslib@1.14.1: + resolution: {integrity: sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg==} + dev: false + + /tslib@2.6.2: + resolution: {integrity: sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q==} + dev: true + + /tsup@6.7.0(postcss@8.4.31)(ts-node@10.9.1)(typescript@5.3.2): + resolution: {integrity: 
sha512-L3o8hGkaHnu5TdJns+mCqFsDBo83bJ44rlK7e6VdanIvpea4ArPcU3swWGsLVbXak1PqQx/V+SSmFPujBK+zEQ==} + engines: {node: '>=14.18'} + hasBin: true + peerDependencies: + '@swc/core': ^1 + postcss: ^8.4.12 + typescript: '>=4.1.0' + peerDependenciesMeta: + '@swc/core': + optional: true + postcss: + optional: true + typescript: + optional: true + dependencies: + bundle-require: 4.0.2(esbuild@0.17.19) + cac: 6.7.14 + chokidar: 3.5.3 + debug: 4.3.4 + esbuild: 0.17.19 + execa: 5.1.1 + globby: 11.1.0 + joycon: 3.1.1 + postcss: 8.4.31 + postcss-load-config: 3.1.4(postcss@8.4.31)(ts-node@10.9.1) + resolve-from: 5.0.0 + rollup: 3.29.4 + source-map: 0.8.0-beta.0 + sucrase: 3.34.0 + tree-kill: 1.2.2 + typescript: 5.3.2 + transitivePeerDependencies: + - supports-color + - ts-node + dev: true + + /tsutils@3.21.0(typescript@5.3.2): + resolution: {integrity: sha512-mHKK3iUXL+3UF6xL5k0PEhKRUBKPBCv/+RkEOpjRWxxx27KKRBmmA60A9pgOUvMi8GKhRMPEmjBRPzs2W7O1OA==} + engines: {node: '>= 6'} + peerDependencies: + typescript: '>=2.8.0 || >= 3.2.0-dev || >= 3.3.0-dev || >= 3.4.0-dev || >= 3.5.0-dev || >= 3.6.0-dev || >= 3.6.0-beta || >= 3.7.0-dev || >= 3.7.0-beta' + dependencies: + tslib: 1.14.1 + typescript: 5.3.2 + dev: false + + /type-check@0.4.0: + resolution: {integrity: sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==} + engines: {node: '>= 0.8.0'} + dependencies: + prelude-ls: 1.2.1 + dev: false + + /type-detect@4.0.8: + resolution: {integrity: sha512-0fr/mIH1dlO+x7TlcMy+bIDqKPsw/70tVyeHW787goQjhmqaZe10uwLujubK9q9Lg6Fiho1KUKDYz0Z7k7g5/g==} + engines: {node: '>=4'} + dev: true + + /type-fest@0.20.2: + resolution: {integrity: sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==} + engines: {node: '>=10'} + dev: false + + /type-fest@2.13.0: + resolution: {integrity: sha512-lPfAm42MxE4/456+QyIaaVBAwgpJb6xZ8PRu09utnhPdWwcyj9vgy6Sq0Z5yNbJ21EdxB5dRU/Qg8bsyAMtlcw==} + engines: {node: '>=12.20'} + dev: true + + /type-fest@2.19.0: + resolution: {integrity: sha512-RAH822pAdBgcNMAfWnCBU3CFZcfZ/i1eZjwFU/dsLKumyuuP3niueg2UAukXYF0E2AAoc82ZSSf9J0WQBinzHA==} + engines: {node: '>=12.20'} + dev: true + + /typescript@5.3.2: + resolution: {integrity: sha512-6l+RyNy7oAHDfxC4FzSJcz9vnjTKxrLpDG5M2Vu4SHRVNg6xzqZp6LYSR9zjqQTu8DU/f5xwxUdADOkbrIX2gQ==} + engines: {node: '>=14.17'} + hasBin: true + + /ufo@1.3.2: + resolution: {integrity: sha512-o+ORpgGwaYQXgqGDwd+hkS4PuZ3QnmqMMxRuajK/a38L6fTpcE5GPIfrf+L/KemFzfUpeUQc1rRS1iDBozvnFA==} + dev: true + + /unbzip2-stream@1.4.3: + resolution: {integrity: sha512-mlExGW4w71ebDJviH16lQLtZS32VKqsSfk80GCfUlwT/4/hNRFsoscrF/c++9xinkMzECL1uL9DDwXqFWkruPg==} + dependencies: + buffer: 5.7.1 + through: 2.3.8 + dev: true + + /undici-types@5.26.5: + resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + + /universalify@0.1.2: + resolution: {integrity: sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==} + engines: {node: '>= 4.0.0'} + dev: true + + /unzipper@0.10.14: + resolution: {integrity: sha512-ti4wZj+0bQTiX2KmKWuwj7lhV+2n//uXEotUmGuQqrbVZSEGFMbI68+c6JCQ8aAmUWYvtHEz2A8K6wXvueR/6g==} + dependencies: + big-integer: 1.6.52 + binary: 0.3.0 + bluebird: 3.4.7 + buffer-indexof-polyfill: 1.0.2 + duplexer2: 0.1.4 + fstream: 1.0.12 + graceful-fs: 4.2.11 + listenercount: 1.0.1 + readable-stream: 2.3.8 + setimmediate: 1.0.5 + dev: true + + /uri-js@4.4.1: + resolution: {integrity: 
sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==} + dependencies: + punycode: 2.3.1 + dev: false + + /userhome@1.0.0: + resolution: {integrity: sha512-ayFKY3H+Pwfy4W98yPdtH1VqH4psDeyW8lYYFzfecR9d6hqLpqhecktvYR3SEEXt7vG0S1JEpciI3g94pMErig==} + engines: {node: '>= 0.8.0'} + dev: true + + /util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + /v8-compile-cache-lib@3.0.1: + resolution: {integrity: sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg==} + + /vite-node@0.29.8(@types/node@20.9.3): + resolution: {integrity: sha512-b6OtCXfk65L6SElVM20q5G546yu10/kNrhg08afEoWlFRJXFq9/6glsvSVY+aI6YeC1tu2TtAqI2jHEQmOmsFw==} + engines: {node: '>=v14.16.0'} + hasBin: true + dependencies: + cac: 6.7.14 + debug: 4.3.4 + mlly: 1.4.2 + pathe: 1.1.1 + picocolors: 1.0.0 + vite: 4.1.4(@types/node@20.9.3) + transitivePeerDependencies: + - '@types/node' + - less + - sass + - stylus + - sugarss + - supports-color + - terser + dev: true + + /vite@4.1.4(@types/node@20.9.3): + resolution: {integrity: sha512-3knk/HsbSTKEin43zHu7jTwYWv81f8kgAL99G5NWBcA1LKvtvcVAC4JjBH1arBunO9kQka+1oGbrMKOjk4ZrBg==} + engines: {node: ^14.18.0 || >=16.0.0} + hasBin: true + peerDependencies: + '@types/node': '>= 14' + less: '*' + sass: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + sass: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + dependencies: + '@types/node': 20.9.3 + esbuild: 0.16.17 + postcss: 8.4.31 + resolve: 1.22.8 + rollup: 3.29.4 + optionalDependencies: + fsevents: 2.3.3 + + /vitest@0.29.8(@vitest/browser@0.29.8)(webdriverio@8.23.4): + resolution: {integrity: sha512-JIAVi2GK5cvA6awGpH0HvH/gEG9PZ0a/WoxdiV3PmqK+3CjQMf8c+J/Vhv4mdZ2nRyXFw66sAg6qz7VNkaHfDQ==} + engines: {node: '>=v14.16.0'} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@vitest/browser': '*' + '@vitest/ui': '*' + happy-dom: '*' + jsdom: '*' + playwright: '*' + safaridriver: '*' + webdriverio: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + playwright: + optional: true + safaridriver: + optional: true + webdriverio: + optional: true + dependencies: + '@types/chai': 4.3.11 + '@types/chai-subset': 1.3.5 + '@types/node': 20.9.3 + '@vitest/browser': 0.29.8(vitest@0.29.8) + '@vitest/expect': 0.29.8 + '@vitest/runner': 0.29.8 + '@vitest/spy': 0.29.8 + '@vitest/utils': 0.29.8 + acorn: 8.11.2 + acorn-walk: 8.3.0 + cac: 6.7.14 + chai: 4.3.10 + debug: 4.3.4 + local-pkg: 0.4.3 + pathe: 1.1.1 + picocolors: 1.0.0 + source-map: 0.6.1 + std-env: 3.5.0 + strip-literal: 1.3.0 + tinybench: 2.5.1 + tinypool: 0.4.0 + tinyspy: 1.1.1 + vite: 4.1.4(@types/node@20.9.3) + vite-node: 0.29.8(@types/node@20.9.3) + webdriverio: 8.23.4(typescript@5.3.2) + why-is-node-running: 2.2.2 + transitivePeerDependencies: + - less + - sass + - stylus + - sugarss + - supports-color + - terser + dev: true + + /wait-port@1.1.0: + resolution: {integrity: sha512-3e04qkoN3LxTMLakdqeWth8nih8usyg+sf1Bgdf9wwUkp05iuK1eSY/QpLvscT/+F/gA89+LpUmmgBtesbqI2Q==} + engines: {node: '>=10'} + hasBin: true + dependencies: + chalk: 4.1.2 + commander: 9.5.0 + debug: 4.3.4 + transitivePeerDependencies: + - supports-color + dev: true + + 
/web-streams-polyfill@3.2.1: + resolution: {integrity: sha512-e0MO3wdXWKrLbL0DgGnUV7WHVuw9OUvL4hjgnPkIeEvESk74gAITi5G606JtZPp39cd8HA9VQzCIvA49LpPN5Q==} + engines: {node: '>= 8'} + dev: true + + /webdriver@8.23.1: + resolution: {integrity: sha512-0PLN6cqP5cSorZBU2OBk2XKhxKpWWKzvClHBiGCqZIuofZ3kPTq7uYFapej0c4xFmKXHEiLIN7Qkt4H3gWTs8g==} + engines: {node: ^16.13 || >=18} + dependencies: + '@types/node': 20.9.3 + '@types/ws': 8.5.10 + '@wdio/config': 8.23.1 + '@wdio/logger': 8.16.17 + '@wdio/protocols': 8.23.0 + '@wdio/types': 8.23.1 + '@wdio/utils': 8.23.1 + deepmerge-ts: 5.1.0 + got: 12.6.1 + ky: 0.33.3 + ws: 8.14.2 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + dev: true + + /webdriverio@8.23.4(typescript@5.3.2): + resolution: {integrity: sha512-tlma460ls27zv5Z+WHZG99SJrgcIZi4jsFrZeCCPZTtspOvXoqImL7g6orJTOJXVMhqptkFZN16zHONuAoXV5Q==} + engines: {node: ^16.13 || >=18} + peerDependencies: + devtools: ^8.14.0 + peerDependenciesMeta: + devtools: + optional: true + dependencies: + '@types/node': 20.9.3 + '@wdio/config': 8.23.1 + '@wdio/logger': 8.16.17 + '@wdio/protocols': 8.23.0 + '@wdio/repl': 8.23.1 + '@wdio/types': 8.23.1 + '@wdio/utils': 8.23.1 + archiver: 6.0.1 + aria-query: 5.3.0 + css-shorthand-properties: 1.1.1 + css-value: 0.0.1 + devtools-protocol: 0.0.1213968 + grapheme-splitter: 1.0.4 + import-meta-resolve: 3.1.1 + is-plain-obj: 4.1.0 + lodash.clonedeep: 4.5.0 + lodash.zip: 4.2.0 + minimatch: 9.0.3 + puppeteer-core: 20.9.0(typescript@5.3.2) + query-selector-shadow-dom: 1.0.1 + resq: 1.11.0 + rgb2hex: 0.2.5 + serialize-error: 11.0.3 + webdriver: 8.23.1 + transitivePeerDependencies: + - bufferutil + - encoding + - supports-color + - typescript + - utf-8-validate + dev: true + + /webidl-conversions@3.0.1: + resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + dev: true + + /webidl-conversions@4.0.2: + resolution: {integrity: sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==} + dev: true + + /whatwg-url@5.0.0: + resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + dependencies: + tr46: 0.0.3 + webidl-conversions: 3.0.1 + dev: true + + /whatwg-url@7.1.0: + resolution: {integrity: sha512-WUu7Rg1DroM7oQvGWfOiAK21n74Gg+T4elXEQYkOhtyLeWiJFoOGLXPKI/9gzIie9CtwVLm8wtw6YJdKyxSjeg==} + dependencies: + lodash.sortby: 4.7.0 + tr46: 1.0.1 + webidl-conversions: 4.0.2 + dev: true + + /which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + dependencies: + isexe: 2.0.0 + + /which@4.0.0: + resolution: {integrity: sha512-GlaYyEb07DPxYCKhKzplCWBJtvxZcZMrL+4UkrTSJHHPyZU4mYYTv3qaOe77H7EODLSSopAUFAc6W8U4yqvscg==} + engines: {node: ^16.13.0 || >=18.0.0} + hasBin: true + dependencies: + isexe: 3.1.1 + dev: true + + /why-is-node-running@2.2.2: + resolution: {integrity: sha512-6tSwToZxTOcotxHeA+qGCq1mVzKR3CwcJGmVcY+QE8SHy6TnpFnh8PAvPNHYr7EcuVeG0QSMxtYCuO1ta/G/oA==} + engines: {node: '>=8'} + hasBin: true + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 + dev: true + + /wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + dev: true + + /wrap-ansi@8.1.0: + resolution: {integrity: 
sha512-si7QWI6zUMq56bESFvagtmzMdGOtoxfR+Sez11Mobfc7tm+VkUckk9bW2UeffTGVUbOksxmSw0AA2gs8g71NCQ==} + engines: {node: '>=12'} + dependencies: + ansi-styles: 6.2.1 + string-width: 5.1.2 + strip-ansi: 7.1.0 + dev: true + + /wrappy@1.0.2: + resolution: {integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + /ws@8.13.0: + resolution: {integrity: sha512-x9vcZYTrFPC7aSIbj7sRCYo7L/Xb8Iy+pW0ng0wt2vCJv7M9HOMy0UoN3rr+IFC7hb7vXoqS+P9ktyLLLhO+LA==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + dev: true + + /ws@8.14.2: + resolution: {integrity: sha512-wEBG1ftX4jcglPxgFCMJmZ2PLtSbJ2Peg6TmpJFTbe9GZYOQCDPdMYu/Tm0/bGZkw8paZnJY45J4K2PZrLYq8g==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + dev: true + + /y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + dev: true + + /yallist@4.0.0: + resolution: {integrity: sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==} + + /yaml@1.10.2: + resolution: {integrity: sha512-r3vXyErRCYJ7wg28yvBY5VSoAF8ZvlcW9/BwUzEtUsjvX/DKs24dIkuwjtuprwJJHsbyUbLApepYTR1BN4uHrg==} + engines: {node: '>= 6'} + + /yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + dev: true + + /yargs@17.7.1: + resolution: {integrity: sha512-cwiTb08Xuv5fqF4AovYacTFNxk62th7LKJ6BL9IGUpTJrWoU7/7WdQGTP2SjKf1dUNBGzDd28p/Yfs/GI6JrLw==} + engines: {node: '>=12'} + dependencies: + cliui: 8.0.1 + escalade: 3.1.1 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + dev: true + + /yargs@17.7.2: + resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: '>=12'} + dependencies: + cliui: 8.0.1 + escalade: 3.1.1 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + dev: true + + /yauzl@2.10.0: + resolution: {integrity: sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==} + dependencies: + buffer-crc32: 0.2.13 + fd-slicer: 1.1.0 + dev: true + + /yn@3.1.1: + resolution: {integrity: sha512-Ux4ygGWsu2c7isFWe8Yu1YluJmqVhxqK2cLXNQA5AcC3QfbGNpM7fu0Y8b/z16pXLnFxZYvWhd3fhBY9DLmC6Q==} + engines: {node: '>=6'} + + /yocto-queue@0.1.0: + resolution: {integrity: sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==} + engines: {node: '>=10'} + dev: false + + /yocto-queue@1.0.0: + resolution: {integrity: sha512-9bnSc/HEW2uRy67wc+T8UwauLuPJVn28jb+GtJY16iiKWyvmYJRXVT4UamsAEGQfPohgr2q4Tq0sQbQlxTfi1g==} + engines: {node: '>=12.20'} + dev: true + + /zip-stream@5.0.1: + resolution: {integrity: sha512-UfZ0oa0C8LI58wJ+moL46BDIMgCQbnsb+2PoiJYtonhBsMh2bq1eRBVkvjfVsqbEHd9/EgKPUuL9saSSsec8OA==} + engines: {node: '>= 12.0.0'} + dependencies: + archiver-utils: 4.0.1 + compress-commons: 5.0.1 + readable-stream: 3.6.2 + dev: true