diff --git a/README.md b/README.md index 9df9d33ea3a0c91eb0ae42a94bc02718cadc64d4..0196cef4b1307f952ee913dc894d75e3b1638444 100644 --- a/README.md +++ b/README.md @@ -36,6 +36,7 @@ You can propose bounties attached to issues (eg `"add support for Adobe Premiere There are various platforms to do this, I propose to try Boss as it has low fees: [boss.dev](https://www.boss.dev/doc/#create-bounties) +Please use good judgment if you take on a bounty. Post a message in the GitHub issue, communicate on our Discord that you are going to take it, etc. ## For developers diff --git a/package-lock.json b/package-lock.json index 736188c50866a45ac1eddf1014850543d5d81927..46445baf0b95832c2bde0c5023ca26e1b4f83d69 100644 --- a/package-lock.json +++ b/package-lock.json @@ -10,8 +10,9 @@ "dependencies": { "@aitube/clap": "0.0.27", "@aitube/engine": "0.0.24", - "@aitube/timeline": "0.0.26", + "@aitube/timeline": "0.0.29", "@fal-ai/serverless-client": "^0.10.3", + "@gradio/client": "^1.1.1", "@huggingface/hub": "^0.15.1", "@huggingface/inference": "^2.7.0", "@langchain/anthropic": "^0.2.0", @@ -56,14 +57,12 @@ "@upstash/ratelimit": "^1.1.3", "@upstash/redis": "^1.31.1", "autoprefixer": "10.4.17", - "axios": "^1.7.2", "class-variance-authority": "^0.7.0", "clsx": "^2.1.0", "cmdk": "^0.2.1", "eslint": "8.57.0", "eslint-config-next": "14.1.0", "fluent-ffmpeg": "^2.1.3", - "form-data": "^4.0.0", "fs-extra": "^11.2.0", "lucide-react": "^0.334.0", "mlt-xml": "^2.0.2", @@ -125,9 +124,9 @@ } }, "node_modules/@aitube/timeline": { - "version": "0.0.26", - "resolved": "https://registry.npmjs.org/@aitube/timeline/-/timeline-0.0.26.tgz", - "integrity": "sha512-uFC1oF86g2/kWL6eG5bEy1Z1zJUp27t/zkx35o9cnJ0zgNGYMq5XgvZJ91SYdC8TBvKBRosbZroIOx+7EraCRw==", + "version": "0.0.29", + "resolved": "https://registry.npmjs.org/@aitube/timeline/-/timeline-0.0.29.tgz", + "integrity": "sha512-m2SD8iLyH8ZmiGmtZKJn8vOdTyIsP5DVcy0G0BcOpdE4magNtrUYho+fD+bCyYWr8Io4C5GNWyNQzdHRCR8aqw==", "dependencies": { "date-fns": 
"^3.6.0", "react-virtualized-auto-sizer": "^1.0.24" @@ -1518,6 +1517,22 @@ "node": ">=6.9.0" } }, + "node_modules/@bundled-es-modules/cookie": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/@bundled-es-modules/cookie/-/cookie-2.0.0.tgz", + "integrity": "sha512-Or6YHg/kamKHpxULAdSqhGqnWFneIXu1NKvvfBBzKGwpVsYuFIQ5aBPHDnnoR3ghW1nvSkALd+EF9iMtY7Vjxw==", + "dependencies": { + "cookie": "^0.5.0" + } + }, + "node_modules/@bundled-es-modules/statuses": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@bundled-es-modules/statuses/-/statuses-1.0.1.tgz", + "integrity": "sha512-yn7BklA5acgcBr+7w064fGV+SGIFySjCKpqjcWgBAIfrAkY+4GQTJJHQMeT3V/sgz23VTEVV8TtOmkvJAhFVfg==", + "dependencies": { + "statuses": "^2.0.1" + } + }, "node_modules/@cspotcode/source-map-support": { "version": "0.8.1", "resolved": "https://registry.npmjs.org/@cspotcode/source-map-support/-/source-map-support-0.8.1.tgz", @@ -1670,6 +1685,25 @@ "resolved": "https://registry.npmjs.org/@floating-ui/utils/-/utils-0.2.2.tgz", "integrity": "sha512-J4yDIIthosAsRZ5CPYP/jQvUAQtlZTTD/4suA08/FEnlxqW3sKS9iAhgsa9VYLZ6vDHn/ixJgIqRQPotoBjxIw==" }, + "node_modules/@gradio/client": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@gradio/client/-/client-1.1.1.tgz", + "integrity": "sha512-6Hzc+/wmNRkodefp0bvfOBQbEakwg31Ye9IVOjfoHkvunHoIzjupM+1m7VwhQXt8pMCjOw8Hc7zhvwnwy6f5GQ==", + "dependencies": { + "@types/eventsource": "^1.1.15", + "bufferutil": "^4.0.7", + "eventsource": "^2.0.2", + "fetch-event-stream": "^0.1.5", + "msw": "^2.2.1", + "semiver": "^1.1.0", + "textlinestream": "^1.1.1", + "typescript": "^5.0.0", + "ws": "^8.13.0" + }, + "engines": { + "node": ">=18.0.0" + } + }, "node_modules/@huggingface/hub": { "version": "0.15.1", "resolved": "https://registry.npmjs.org/@huggingface/hub/-/hub-0.15.1.tgz", @@ -2161,6 +2195,144 @@ "url": "https://opencollective.com/libvips" } }, + "node_modules/@inquirer/confirm": { + "version": "3.1.9", + "resolved": 
"https://registry.npmjs.org/@inquirer/confirm/-/confirm-3.1.9.tgz", + "integrity": "sha512-UF09aejxCi4Xqm6N/jJAiFXArXfi9al52AFaSD+2uIHnhZGtd1d6lIGTRMPouVSJxbGEi+HkOWSYaiEY/+szUw==", + "dependencies": { + "@inquirer/core": "^8.2.2", + "@inquirer/type": "^1.3.3" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/core": { + "version": "8.2.2", + "resolved": "https://registry.npmjs.org/@inquirer/core/-/core-8.2.2.tgz", + "integrity": "sha512-K8SuNX45jEFlX3EBJpu9B+S2TISzMPGXZIuJ9ME924SqbdW6Pt6fIkKvXg7mOEOKJ4WxpQsxj0UTfcL/A434Ww==", + "dependencies": { + "@inquirer/figures": "^1.0.3", + "@inquirer/type": "^1.3.3", + "@types/mute-stream": "^0.0.4", + "@types/node": "^20.12.13", + "@types/wrap-ansi": "^3.0.0", + "ansi-escapes": "^4.3.2", + "chalk": "^4.1.2", + "cli-spinners": "^2.9.2", + "cli-width": "^4.1.0", + "mute-stream": "^1.0.0", + "signal-exit": "^4.1.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^6.2.0" + }, + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/core/node_modules/@types/node": { + "version": "20.14.5", + "resolved": "https://registry.npmjs.org/@types/node/-/node-20.14.5.tgz", + "integrity": "sha512-aoRR+fJkZT2l0aGOJhuA8frnCSoNX6W7U2mpNq63+BxBIj5BQFt8rHy627kijCmm63ijdSdwvGgpUsU6MBsZZA==", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@inquirer/core/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/@inquirer/core/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": 
"sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/@inquirer/core/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/@inquirer/core/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/core/node_modules/wrap-ansi": { + "version": "6.2.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-6.2.0.tgz", + "integrity": "sha512-r6lPcBGxZXlIcymEu7InxDMhdW0KDxpLgoFLcguasxCaJ/SOIZwINatK9KY/tf+ZrlywOKU0UDj3ATXUBfxJXA==", + "dependencies": { + "ansi-styles": 
"^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/@inquirer/figures": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@inquirer/figures/-/figures-1.0.3.tgz", + "integrity": "sha512-ErXXzENMH5pJt5/ssXV0DfWUZqly8nGzf0UcBV9xTnP+KyffE2mqyxIMBrZ8ijQck2nU0TQm40EQB53YreyWHw==", + "engines": { + "node": ">=18" + } + }, + "node_modules/@inquirer/type": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@inquirer/type/-/type-1.3.3.tgz", + "integrity": "sha512-xTUt0NulylX27/zMx04ZYar/kr1raaiFTVvQ5feljQsiAgdm0WPj4S73/ye0fbslh+15QrIuDvfCXTek7pMY5A==", + "engines": { + "node": ">=18" + } + }, "node_modules/@isaacs/cliui": { "version": "8.0.2", "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-8.0.2.tgz", @@ -2448,6 +2620,30 @@ "node": ">= 14" } }, + "node_modules/@mswjs/cookies": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/@mswjs/cookies/-/cookies-1.1.1.tgz", + "integrity": "sha512-W68qOHEjx1iD+4VjQudlx26CPIoxmIAtK4ZCexU0/UJBG6jYhcuyzKJx+Iw8uhBIGd9eba64XgWVgo20it1qwA==", + "engines": { + "node": ">=18" + } + }, + "node_modules/@mswjs/interceptors": { + "version": "0.29.1", + "resolved": "https://registry.npmjs.org/@mswjs/interceptors/-/interceptors-0.29.1.tgz", + "integrity": "sha512-3rDakgJZ77+RiQUuSK69t1F0m8BQKA8Vh5DCS5V0DWvNY67zob2JhhQrhCO0AKLGINTRSFd1tBaHcJTkhefoSw==", + "dependencies": { + "@open-draft/deferred-promise": "^2.2.0", + "@open-draft/logger": "^0.3.0", + "@open-draft/until": "^2.0.0", + "is-node-process": "^1.2.0", + "outvariant": "^1.2.1", + "strict-event-emitter": "^0.5.1" + }, + "engines": { + "node": ">=18" + } + }, "node_modules/@next/env": { "version": "14.2.4", "resolved": "https://registry.npmjs.org/@next/env/-/env-14.2.4.tgz", @@ -2628,6 +2824,25 @@ "node": ">= 8" } }, + "node_modules/@open-draft/deferred-promise": { + "version": "2.2.0", + "resolved": 
"https://registry.npmjs.org/@open-draft/deferred-promise/-/deferred-promise-2.2.0.tgz", + "integrity": "sha512-CecwLWx3rhxVQF6V4bAgPS5t+So2sTbPgAzafKkVizyi7tlwpcFpdFqq+wqF2OwNBmqFuu6tOyouTuxgpMfzmA==" + }, + "node_modules/@open-draft/logger": { + "version": "0.3.0", + "resolved": "https://registry.npmjs.org/@open-draft/logger/-/logger-0.3.0.tgz", + "integrity": "sha512-X2g45fzhxH238HKO4xbSr7+wBS8Fvw6ixhTDuvLd5mqh6bJJCFAPwU9mPDxbcrRtfxv4u5IHCEH77BmxvXmmxQ==", + "dependencies": { + "is-node-process": "^1.2.0", + "outvariant": "^1.4.0" + } + }, + "node_modules/@open-draft/until": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/@open-draft/until/-/until-2.1.0.tgz", + "integrity": "sha512-U69T3ItWHvLwGg5eJ0n3I62nWuE6ilHlmz7zM0npLBRvPRd7e6NYmg54vvRtP5mZG7kZqZCFVdsTWo7BPtBujg==" + }, "node_modules/@pkgjs/parseargs": { "version": "0.11.0", "resolved": "https://registry.npmjs.org/@pkgjs/parseargs/-/parseargs-0.11.0.tgz", @@ -4677,11 +4892,21 @@ "resolved": "https://registry.npmjs.org/@tweenjs/tween.js/-/tween.js-23.1.2.tgz", "integrity": "sha512-kMCNaZCJugWI86xiEHaY338CU5JpD0B97p1j1IKNn/Zto8PgACjQx0UxbHjmOcLl/dDOBnItwD07KmCs75pxtQ==" }, + "node_modules/@types/cookie": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/@types/cookie/-/cookie-0.6.0.tgz", + "integrity": "sha512-4Kh9a6B2bQciAhf7FSuMRRkUWecJgJu9nPnx3yzpsfXX/c50REIqpHY4C82bXP90qrLtXtkDxTZosYO3UpOwlA==" + }, "node_modules/@types/draco3d": { "version": "1.4.10", "resolved": "https://registry.npmjs.org/@types/draco3d/-/draco3d-1.4.10.tgz", "integrity": "sha512-AX22jp8Y7wwaBgAixaSvkoG4M/+PlAcm3Qs4OW8yT9DM4xUpWKeFhLueTAyZF39pviAdcDdeJoACapiAceqNcw==" }, + "node_modules/@types/eventsource": { + "version": "1.1.15", + "resolved": "https://registry.npmjs.org/@types/eventsource/-/eventsource-1.1.15.tgz", + "integrity": "sha512-XQmGcbnxUNa06HR3VBVkc9+A2Vpi9ZyLJcdS5dwaQQ/4ZMWFO+5c90FnMUpbtMZwB/FChoYHwuVg8TvkECacTA==" + }, "node_modules/@types/fluent-ffmpeg": { "version": "2.1.24", 
"resolved": "https://registry.npmjs.org/@types/fluent-ffmpeg/-/fluent-ffmpeg-2.1.24.tgz", @@ -4715,6 +4940,14 @@ "@types/node": "*" } }, + "node_modules/@types/mute-stream": { + "version": "0.0.4", + "resolved": "https://registry.npmjs.org/@types/mute-stream/-/mute-stream-0.0.4.tgz", + "integrity": "sha512-CPM9nzrCPPJHQNA9keH9CVkVI+WR5kMa+7XEs5jcGQ0VoAGnLv242w8lIVgwAEfmE4oufJRaTc9PNLQl0ioAow==", + "dependencies": { + "@types/node": "*" + } + }, "node_modules/@types/node": { "version": "20.12.7", "resolved": "https://registry.npmjs.org/@types/node/-/node-20.12.7.tgz", @@ -4777,6 +5010,11 @@ "resolved": "https://registry.npmjs.org/@types/stats.js/-/stats.js-0.17.3.tgz", "integrity": "sha512-pXNfAD3KHOdif9EQXZ9deK82HVNaXP5ZIF5RP2QG6OQFNTaY2YIetfrE9t528vEreGQvEPRDDc8muaoYeK0SxQ==" }, + "node_modules/@types/statuses": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@types/statuses/-/statuses-2.0.5.tgz", + "integrity": "sha512-jmIUGWrAiwu3dZpxntxieC+1n/5c3mjrImkmOSQ2NC5uP6cYO4aAZDdSmRcI5C1oiTmqlZGHC+/NmJrKogbP5A==" + }, "node_modules/@types/three": { "version": "0.165.0", "resolved": "https://registry.npmjs.org/@types/three/-/three-0.165.0.tgz", @@ -4800,6 +5038,11 @@ "resolved": "https://registry.npmjs.org/@types/webxr/-/webxr-0.5.16.tgz", "integrity": "sha512-0E0Cl84FECtzrB4qG19TNTqpunw0F1YF0QZZnFMF6pDw1kNKJtrlTKlVB34stGIsHbZsYQ7H0tNjPfZftkHHoA==" }, + "node_modules/@types/wrap-ansi": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/@types/wrap-ansi/-/wrap-ansi-3.0.0.tgz", + "integrity": "sha512-ltIpx+kM7g/MLRZfkbL7EsCEjfzCcScLpkg37eXEtx5kmrAKBkTJwd1GIAjDSL8wTpM6Hzn5YO4pSb91BEwu1g==" + }, "node_modules/@typescript-eslint/parser": { "version": "6.21.0", "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", @@ -5046,6 +5289,31 @@ "url": "https://github.com/sponsors/epoberezkin" } }, + "node_modules/ansi-escapes": { + "version": "4.3.2", + "resolved": 
"https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.2.tgz", + "integrity": "sha512-gKXj5ALrKWQLsYG9jlTRmR/xKluxHV+Z9QEwNIgCfM1/uwPMCuzVVnh5mwTd+OuBZcwSIMbqssNWRm1lE51QaQ==", + "dependencies": { + "type-fest": "^0.21.3" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/ansi-escapes/node_modules/type-fest": { + "version": "0.21.3", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.21.3.tgz", + "integrity": "sha512-t0rzBq87m3fVcduHDUFhKmyyX+9eo6WQjZvf51Ea/M0Q7+T374Jp1aUiyUl0GKxp8M/OETVHSDvmkyPgvX+X2w==", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/ansi-regex": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", @@ -5365,16 +5633,6 @@ "node": ">=4" } }, - "node_modules/axios": { - "version": "1.7.2", - "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.2.tgz", - "integrity": "sha512-2A8QhOMrbomlDuiLeK9XibIBzuHeRcqqNOHp0Cyp5EoJ1IFDh+XZH3A6BkXtv0K4gFGCI0Y4BM7B1wOEi0Rmgw==", - "dependencies": { - "follow-redirects": "^1.15.6", - "form-data": "^4.0.0", - "proxy-from-env": "^1.1.0" - } - }, "node_modules/axobject-query": { "version": "3.2.1", "resolved": "https://registry.npmjs.org/axobject-query/-/axobject-query-3.2.1.tgz", @@ -5548,6 +5806,18 @@ "resolved": "https://registry.npmjs.org/buffer-equal-constant-time/-/buffer-equal-constant-time-1.0.1.tgz", "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==" }, + "node_modules/bufferutil": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/bufferutil/-/bufferutil-4.0.8.tgz", + "integrity": "sha512-4T53u4PdgsXqKaIctwF8ifXlRTTmEPJ8iEPWFdGZvcf7sbwYo6FKFEX9eNNAnzFZ7EzJAQ3CJeOtCRA4rDp7Pw==", + "hasInstallScript": true, + "dependencies": { + "node-gyp-build": "^4.3.0" + }, + "engines": { + "node": ">=6.14.2" + } + }, 
"node_modules/busboy": { "version": "1.6.0", "resolved": "https://registry.npmjs.org/busboy/-/busboy-1.6.0.tgz", @@ -5741,11 +6011,80 @@ "url": "https://github.com/sponsors/sindresorhus" } }, + "node_modules/cli-width": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", + "integrity": "sha512-ouuZd4/dm2Sw5Gmqy6bGyNNNe1qt9RpmxveLSO7KcgsTnU7RXfsw+/bukWGo1abgBiMAic068rclZsO4IWmmxQ==", + "engines": { + "node": ">= 12" + } + }, "node_modules/client-only": { "version": "0.0.1", "resolved": "https://registry.npmjs.org/client-only/-/client-only-0.0.1.tgz", "integrity": "sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==" }, + "node_modules/cliui": { + "version": "8.0.1", + "resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz", + "integrity": "sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==", + "dependencies": { + "string-width": "^4.2.0", + "strip-ansi": "^6.0.1", + "wrap-ansi": "^7.0.0" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/cliui/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/cliui/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/cliui/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": 
"sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/cliui/node_modules/wrap-ansi": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", + "integrity": "sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==", + "dependencies": { + "ansi-styles": "^4.0.0", + "string-width": "^4.1.0", + "strip-ansi": "^6.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/wrap-ansi?sponsor=1" + } + }, "node_modules/clsx": { "version": "2.1.1", "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.1.tgz", @@ -6124,6 +6463,14 @@ "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", "peer": true }, + "node_modules/cookie": { + "version": "0.5.0", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.5.0.tgz", + "integrity": "sha512-YZ3GUyn/o8gfKJlnlX7g7xq4gyO6OSuhGPKaaGssGB2qgDUS0gPgtTvoyZLTt9Ab6dC4hfc9dV5arkvc/OCmrw==", + "engines": { + "node": ">= 0.6" + } + }, "node_modules/create-require": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/create-require/-/create-require-1.1.1.tgz", @@ -7192,6 +7539,14 @@ "node": ">=0.8.x" } }, + "node_modules/eventsource": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/eventsource/-/eventsource-2.0.2.tgz", + "integrity": "sha512-IzUmBGPR3+oUG9dUeXynyNmf91/3zUSJg1lCktzKw47OXuhco54U3r9B7O4XX+Rb1Itm9OZ2b0RkTs10bICOxA==", + "engines": { + "node": ">=12.0.0" + } + }, "node_modules/eventsource-parser": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-1.1.2.tgz", @@ -7292,6 +7647,11 @@ "reusify": "^1.0.4" } }, + "node_modules/fetch-event-stream": { + "version": 
"0.1.5", + "resolved": "https://registry.npmjs.org/fetch-event-stream/-/fetch-event-stream-0.1.5.tgz", + "integrity": "sha512-V1PWovkspxQfssq/NnxoEyQo1DV+MRK/laPuPblIZmSjMN8P5u46OhlFQznSr9p/t0Sp8Uc6SbM3yCMfr0KU8g==" + }, "node_modules/fflate": { "version": "0.8.2", "resolved": "https://registry.npmjs.org/fflate/-/fflate-0.8.2.tgz", @@ -7397,25 +7757,6 @@ "which": "bin/which" } }, - "node_modules/follow-redirects": { - "version": "1.15.6", - "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", - "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", - "funding": [ - { - "type": "individual", - "url": "https://github.com/sponsors/RubenVerborgh" - } - ], - "engines": { - "node": ">=4.0" - }, - "peerDependenciesMeta": { - "debug": { - "optional": true - } - } - }, "node_modules/for-each": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/for-each/-/for-each-0.3.3.tgz", @@ -7588,6 +7929,14 @@ "node": ">=6.9.0" } }, + "node_modules/get-caller-file": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/get-caller-file/-/get-caller-file-2.0.5.tgz", + "integrity": "sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==", + "engines": { + "node": "6.* || 8.* || >= 10.*" + } + }, "node_modules/get-east-asian-width": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/get-east-asian-width/-/get-east-asian-width-1.2.0.tgz", @@ -7818,6 +8167,14 @@ "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==" }, + "node_modules/graphql": { + "version": "16.8.2", + "resolved": "https://registry.npmjs.org/graphql/-/graphql-16.8.2.tgz", + "integrity": "sha512-cvVIBILwuoSyD54U4cF/UXDh5yAobhNV/tPygI4lZhgOIJQE/WLWC4waBRb4I6bDVYb3OVx3lfHbaQOEoUD5sg==", + "engines": { + "node": "^12.22.0 || ^14.16.0 
|| ^16.0.0 || >=17.0.0" + } + }, "node_modules/groq-sdk": { "version": "0.3.3", "resolved": "https://registry.npmjs.org/groq-sdk/-/groq-sdk-0.3.3.tgz", @@ -7942,6 +8299,11 @@ "he": "bin/he" } }, + "node_modules/headers-polyfill": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/headers-polyfill/-/headers-polyfill-4.0.3.tgz", + "integrity": "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ==" + }, "node_modules/hls.js": { "version": "1.3.5", "resolved": "https://registry.npmjs.org/hls.js/-/hls.js-1.3.5.tgz", @@ -8287,6 +8649,11 @@ "url": "https://github.com/sponsors/ljharb" } }, + "node_modules/is-node-process": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/is-node-process/-/is-node-process-1.2.0.tgz", + "integrity": "sha512-Vg4o6/fqPxIjtxgUH5QLJhwZ7gW5diGCVlXpuUfELC62CuxM1iHcRe51f2W1FDy04Ai4KJkagKjx3XaqyfRKXw==" + }, "node_modules/is-number": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", @@ -9005,6 +9372,107 @@ "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, + "node_modules/msw": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/msw/-/msw-2.3.1.tgz", + "integrity": "sha512-ocgvBCLn/5l3jpl1lssIb3cniuACJLoOfZu01e3n5dbJrpA5PeeWn28jCLgQDNt6d7QT8tF2fYRzm9JoEHtiig==", + "hasInstallScript": true, + "dependencies": { + "@bundled-es-modules/cookie": "^2.0.0", + "@bundled-es-modules/statuses": "^1.0.1", + "@inquirer/confirm": "^3.0.0", + "@mswjs/cookies": "^1.1.0", + "@mswjs/interceptors": "^0.29.0", + "@open-draft/until": "^2.1.0", + "@types/cookie": "^0.6.0", + "@types/statuses": "^2.0.4", + "chalk": "^4.1.2", + "graphql": "^16.8.1", + "headers-polyfill": "^4.0.2", + "is-node-process": "^1.2.0", + "outvariant": "^1.4.2", + "path-to-regexp": "^6.2.0", + "strict-event-emitter": "^0.5.1", + "type-fest": 
"^4.9.0", + "yargs": "^17.7.2" + }, + "bin": { + "msw": "cli/index.js" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/mswjs" + }, + "peerDependencies": { + "typescript": ">= 4.7.x" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/msw/node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/msw/node_modules/chalk": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/msw/node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "engines": { + "node": ">=8" + } + }, + "node_modules/msw/node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/msw/node_modules/type-fest": { + "version": "4.20.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-4.20.1.tgz", + 
"integrity": "sha512-R6wDsVsoS9xYOpy8vgeBlqpdOyzJ12HNfQhC/aAKWM3YoCV9TtunJzh/QpkMgeDhkoynDcw5f1y+qF9yc/HHyg==", + "engines": { + "node": ">=16" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/mustache": { "version": "4.2.0", "resolved": "https://registry.npmjs.org/mustache/-/mustache-4.2.0.tgz", @@ -9013,6 +9481,14 @@ "mustache": "bin/mustache" } }, + "node_modules/mute-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-1.0.0.tgz", + "integrity": "sha512-avsJQhyd+680gKXyG/sQc0nXaC6rBkPOfyHYcFb9+hdkqQkR9bdnkJ0AMZhke0oesPqIO+mFFJ+IdBc7mst4IA==", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, "node_modules/mz": { "version": "2.7.0", "resolved": "https://registry.npmjs.org/mz/-/mz-2.7.0.tgz", @@ -9176,6 +9652,16 @@ "node": ">= 6.13.0" } }, + "node_modules/node-gyp-build": { + "version": "4.8.1", + "resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.8.1.tgz", + "integrity": "sha512-OSs33Z9yWr148JZcbZd5WiAXhh/n9z8TxQcdMhIOlpN9AhWpLfvVFO73+m77bBABQMaY9XSvIa+qk0jlI7Gcaw==", + "bin": { + "node-gyp-build": "bin.js", + "node-gyp-build-optional": "optional.js", + "node-gyp-build-test": "build-test.js" + } + }, "node_modules/node-html-parser": { "version": "6.1.13", "resolved": "https://registry.npmjs.org/node-html-parser/-/node-html-parser-6.1.13.tgz", @@ -9484,6 +9970,11 @@ "url": "https://github.com/chalk/strip-ansi?sponsor=1" } }, + "node_modules/outvariant": { + "version": "1.4.2", + "resolved": "https://registry.npmjs.org/outvariant/-/outvariant-1.4.2.tgz", + "integrity": "sha512-Ou3dJ6bA/UJ5GVHxah4LnqDwZRwAmWxrG3wtrHrbGnP4RnLCtA64A4F+ae7Y8ww660JaddSoArUR5HjipWSHAQ==" + }, "node_modules/p-finally": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/p-finally/-/p-finally-1.0.0.tgz", @@ -9621,6 +10112,11 @@ "node": "14 || >=16.14" } }, + "node_modules/path-to-regexp": { + "version": "6.2.2", + "resolved": 
"https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-6.2.2.tgz", + "integrity": "sha512-GQX3SSMokngb36+whdpRXE+3f9V8UzyAorlYvOGx87ufGHehNTn5lCxrKtLyZ4Yl/wEKnNnr98ZzOwwDZV5ogw==" + }, "node_modules/path-type": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", @@ -9897,11 +10393,6 @@ "react-is": "^16.13.1" } }, - "node_modules/proxy-from-env": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", - "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==" - }, "node_modules/punycode": { "version": "2.3.1", "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", @@ -10353,6 +10844,14 @@ "readable-stream": ">=4.0.0" } }, + "node_modules/require-directory": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz", + "integrity": "sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==", + "engines": { + "node": ">=0.10.0" + } + }, "node_modules/require-from-string": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/require-from-string/-/require-from-string-2.0.2.tgz", @@ -10557,6 +11056,14 @@ "loose-envify": "^1.1.0" } }, + "node_modules/semiver": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/semiver/-/semiver-1.1.0.tgz", + "integrity": "sha512-QNI2ChmuioGC1/xjyYwyZYADILWyW6AmS1UH6gDj/SFUUUS4MBAWs/7mxnkRPc/F4iHezDP+O8t0dO8WHiEOdg==", + "engines": { + "node": ">=6" + } + }, "node_modules/semver": { "version": "7.6.2", "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.2.tgz", @@ -10778,6 +11285,14 @@ "resolved": "https://registry.npmjs.org/stats.js/-/stats.js-0.17.0.tgz", "integrity": "sha512-hNKz8phvYLPEcRkeG1rsGmV5ChMjKDAWU7/OJJdDErPBNChQXxCo3WZurGpnWc6gZhAzEPFad1aVgyOANH1sMw==" }, + "node_modules/statuses": { + "version": "2.0.1", + "resolved": 
"https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", + "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "engines": { + "node": ">= 0.8" + } + }, "node_modules/stdin-discarder": { "version": "0.2.2", "resolved": "https://registry.npmjs.org/stdin-discarder/-/stdin-discarder-0.2.2.tgz", @@ -10797,6 +11312,11 @@ "node": ">=10.0.0" } }, + "node_modules/strict-event-emitter": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/strict-event-emitter/-/strict-event-emitter-0.5.1.tgz", + "integrity": "sha512-vMgjE/GGEPEFnhFub6pa4FmJBRBVOLpIII2hvCZ8Kzb7K0hlHo7mQv6xYrBvCL2LtAIBwFUK8wvuJgTVSQ5MFQ==" + }, "node_modules/string_decoder": { "version": "1.3.0", "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", @@ -11162,6 +11682,11 @@ "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==" }, + "node_modules/textlinestream": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/textlinestream/-/textlinestream-1.1.1.tgz", + "integrity": "sha512-iBHbi7BQxrFmwZUQJsT0SjNzlLLsXhvW/kg7EyOMVMBIrlnj/qYofwo1LVLZi+3GbUEo96Iu2eqToI2+lZoAEQ==" + }, "node_modules/thenify": { "version": "3.3.1", "resolved": "https://registry.npmjs.org/thenify/-/thenify-3.3.1.tgz", @@ -11992,6 +12517,34 @@ "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==" }, + "node_modules/ws": { + "version": "8.17.1", + "resolved": "https://registry.npmjs.org/ws/-/ws-8.17.1.tgz", + "integrity": "sha512-6XQFvXTkbfUOZOKKILFG1PDK2NDQs4azKQl26T0YS5CxqWLgXajbPZ+h4gZekJyRqFU8pvnbAbbs/3TgRPy+GQ==", + "engines": { + "node": ">=10.0.0" + }, + "peerDependencies": { + "bufferutil": "^4.0.1", + "utf-8-validate": ">=5.0.2" + }, + "peerDependenciesMeta": { + 
"bufferutil": { + "optional": true + }, + "utf-8-validate": { + "optional": true + } + } + }, + "node_modules/y18n": { + "version": "5.0.8", + "resolved": "https://registry.npmjs.org/y18n/-/y18n-5.0.8.tgz", + "integrity": "sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==", + "engines": { + "node": ">=10" + } + }, "node_modules/yallist": { "version": "4.0.0", "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", @@ -12008,6 +12561,49 @@ "node": ">= 14" } }, + "node_modules/yargs": { + "version": "17.7.2", + "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.2.tgz", + "integrity": "sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==", + "dependencies": { + "cliui": "^8.0.1", + "escalade": "^3.1.1", + "get-caller-file": "^2.0.5", + "require-directory": "^2.1.1", + "string-width": "^4.2.3", + "y18n": "^5.0.5", + "yargs-parser": "^21.1.1" + }, + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs-parser": { + "version": "21.1.1", + "resolved": "https://registry.npmjs.org/yargs-parser/-/yargs-parser-21.1.1.tgz", + "integrity": "sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==", + "engines": { + "node": ">=12" + } + }, + "node_modules/yargs/node_modules/emoji-regex": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-8.0.0.tgz", + "integrity": "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==" + }, + "node_modules/yargs/node_modules/string-width": { + "version": "4.2.3", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-4.2.3.tgz", + "integrity": "sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==", + "dependencies": { + "emoji-regex": "^8.0.0", + "is-fullwidth-code-point": "^3.0.0", + "strip-ansi": "^6.0.1" + }, + "engines": { + "node": ">=8" + } + }, 
"node_modules/yn": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", @@ -12041,9 +12637,9 @@ } }, "node_modules/zod-to-json-schema": { - "version": "3.23.0", - "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.23.0.tgz", - "integrity": "sha512-az0uJ243PxsRIa2x1WmNE/pnuA05gUq/JB8Lwe1EDCCL/Fz9MgjYQ0fPlyc2Tcv6aF2ZA7WM5TWaRZVEFaAIag==", + "version": "3.23.1", + "resolved": "https://registry.npmjs.org/zod-to-json-schema/-/zod-to-json-schema-3.23.1.tgz", + "integrity": "sha512-oT9INvydob1XV0v1d2IadrR74rLtDInLvDFfAa1CG0Pmg/vxATk7I2gSelfj271mbzeM4Da0uuDQE/Nkj3DWNw==", "peerDependencies": { "zod": "^3.23.3" } diff --git a/package.json b/package.json index 2afbc793a1bd47102cbf7167e16f660725b06a71..7f8cdac18e4aa1bcd296d14a0a73f1a1d5b8bd0e 100644 --- a/package.json +++ b/package.json @@ -12,8 +12,9 @@ "dependencies": { "@aitube/clap": "0.0.27", "@aitube/engine": "0.0.24", - "@aitube/timeline": "0.0.26", + "@aitube/timeline": "0.0.29", "@fal-ai/serverless-client": "^0.10.3", + "@gradio/client": "^1.1.1", "@huggingface/hub": "^0.15.1", "@huggingface/inference": "^2.7.0", "@langchain/anthropic": "^0.2.0", @@ -58,14 +59,12 @@ "@upstash/ratelimit": "^1.1.3", "@upstash/redis": "^1.31.1", "autoprefixer": "10.4.17", - "axios": "^1.7.2", "class-variance-authority": "^0.7.0", "clsx": "^2.1.0", "cmdk": "^0.2.1", "eslint": "8.57.0", "eslint-config-next": "14.1.0", "fluent-ffmpeg": "^2.1.3", - "form-data": "^4.0.0", "fs-extra": "^11.2.0", "lucide-react": "^0.334.0", "mlt-xml": "^2.0.2", diff --git a/src/app/api/resolve/providers/falai/index.ts b/src/app/api/resolve/providers/falai/index.ts index 9a732b20d8afdfc83fc9d099391cd3e7a5f36bff..8a56f250de24d7255b84d3602017a24bd42b7be7 100644 --- a/src/app/api/resolve/providers/falai/index.ts +++ b/src/app/api/resolve/providers/falai/index.ts @@ -1,10 +1,8 @@ import * as fal from '@fal-ai/serverless-client' import { FalAiImageSize, ResolveRequest } from "@/types" -import { 
ClapMediaOrientation, ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap" -import { decodeOutput } from '@/lib/utils/decodeOutput' +import { ClapMediaOrientation, ClapSegment, ClapSegmentCategory } from "@aitube/clap" import { FalAiAudioResponse, FalAiImageResponse, FalAiSpeechResponse, FalAiVideoResponse } from './types' -import { getResolveRequestPrompts } from '@/lib/utils/getResolveRequestPrompts' export async function resolveSegment(request: ResolveRequest): Promise { if (!request.settings.falAiApiKey) { @@ -15,161 +13,146 @@ export async function resolveSegment(request: ResolveRequest): Promise s.category === ClapSegmentCategory.STORYBOARD) - if (!storyboard) { - throw new Error(`cannot generate a video without a storyboard (the concept of Clapper is to use storyboards)`) - } - const result = await fal.run(request.settings.falAiModelForVideo, { + if (request.settings.falAiModelForImage === "fal-ai/pulid") { + result = await fal.run(request.settings.falAiModelForImage, { input: { - image_url: storyboard.assetUrl, - - motion_bucket_id: 55, - - // The conditoning augmentation determines the amount of noise that - // will be added to the conditioning frame. The higher the number, - // the more noise there will be, and the less the video will look - // like the initial image. Increase it for more motion. 
- // Default value: 0.02 - cond_aug: 0.02, - + reference_images: [{ + image_url: request.prompts.image.identity + }], + image_size: imageSize, + num_images: 1, sync_mode: true, enable_safety_checker: request.settings.censorNotForAllAudiencesContent }, - }) as FalAiVideoResponse - - if (request.settings.censorNotForAllAudiencesContent) { - if (result.has_nsfw_concepts.includes(true)) { - throw new Error(`The generated content has been filtered according to your safety settings`) - } - } - - content = result?.video?.url - } else if ( - request.segment.category === ClapSegmentCategory.SOUND - || - request.segment.category === ClapSegmentCategory.MUSIC - ) { - const result = await fal.run(request.settings.falAiModelForSound, { + }) as FalAiImageResponse + + } else { + result = await fal.run(request.settings.falAiModelForImage, { input: { - // note how we use the *segment* prompt for music or sound - prompt: request.segment.prompt, - + prompt: request.prompts.image.positive, + image_size: imageSize, sync_mode: true, + num_inference_steps: + request.settings.falAiModelForImage === "fal-ai/stable-diffusion-v3-medium" + ? 
40 + : 25, + num_images: 1, enable_safety_checker: request.settings.censorNotForAllAudiencesContent }, - }) as FalAiAudioResponse + }) as FalAiImageResponse + } - content = await decodeOutput(result?.audio_file?.url) - } else if ( - request.segment.category === ClapSegmentCategory.DIALOGUE - ) { - const result = await fal.run(request.settings.falAiModelForVoice, { - input: { - text: request.segment.prompt, + + if (request.settings.censorNotForAllAudiencesContent) { + if (result.has_nsfw_concepts.includes(true)) { + throw new Error(`The generated content has been filtered according to your safety settings`) + } + } - // todo use the entty audio id, if available - audio_url: "https://cdn.themetavoice.xyz/speakers/bria.mp3", + segment.assetUrl = result.images[0]?.url || "" + } else if (request.segment.category === ClapSegmentCategory.VIDEO) { - sync_mode: true, - enable_safety_checker: request.settings.censorNotForAllAudiencesContent - }, - }) as FalAiSpeechResponse + // console.log(`request.settings.falAiModelForVideo = `, request.settings.falAiModelForVideo) + if (request.settings.falAiModelForVideo !== "fal-ai/stable-video") { + throw new Error(`only "fal-ai/stable-video" is supported by Clapper for the moment`) + } - content = result?.audio_url?.url - } else { - throw new Error(`Clapper doesn't support ${request.segment.category} generation for provider "Fal.ai". Please open a pull request with (working code) to solve this!`) + const storyboard = request.segments.find(s => s.category === ClapSegmentCategory.STORYBOARD) + if (!storyboard) { + throw new Error(`cannot generate a video without a storyboard (the concept of Clapper is to use storyboards)`) + } + const result = await fal.run(request.settings.falAiModelForVideo, { + input: { + image_url: storyboard.assetUrl, + + motion_bucket_id: 55, + + // The conditoning augmentation determines the amount of noise that + // will be added to the conditioning frame. 
The higher the number, + // the more noise there will be, and the less the video will look + // like the initial image. Increase it for more motion. + // Default value: 0.02 + cond_aug: 0.02, + + sync_mode: true, + enable_safety_checker: request.settings.censorNotForAllAudiencesContent + }, + }) as FalAiVideoResponse + + if (request.settings.censorNotForAllAudiencesContent) { + if (result.has_nsfw_concepts.includes(true)) { + throw new Error(`The generated content has been filtered according to your safety settings`) + } } - segment.assetUrl = await decodeOutput(content) - // console.log(`segment.assetUrl = ${segment.assetUrl.slice(0, 80)}..`) - segment.assetSourceType = getClapAssetSourceType(segment.assetUrl) - } catch (err) { - console.error(`failed to call Fal.ai: `, err) - segment.assetUrl = '' - segment.assetSourceType = getClapAssetSourceType(segment.assetUrl) - segment.status = ClapSegmentStatus.TO_GENERATE + segment.assetUrl = result?.video?.url || "" + } else if ( + request.segment.category === ClapSegmentCategory.SOUND + || + request.segment.category === ClapSegmentCategory.MUSIC + ) { + const result = await fal.run(request.settings.falAiModelForSound, { + input: { + // note how we use the *segment* prompt for music or sound + prompt: request.segment.prompt, + + sync_mode: true, + enable_safety_checker: request.settings.censorNotForAllAudiencesContent + }, + }) as FalAiAudioResponse + + segment.assetUrl = result?.audio_file?.url || "" + } else if ( + request.segment.category === ClapSegmentCategory.DIALOGUE + ) { + const result = await fal.run(request.settings.falAiModelForVoice, { + input: { + text: request.segment.prompt, + + // todo use the entty audio id, if available + audio_url: "https://cdn.themetavoice.xyz/speakers/bria.mp3", + + sync_mode: true, + enable_safety_checker: request.settings.censorNotForAllAudiencesContent + }, + }) as FalAiSpeechResponse + + segment.assetUrl = result?.audio_url?.url || "" + } else { + throw new Error(`Clapper 
doesn't support ${request.segment.category} generation for provider "Fal.ai". Please open a pull request with (working code) to solve this!`) } return segment diff --git a/src/app/api/resolve/providers/gradio/index.ts b/src/app/api/resolve/providers/gradio/index.ts new file mode 100644 index 0000000000000000000000000000000000000000..c8ac159e87f74e5edc8497a522024118baff1033 --- /dev/null +++ b/src/app/api/resolve/providers/gradio/index.ts @@ -0,0 +1,30 @@ +import { ClapSegment, ClapSegmentCategory } from "@aitube/clap" + +import { ResolveRequest } from "@/types" +import { callGradioApi } from "@/lib/hf/callGradioApi" + +export async function resolveSegment(request: ResolveRequest): Promise { + + const segment = request.segment + + if (request.segment.category === ClapSegmentCategory.STORYBOARD) { + segment.assetUrl = await callGradioApi({ + url: request.settings.gradioApiUrlForImage, + inputs: request.prompts.image, + }) + } if (request.segment.category === ClapSegmentCategory.DIALOGUE) { + segment.assetUrl = await callGradioApi({ + url: request.settings.gradioApiUrlForVoice, + inputs: request.prompts.voice, + }) + } if (request.segment.category === ClapSegmentCategory.VIDEO) { + segment.assetUrl = await callGradioApi({ + url: request.settings.gradioApiUrlForVideo, + inputs: request.prompts.video, + }) + } else { + throw new Error(`Clapper doesn't support ${request.segment.category} generation for provider "Gradio". 
Please open a pull request with (working code) to solve this!`) + } + + return segment +} \ No newline at end of file diff --git a/src/app/api/resolve/providers/huggingface/generateImage.ts b/src/app/api/resolve/providers/huggingface/generateImage.ts new file mode 100644 index 0000000000000000000000000000000000000000..27ec3cf4f28dbd719feecaad3bcbaebc461c684e --- /dev/null +++ b/src/app/api/resolve/providers/huggingface/generateImage.ts @@ -0,0 +1,42 @@ +import { HfInference, HfInferenceEndpoint } from "@huggingface/inference" + +import { decodeOutput } from "@/lib/utils/decodeOutput" +import { ResolveRequest } from "@/types" + +export async function generateImage(request: ResolveRequest): Promise { + + if (!request.settings.huggingFaceModelForImage) { + throw new Error(`HuggingFace.generateImage: cannot generate without a valid huggingFaceModelForImage`) + } + + if (!request.prompts.image.positive) { + throw new Error(`HuggingFace.generateImage: cannot generate without a valid positive image prompt`) + } + + if (!request.settings.huggingFaceApiKey) { + throw new Error(`HuggingFace.generateImage: cannot generate without a valid huggingFaceApiKey`) + } + + const hf: HfInferenceEndpoint = new HfInference(request.settings.huggingFaceApiKey) + + const blob: Blob = await hf.textToImage({ + model: request.settings.huggingFaceModelForImage, + inputs: request.prompts.image.positive, + parameters: { + height: request.meta.height, + width: request.meta.width, + /** + * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference. + */ + // num_inference_steps?: number; + /** + * Guidance scale: Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality. 
+ */ + // guidance_scale?: number; + } + }) + + console.log("output from Hugging Face Inference API:", blob) + + throw new Error(`finish me`) +} \ No newline at end of file diff --git a/src/app/api/resolve/providers/huggingface/generateVideo.ts b/src/app/api/resolve/providers/huggingface/generateVideo.ts new file mode 100644 index 0000000000000000000000000000000000000000..f114f6442a97cf441f3b2f5039749e63ac5c1c89 --- /dev/null +++ b/src/app/api/resolve/providers/huggingface/generateVideo.ts @@ -0,0 +1,28 @@ +import { ResolveRequest } from "@/types" +import { callGradioApi } from "@/lib/hf/callGradioApi" + +export async function generateVideo(request: ResolveRequest): Promise { + + if (!request.settings.huggingFaceModelForVideo) { + throw new Error(`HuggingFace.generateVideo: cannot generate without a valid huggingFaceModelForVideo`) + } + + if (!request.prompts.video.image) { + throw new Error(`HuggingFace.generateVideo: cannot generate without a valid input image prompt`) + } + + if (!request.settings.huggingFaceApiKey) { + throw new Error(`HuggingFace.generateVideo: cannot generate without a valid huggingFaceApiKey`) + } + + // TODO pass a type to the template function + const output = await callGradioApi({ + url: request.settings.huggingFaceModelForVideo, + inputs: request.prompts.video, + apiKey: request.settings.huggingFaceApiKey + }) + + console.log(`output from the Gradio API:`, output) + + throw new Error(`please finish me`) +} \ No newline at end of file diff --git a/src/app/api/resolve/providers/huggingface/generateVoice.ts b/src/app/api/resolve/providers/huggingface/generateVoice.ts new file mode 100644 index 0000000000000000000000000000000000000000..bb1865f2fec84caf05523f45c2ac56607bf4e77f --- /dev/null +++ b/src/app/api/resolve/providers/huggingface/generateVoice.ts @@ -0,0 +1,29 @@ +import { HfInference, HfInferenceEndpoint } from "@huggingface/inference" + +import { ResolveRequest } from "@/types" + +export async function generateVoice(request: 
ResolveRequest): Promise { + + if (!request.settings.huggingFaceModelForVoice) { + throw new Error(`HuggingFace.generateVoice: cannot generate without a valid huggingFaceModelForVoice`) + } + + if (!request.prompts.voice.positive) { + throw new Error(`HuggingFace.generateVoice: cannot generate without a valid voice prompt`) + } + + if (!request.settings.huggingFaceApiKey) { + throw new Error(`HuggingFace.generateVoice: cannot generate without a valid huggingFaceApiKey`) + } + + const hf: HfInferenceEndpoint = new HfInference(request.settings.huggingFaceApiKey) + + const blob: Blob = await hf.textToSpeech({ + model: request.settings.huggingFaceModelForVoice, + inputs: request.prompts.voice.positive, + }) + + console.log("output from Hugging Face Inference API:", blob) + + throw new Error(`finish me`) +} \ No newline at end of file diff --git a/src/app/api/resolve/providers/huggingface/index.ts b/src/app/api/resolve/providers/huggingface/index.ts index ec2b9213703c7bc66655788da7308712ba3a8ecf..f72b7943f26aac1abab1e9e72e5ead09ec11d1eb 100644 --- a/src/app/api/resolve/providers/huggingface/index.ts +++ b/src/app/api/resolve/providers/huggingface/index.ts @@ -1,41 +1,30 @@ import { HfInference, HfInferenceEndpoint } from "@huggingface/inference" import { ResolveRequest } from "@/types" -import { ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap" -import { getResolveRequestPrompts } from "@/lib/utils/getResolveRequestPrompts" -import { decodeOutput } from "@/lib/utils/decodeOutput" +import { ClapSegment, ClapSegmentCategory } from "@aitube/clap" + +import { generateImage } from "./generateImage" +import { generateVoice } from "./generateVoice" +import { generateVideo } from "./generateVideo" export async function resolveSegment(request: ResolveRequest): Promise { if (!request.settings.huggingFaceApiKey) { throw new Error(`Missing API key for "Hugging Face"`) } - - const hf: HfInferenceEndpoint = new 
HfInference(request.settings.huggingFaceApiKey) - if (request.segment.category !== ClapSegmentCategory.STORYBOARD) { - throw new Error(`Clapper doesn't support ${request.segment.category} generation for provider "Hugging Face". Please open a pull request with (working code) to solve this!`) - } - - const segment: ClapSegment = { ...request.segment } + const segment = request.segment - const prompts = getResolveRequestPrompts(request) - - try { - const blob: Blob = await hf.textToImage({ - model: request.settings.huggingFaceModelForImage, - inputs: prompts.positivePrompt - }) + const hf: HfInferenceEndpoint = new HfInference(request.settings.huggingFaceApiKey) - segment.assetUrl = await decodeOutput(blob) - console.log(`successfully called Hugging Face`) - segment.assetSourceType = getClapAssetSourceType(segment.assetUrl) - } catch (err) { - console.error(`failed to call Hugging Face: `, err) - segment.assetUrl = '' - segment.assetSourceType = getClapAssetSourceType(segment.assetUrl) - segment.status = ClapSegmentStatus.TO_GENERATE + if (request.segment.category === ClapSegmentCategory.STORYBOARD) { + segment.assetUrl = await generateImage(request) + } if (request.segment.category === ClapSegmentCategory.DIALOGUE) { + segment.assetUrl = await generateVoice(request) + } if (request.segment.category === ClapSegmentCategory.VIDEO) { + segment.assetUrl = await generateVideo(request) + } else { + throw new Error(`Clapper doesn't support ${request.segment.category} generation for provider "Hugging Face" with model (or space) "${request.settings.huggingFaceModelForVideo}". 
Please open a pull request with (working code) to solve this!`) } - return segment } \ No newline at end of file diff --git a/src/app/api/resolve/providers/replicate/index.ts b/src/app/api/resolve/providers/replicate/index.ts index eaa873fd65741b2e137c43a12c4a00fc02385769..34dbdae904880b7b73564b4ac577a20c957ea4c4 100644 --- a/src/app/api/resolve/providers/replicate/index.ts +++ b/src/app/api/resolve/providers/replicate/index.ts @@ -1,9 +1,8 @@ import Replicate from 'replicate' +import { ClapSegment, ClapSegmentCategory } from "@aitube/clap" + import { ResolveRequest } from "@/types" -import { ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap" -import { getResolveRequestPrompts } from '@/lib/utils/getResolveRequestPrompts' -import { decodeOutput } from '@/lib/utils/decodeOutput' export async function resolveSegment(request: ResolveRequest): Promise { if (!request.settings.replicateApiKey) { @@ -15,26 +14,51 @@ export async function resolveSegment(request: ResolveRequest): Promise { + + if (!request.settings.stabilityAiApiKey) { + throw new Error(`StabilityAI.generateImage: cannot generate without a valid stabilityAiApiKey`) + } + + if (!request.settings.stabilityAiModelForImage) { + throw new Error(`StabilityAI.generateImage: cannot generate without a valid stabilityAiModelForImage`) + } + + if (!request.prompts.image.positive) { + throw new Error(`StabilityAI.generateImage: cannot generate without a valid positive prompt`) + } + + const aspectRatio = + request.meta.orientation === ClapMediaOrientation.SQUARE + ? StabilityAiImageSize.SQUARE + : request.meta.orientation === ClapMediaOrientation.PORTRAIT + ? StabilityAiImageSize.PORTRAIT_9_16 + : StabilityAiImageSize.LANDSCAPE_16_9 + + + // what's cool about the ultra model is its capacity to take in + // very large prompts, up to 10000 characters apparently? 
+ + // To control the weight of a given word use the format (word:weight), + // where word is the word you'd like to control the weight of and weight + // is a value between 0 and 1. + // For example: The sky was a crisp (blue:0.3) and (green:0.8) would + // convey a sky that was blue and green, but more green than blue. + + const body = new FormData() + body.set("prompt", `${request.prompts.image.positive || ""}`) + body.set("output_format", "jpeg") // "png" + body.set("negative_prompt", `${request.prompts.image.negative || ""}`) + body.set("aspect_ratio", `${aspectRatio || ""}`) + + const response = await fetch(`https://api.stability.ai/v2beta/${request.settings.stabilityAiModelForImage}`, { + method: "POST", + headers: { + Authorization: `Bearer ${request.settings.stabilityAiApiKey}`, + }, + body, + cache: "no-store" + }) + + console.log("response:", response) + + /* + if (response.status === 200) { + const buffer = Buffer.from(response.data) + const rawAssetUrl = `data:image/${payload.output_format};base64,${buffer.toString('base64')}` + return rawAssetUrl + } else { + throw new Error(`${response.status}: ${response.data.toString()}`); + } + */ + throw new Error("finish me") +} \ No newline at end of file diff --git a/src/app/api/resolve/providers/stabilityai/generateVideo.ts b/src/app/api/resolve/providers/stabilityai/generateVideo.ts new file mode 100644 index 0000000000000000000000000000000000000000..92295452c1b4f94d55c86e0f8af8ffac6fe8cec5 --- /dev/null +++ b/src/app/api/resolve/providers/stabilityai/generateVideo.ts @@ -0,0 +1,89 @@ +import { sleep } from "@/lib/utils/sleep" +import { ResolveRequest } from "@/types" + +export async function generateVideo(request: ResolveRequest): Promise { + + + if (!request.settings.stabilityAiApiKey) { + throw new Error(`StabilityAI.generateVideo: cannot generate without a valid stabilityAiApiKey`) + } + + if (!request.settings.stabilityAiModelForVideo) { + throw new Error(`StabilityAI.generateVideo: cannot generate 
without a valid stabilityAiModelForVideo`) + } + + + if (!request.prompts.video.image) { + throw new Error(`StabilityAI.generateVideo: cannot generate without a valid image input`) + } + + + // what's cool about the ultra model is its capacity to take in + // very large prompts, up to 10000 characters apparently? + + // To control the weight of a given word use the format (word:weight), + // where word is the word you'd like to control the weight of and weight + // is a value between 0 and 1. + // For example: The sky was a crisp (blue:0.3) and (green:0.8) would + // convey a sky that was blue and green, but more green than blue. + + const body = new FormData() + + // Supported Formats: jpeg, png + // Supported Dimensions: 1024x576, 576x1024, 768x768 + + // "Please ensure that the source image is in the correct format and dimensions" + body.set("image", `${request.prompts.video.image || ""}`) + + const response = await fetch(`https://api.stability.ai/v2beta/image-to-video`, { + method: "POST", + headers: { + Authorization: `Bearer ${request.settings.stabilityAiApiKey}`, + }, + body, + cache: "no-store" + }) as unknown as { data: { id: number } } + + const generationId = response?.data?.id + if (!generationId) { throw new Error(`StabilityAI failed to give us a valid response.data.id`) } + + console.log("Generation ID:", generationId); + + + let pollingCount = 0 + do { + // This is normally a fast model, so let's check every 4 seconds + await sleep(10000) + + const res = await fetch(`https://api.stability.ai/v2beta/image-to-video/result/${generationId}`, { + method: "GET", + headers: { + Authorization: `Bearer ${request.settings.stabilityAiApiKey}`, + Accept: "video/*", // Use 'application/json' to receive base64 encoded JSON + }, + cache: "no-store" + }); + + if (res.status === 200) { + try { + const response = (await res.json()) as any + const errors = `${response?.errors || ""}` + if (errors) { + throw new Error(errors) + } + return response.output.pop() + } catch 
(err) { + console.error("res.json() error:", err) + } + } + + pollingCount++ + + // To prevent indefinite polling, we can stop after a certain number + if (pollingCount >= 40) { + throw new Error('Request timed out.') + } + } while (true) + + throw new Error("finish me") +} \ No newline at end of file diff --git a/src/app/api/resolve/providers/stabilityai/index.ts b/src/app/api/resolve/providers/stabilityai/index.ts index 4622c6424f7c379623d044caa093f0d1441cd360..3ca0ee1ffe56a066f685e3c233cc7cc9fe127ce4 100644 --- a/src/app/api/resolve/providers/stabilityai/index.ts +++ b/src/app/api/resolve/providers/stabilityai/index.ts @@ -1,74 +1,22 @@ +import { ClapSegment, ClapSegmentCategory } from "@aitube/clap" -import { ResolveRequest, StabilityAiImageSize } from "@/types" -import { ClapMediaOrientation, ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap" -import { decodeOutput } from '@/lib/utils/decodeOutput' -import { getResolveRequestPrompts } from '@/lib/utils/getResolveRequestPrompts' -import { performRequest } from "./performRequest" +import { ResolveRequest } from "@/types" +import { generateImage } from "./generateImage" export async function resolveSegment(request: ResolveRequest): Promise { if (!request.settings.stabilityAiApiKey) { throw new Error(`Missing API key for "Stability.ai"`) } + const segment = request.segment - const segment: ClapSegment = { ...request.segment } - - let content = '' - - const prompts = getResolveRequestPrompts(request) - - try { - - // for doc see: - // https://fal.ai/models/fal-ai/fast-sdxl/api + // for doc see: + // https://fal.ai/models/fal-ai/fast-sdxl/api - if (request.segment.category === ClapSegmentCategory.STORYBOARD) { - - - if (!prompts.positivePrompt) { - console.error(`resolveSegment: cannot resolve a storyboard with an empty prompt`) - return segment - } - - const imageSize: StabilityAiImageSize = - request.meta.orientation === ClapMediaOrientation.SQUARE - ? 
StabilityAiImageSize.SQUARE - : request.meta.orientation === ClapMediaOrientation.PORTRAIT - ? StabilityAiImageSize.PORTRAIT_9_16 - : StabilityAiImageSize.LANDSCAPE_16_9 - - const assetUrl = await performRequest({ - modelName: request.settings.imageGenerationModel, - - // what's cool about the ultra model is its capacity to take in - // very large prompts, up to 10000 characters apparently? - - // To control the weight of a given word use the format (word:weight), - // where word is the word you'd like to control the weight of and weight - // is a value between 0 and 1. - // For example: The sky was a crisp (blue:0.3) and (green:0.8) would - // convey a sky that was blue and green, but more green than blue. - positivePrompt: prompts.positivePrompt, - negativePrompt: prompts.negativePrompt, - - imageSize, - - apiKey: request.settings.stabilityAiApiKey - }) - - content = assetUrl - } else { - throw new Error(`Clapper doesn't support ${request.segment.category} generation for provider "Stability.ai". Please open a pull request with (working code) to solve this!`) - } - - segment.assetUrl = await decodeOutput(content) - segment.assetSourceType = getClapAssetSourceType(segment.assetUrl) - } catch (err) { - console.error(`failed to call Stability.ai: `, err) - segment.assetUrl = '' - segment.assetSourceType = getClapAssetSourceType(segment.assetUrl) - segment.status = ClapSegmentStatus.TO_GENERATE + if (request.segment.category === ClapSegmentCategory.STORYBOARD) { + segment.assetUrl = await generateImage(request) + } else { + throw new Error(`Clapper doesn't support ${request.segment.category} generation for provider "Stability.ai". 
Please open a pull request with (working code) to solve this!`) } - return segment } \ No newline at end of file diff --git a/src/app/api/resolve/providers/stabilityai/performRequest.ts b/src/app/api/resolve/providers/stabilityai/performRequest.ts deleted file mode 100644 index c66f41a06d607415f92b781943b0e7edb4c176fc..0000000000000000000000000000000000000000 --- a/src/app/api/resolve/providers/stabilityai/performRequest.ts +++ /dev/null @@ -1,64 +0,0 @@ - -import axios from "axios" -import FormData from "form-data" - -import { decodeOutput } from "@/lib/utils/decodeOutput" -import { StabilityAiImageSize } from "@/types" - -export async function performRequest({ - positivePrompt, - negativePrompt, - modelName, - imageSize, - apiKey -}: { - positivePrompt: string - negativePrompt: string - modelName: string - imageSize: StabilityAiImageSize - apiKey: string -}): Promise { - - const payload = { - prompt: positivePrompt, - output_format: "jpeg", // "webp", - negative_prompt: negativePrompt, - aspect_ratio: imageSize, - - - - // string (GenerationMode) - // Default: text-to-image - // Enum: image-to-image text-to-image - // Controls whether this is a text-to-image or image-to-image generation, which affects which parameters are required: - - // text-to-image requires only the prompt parameter - // image-to-image requires the prompt, image, and strength parameters - // mode: "text-to-image", - - // "stable-image/generate/sd3" supports this option: - // model: "", // the model to use ("SD3 Medium", "SD3 Large", or "SD3 Large Turbo") - }; - - const response = await axios.postForm( - `https://api.stability.ai/v2beta/${modelName}`, - axios.toFormData(payload, new FormData()), - { - validateStatus: undefined, - responseType: "arraybuffer", - headers: { - Authorization: `Bearer ${apiKey}`, - Accept: "image/*" - }, - }, - ) - - if (response.status === 200) { - const buffer = Buffer.from(response.data) - const rawAssetUrl = 
`data:image/${payload.output_format};base64,${buffer.toString('base64')}` - const assetUrl = await decodeOutput(rawAssetUrl) - return assetUrl - } else { - throw new Error(`${response.status}: ${response.data.toString()}`); - } -} \ No newline at end of file diff --git a/src/app/api/resolve/route.ts b/src/app/api/resolve/route.ts index 4f0f8f1b9b845f5c606c373d4e2841d8e41e16ad..7955dcf28aef1414538657e1efc26a511eb2df09 100644 --- a/src/app/api/resolve/route.ts +++ b/src/app/api/resolve/route.ts @@ -1,5 +1,5 @@ import { NextResponse, NextRequest } from "next/server" -import { ClapSegmentCategory } from "@aitube/clap" +import { ClapOutputType, ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap" import { resolveSegment as resolveSegmentUsingHuggingFace } from "./providers/huggingface" import { resolveSegment as resolveSegmentUsingComfyReplicate } from "./providers/comfy-replicate" @@ -10,6 +10,9 @@ import { resolveSegment as resolveSegmentUsingModelsLab } from "./providers/mode import { resolveSegment as resolveSegmentUsingStabilityAi } from "./providers/stabilityai" import { ComputeProvider, ResolveRequest } from "@/types" +import { decodeOutput } from "@/lib/utils/decodeOutput" +import { getTypeAndExtension } from "@/lib/utils/getTypeAndExtension" +import { getMediaInfo } from "@/lib/ffmpeg/getMediaInfo" export async function POST(req: NextRequest) { // do we really need to secure it? 
@@ -53,8 +56,53 @@ export async function POST(req: NextRequest) { : null if (!resolveSegment) { throw new Error(`Provider ${provider} is not supported yet`)} - - const segment = await resolveSegment(request) + + let segment = request.segment + + try { + segment = await resolveSegment(request) + + // we clean-up and parse the output from all the resolvers: + // this will download files hosted on CDNs, convert WAV files to MP3 etc + + segment.assetUrl = await decodeOutput(segment.assetUrl) + + segment.assetSourceType = getClapAssetSourceType(segment.assetUrl) + + segment.status = ClapSegmentStatus.COMPLETED + + const { assetFileFormat, outputType } = getTypeAndExtension(segment.assetUrl) + + segment.assetFileFormat = assetFileFormat + segment.outputType = outputType + + if (segment.outputType === ClapOutputType.AUDIO + || + segment.outputType === ClapOutputType.VIDEO + ) { + const { durationInMs, hasAudio } = await getMediaInfo(segment.assetUrl) + segment.assetDurationInMs = durationInMs + + // hasAudio doesn't work properly I think, with small samples + segment.outputGain = hasAudio ? 
1.0 : 0.0 + + /* + console.log(`DEBUG:`, { + durationInMs, + hasAudio, + "segment.assetDurationInMs": segment.assetDurationInMs, + "segment.outputGain": segment.outputGain, + }) + */ + } + } catch (err) { + console.error(`failed to generate a segment: ${err}`) + segment.assetUrl = '' + segment.assetSourceType = getClapAssetSourceType(segment.assetUrl) + segment.assetDurationInMs = 0 + segment.outputGain = 0 + segment.status = ClapSegmentStatus.TO_GENERATE + } return NextResponse.json(segment) } diff --git a/src/components/settings/constants.ts b/src/components/settings/constants.ts index 38674cdebfc9f21ee89a56a0feeeb4ec5e2bb709..da7f4e9ab5db78d57cefda027356339988a4a5c4 100644 --- a/src/components/settings/constants.ts +++ b/src/components/settings/constants.ts @@ -20,12 +20,15 @@ export const computeProviderShortNames = { [ComputeProvider.OPENAI]: "OpenAI", [ComputeProvider.REPLICATE]: "Replicate", [ComputeProvider.STABILITYAI]: "StabilityAI", - [ComputeProvider.MIDJOURNEY]: "Midjourney (no image API)", + [ComputeProvider.MIDJOURNEY]: "Midjourney (no API)", [ComputeProvider.SUNO]: "Suno (no music API)", [ComputeProvider.UDIO]: "Udio (no music API)", - [ComputeProvider.LUMALABS]: "LumaLabs (no video API)", - [ComputeProvider.KUAISHOU]: "KuaiShou (no video API)", - [ComputeProvider.RUNWAYML]: "RunwayML (no video API)", + [ComputeProvider.LUMALABS]: "Luma: Dream Machine (no API)", + [ComputeProvider.KUAISHOU]: "KuaiShou: Kling (no API)", + [ComputeProvider.RUNWAYML]: "RunwayML: GEN-3 (no API)", + [ComputeProvider.HEDRA]: "Hedra: Character-1 (no API)", + [ComputeProvider.LEONARDOAI]: "Leonardo.ai (no API)", + [ComputeProvider.EVERARTAI]: "Everart.ai (no API)", } // for developer sanity purposes, we only support LangChain for now. 
@@ -56,6 +59,7 @@ export const availableComputeProvidersForImages = [ ComputeProvider.FALAI, ComputeProvider.MODELSLAB, ComputeProvider.MIDJOURNEY, + ComputeProvider.EVERARTAI, ] export const availableComputeProvidersForVideos = [ @@ -71,6 +75,8 @@ export const availableComputeProvidersForVideos = [ ComputeProvider.GOOGLE, ComputeProvider.LUMALABS, ComputeProvider.KUAISHOU, + ComputeProvider.HEDRA, + ComputeProvider.LEONARDOAI, ] export const availableComputeProvidersForMusic = [ @@ -208,7 +214,10 @@ export const availableModelsForImageGeneration: Partial> = { @@ -252,18 +277,50 @@ export const availableModelsForVideoUpscaling: Partial> = { + [ComputeProvider.HUGGINGFACE]: [ + "cvssp/audioldm2", + "cvssp/audioldm2-large", + "cvssp/audioldm" + ], [ComputeProvider.FALAI]: [ "fal-ai/stable-audio" + ], + [ComputeProvider.ELEVENLABS]: [ + "v1/sound-generation" + ], + [ComputeProvider.REPLICATE]: [ + "declare-lab/tango", + "suno-ai/bark", + "sepal/audiogen" ] } export const availableModelsForVoiceGeneration: Partial> = { + [ComputeProvider.HUGGINGFACE]: [ + "coqui/XTTS-v2", + "myshell-ai/OpenVoiceV2", + "myshell-ai/OpenVoice", + "WhisperSpeech/WhisperSpeech", + "metavoiceio/metavoice-1B-v0.1", + "parler-tts/parler_tts_mini_v0.1", + "parler-tts/parler-tts-mini-expresso" + ], [ComputeProvider.FALAI]: [ "fal-ai/metavoice-v1" + ], + [ComputeProvider.REPLICATE]: [ + "chenxwh/openvoice" + ], + [ComputeProvider.ELEVENLABS]: [ + "v1/text-to-speech" ] } export const availableModelsForMusicGeneration: Partial> = { + [ComputeProvider.HUGGINGFACE]: [ + "cvssp/audioldm2-music", + "facebook/musicgen-large", + ], [ComputeProvider.FALAI]: [ "fal-ai/stable-audio" ], diff --git a/src/components/toolbars/top-menu/lists/hasNoPublicAPI.ts b/src/components/toolbars/top-menu/lists/hasNoPublicAPI.ts index 933d1c36bc718dec36c80687477f5701c79f733f..8fdd491e1d3773660f7284f44c0ef3bd5d07108e 100644 --- a/src/components/toolbars/top-menu/lists/hasNoPublicAPI.ts +++ 
b/src/components/toolbars/top-menu/lists/hasNoPublicAPI.ts @@ -16,6 +16,7 @@ export function hasNoPublicAPI(model: string) { model === ComputeProvider.KUAISHOU || model === ComputeProvider.RUNWAYML || model === ComputeProvider.LUMALABS || + model === ComputeProvider.HEDRA || model === ComputeProvider.UDIO ) { return true diff --git a/src/controllers/audio/analyzeAudio.ts b/src/controllers/audio/analyzeAudio.ts index 8da94085043fb67ca81f701d6943fa9be57706e2..8600bc88ea901534ba07b14910bf19bf1d344b34 100644 --- a/src/controllers/audio/analyzeAudio.ts +++ b/src/controllers/audio/analyzeAudio.ts @@ -1,6 +1,5 @@ -import { DEFAULT_DURATION_IN_MS_PER_STEP } from "@aitube/timeline" +import { DEFAULT_DURATION_IN_MS_PER_STEP, getAudioBuffer } from "@aitube/timeline" -import { getAudioBuffer } from "./getAudioBuffer" import { AudioAnalysis } from "./types" import { detectBPM } from "./detectBPM" diff --git a/src/controllers/audio/getAudioBuffer.ts b/src/controllers/audio/getAudioBuffer.ts deleted file mode 100644 index 8ece6a6e854d603826a4eb3c2d7372ed822643b9..0000000000000000000000000000000000000000 --- a/src/controllers/audio/getAudioBuffer.ts +++ /dev/null @@ -1,15 +0,0 @@ -import { readFileAsArrayBuffer } from "./readFileAsArrayBuffer" - -export async function getAudioBuffer(file: File): Promise { - const audioContext = new AudioContext() // initialize AudioContext - const arrayBuffer = await readFileAsArrayBuffer(file) - - // decode audio data from your arrayBuffer - return new Promise((resolve, reject) => { - audioContext.decodeAudioData(arrayBuffer, (buffer) => { - resolve(buffer) - }, (err) => { - reject(err) - }) - }) -} \ No newline at end of file diff --git a/src/controllers/audio/readFileAsArrayBuffer.ts b/src/controllers/audio/readFileAsArrayBuffer.ts deleted file mode 100644 index 68926e2966aa4b9176665b02a60c0d3ab5d7afca..0000000000000000000000000000000000000000 --- a/src/controllers/audio/readFileAsArrayBuffer.ts +++ /dev/null @@ -1,16 +0,0 @@ - - -export async 
function readFileAsArrayBuffer(file: File): Promise { - return new Promise((resolve, reject) => { - let reader = new FileReader(); - reader.onload = () => { - // when the reader has loaded, resolve the Promise with the result - resolve(reader.result as ArrayBuffer); - }; - reader.onerror = (error) => { - // if there's an error, reject the Promise with the error - reject(error); - }; - reader.readAsArrayBuffer(file); - }); -} \ No newline at end of file diff --git a/src/controllers/audio/startAudioSourceNode.ts b/src/controllers/audio/startAudioSourceNode.ts index 8d89cf83dda50db2ec16585d19664e5fb0a71351..b1a1c75b73b2b7574c6ee797b0a6f9e405b13c29 100644 --- a/src/controllers/audio/startAudioSourceNode.ts +++ b/src/controllers/audio/startAudioSourceNode.ts @@ -1,6 +1,5 @@ import { UUID } from "@aitube/clap" - -import { RuntimeSegment } from "@/types" +import { RuntimeSegment } from "@aitube/timeline" import { CurrentlyPlayingAudioSource } from "./types" diff --git a/src/controllers/audio/types.ts b/src/controllers/audio/types.ts index b1f9c46e3641982335c3a77a9946669835c9f426..f645cf34e3e13f8580ab83ac92402fd75db7a219 100644 --- a/src/controllers/audio/types.ts +++ b/src/controllers/audio/types.ts @@ -1,4 +1,4 @@ -import { RuntimeSegment } from "@/types" +import { RuntimeSegment } from "@aitube/timeline" export type AudioAnalysis = { audioBuffer: AudioBuffer diff --git a/src/controllers/audio/useAudio.ts b/src/controllers/audio/useAudio.ts index 869ac376dd589ff03fe0a52293cda5e2f028b044..7db124b99e9ce40089db86a0500b1dd17ce9f9c5 100644 --- a/src/controllers/audio/useAudio.ts +++ b/src/controllers/audio/useAudio.ts @@ -1,9 +1,7 @@ "use client" import { create } from "zustand" -import { TimelineStore, useTimeline } from "@aitube/timeline" - -import { RuntimeSegment } from "@/types" +import { TimelineStore, useTimeline, RuntimeSegment } from "@aitube/timeline" import { AudioStore } from "./types" import { getDefaultAudioState } from "./getDefaultAudioState" diff --git 
a/src/controllers/io/parseFileIntoSegments.ts b/src/controllers/io/parseFileIntoSegments.ts index 51d888465e66345ab2f01d9af7243561ad5b72d8..3c1396d237fdf60b36eece74d813e63e6f652893 100644 --- a/src/controllers/io/parseFileIntoSegments.ts +++ b/src/controllers/io/parseFileIntoSegments.ts @@ -1,13 +1,12 @@ "use client" -import { ClapAssetSource, ClapOutputType, ClapSegment, ClapSegmentCategory, ClapSegmentStatus, generateSeed, newSegment, UUID } from "@aitube/clap" -import { findFreeTrack } from "@aitube/timeline" +import { ClapAssetSource, ClapOutputType, ClapSegment, ClapSegmentCategory, ClapSegmentStatus, newSegment, UUID } from "@aitube/clap" +import { RuntimeSegment } from "@aitube/timeline" -import { RuntimeSegment } from "@/types" +import { blobToBase64DataUri } from "@/lib/utils/blobToBase64DataUri" import { analyzeAudio } from "../audio/analyzeAudio" import { ResourceCategory, ResourceType } from "./types" -import { blobToBase64DataUri } from "@/lib/utils/blobToBase64DataUri" export async function parseFileIntoSegments({ file }: { /** diff --git a/src/controllers/metrics/constants.ts b/src/controllers/metrics/constants.ts index df8d7405ed77aad939c13011a8fd0d666cccc6fd..5329ef8f06dc2de1d1d8b0de3e0c7927bed61a89 100644 --- a/src/controllers/metrics/constants.ts +++ b/src/controllers/metrics/constants.ts @@ -66,6 +66,12 @@ export const estimatedMetrics: Record((set, get) => ({ ...getDefaultResolverState(), @@ -75,8 +75,8 @@ export const useResolver = create((set, get) => ({ // segments visible on screen are show first, // then those nearby, then the hidden ones const segments: RuntimeSegment[] = ([...allSegments] as RuntimeSegment[]).sort((segment1, segment2) => { - const priority1 = (SegmentVisibilityPriority as any)[segment1.visibility || SegmentVisibility.HIDDEN] || 0 - const priority2 = (SegmentVisibilityPriority as any)[segment2.visibility || SegmentVisibility.HIDDEN] || 0 + const priority1 = (segmentVisibilityPriority as any)[segment1.visibility || 
SegmentVisibility.HIDDEN] || 0 + const priority2 = (segmentVisibilityPriority as any)[segment2.visibility || SegmentVisibility.HIDDEN] || 0 return priority2 - priority1 }) @@ -385,7 +385,7 @@ export const useResolver = create((set, get) => ({ // throw new Error(`please call setSegmentRender(...) first`) } - const shotSegments: ClapSegment[] = filterSegments( + const segments: ClapSegment[] = filterSegments( ClapSegmentFilteringMode.ANY, segment, allSegments @@ -397,34 +397,78 @@ export const useResolver = create((set, get) => ({ } segment.status = ClapSegmentStatus.IN_PROGRESS + + const entities = clap.entityIndex || {} + + const speakingCharactersIds = segments.map(s => + s.category === ClapSegmentCategory.DIALOGUE ? s.entityId : null + ).filter(id => id) as string[] + + const generalCharactersIds = segments.map(s => + s.category === ClapSegmentCategory.CHARACTER ? s.entityId : null + ).filter(id => id) as string[] + + const mainCharacterId: string | undefined = speakingCharactersIds.at(0) || generalCharactersIds.at(0) || undefined + + const mainCharacterEntity: ClapEntity | undefined = mainCharacterId ? (entities[mainCharacterId] || undefined) : undefined + + const storyboard = segments.find(s => s.category === ClapSegmentCategory.STORYBOARD) - try { - const entities = clap.entityIndex || {} - - const speakingCharactersIds = shotSegments.map(s => - s.category === ClapSegmentCategory.DIALOGUE ? s.entityId : null - ).filter(id => id) as string[] - - const generalCharactersIds = shotSegments.map(s => - s.category === ClapSegmentCategory.CHARACTER ? s.entityId : null - ).filter(id => id) as string[] - - const mainCharacterId: string | undefined = speakingCharactersIds.at(0) || generalCharactersIds.at(0) || undefined - - const mainCharacterEntity: ClapEntity | undefined = mainCharacterId ? 
(entities[mainCharacterId] || undefined) : undefined - - const request: ResolveRequest = { - settings, - segment, - segments: shotSegments, - entities, - speakingCharactersIds, - generalCharactersIds, - mainCharacterId, - mainCharacterEntity, - meta: clap.meta, + const dialogue = segments.find(s => s.category === ClapSegmentCategory.DIALOGUE) + + const imagePrompt = getVideoPrompt( + segments, + entities + ) + + const positiveImagePrompt = [ + settings.imagePromptPrefix, + imagePrompt, + settings.imagePromptSuffix, + ].map(x => x.trim()).filter(x => x).join(", ") + + const negativeImagePrompt = [ + settings.imageNegativePrompt + ].map(x => x.trim()).filter(x => x).join(", ") + + // note: not all AI models will support those parameters. + // in 2024, even the "best" proprietary video models like Sora, Veo, Kling, Gen-3, Dream Machine etc.. + // don't support voice input for lip syncing, for instance. + const prompts: ResolveRequestPrompts = { + image: { + // the "identification picture" of the character, if available + identity: `${mainCharacterEntity?.imageId || ""}`, + positive: positiveImagePrompt, + negative: negativeImagePrompt + }, + video: { + // image to animate + image: `${storyboard?.assetUrl || ""}`, + + // dialogue line to lip-sync + voice: `${dialogue?.assetUrl || ""}`, + }, + voice: { + identity: `${mainCharacterEntity?.audioId || ""}`, + positive: "", + negative: "" } + } + + const request: ResolveRequest = { + settings, + segment, + segments, + entities, + speakingCharactersIds, + generalCharactersIds, + mainCharacterId, + mainCharacterEntity, + meta: clap.meta, + prompts, + } + try { const res = await fetch("/api/resolve", { method: "POST", headers: { @@ -437,26 +481,39 @@ export const useResolver = create((set, get) => ({ const newSegmentData = (await res.json()) as ClapSegment // console.log(`useResolver.resolveSegment(): newSegmentData`, newSegmentData) - const { - id, - assetUrl, - assetDurationInMs, - assetFileFormat, - assetSourceType, - 
status - } = newSegmentData - // note: this modifies the old object in-place - const newSegment = Object.assign(segment, { - id, - assetUrl, - assetDurationInMs, - assetFileFormat, - assetSourceType, - status - }) + // it is super important as this helps preserving the reference + const newSegment = Object.assign(segment, newSegmentData) as RuntimeSegment + + if (newSegment.outputType === ClapOutputType.AUDIO) { + try { + newSegment.audioBuffer = await getAudioBuffer(newSegment.assetUrl) + } catch (err) { + console.error(`failed to load the audio file: ${err}`) + } + } + + // after a segment has ben resolved, it is possible that the size + // of its asset changed (eg. a dialogue line longer than the segment's length) + // + // there are multiple ways to solve this, one approach could be to + // just add some more B-roll (more shots) + // + // or we can also extend it, which is the current simple solution + // + // for the other categories, such as MUSIC or SOUND, + // we assume it is okay if they are too short or too long, + // and that we can crop them etc + // + // note that video clips are also concerned: we want them to perfectly fit + if (newSegment.category === ClapSegmentCategory.DIALOGUE) { + await timeline.fitSegmentToAssetDuration(newSegment) + } else if (newSegment.category === ClapSegmentCategory.VIDEO) { + await timeline.fitSegmentToAssetDuration(newSegment) + } newSegment.status = ClapSegmentStatus.COMPLETED + trackSilentChangeInSegment(newSegment.id) return newSegment } catch (err) { diff --git a/src/controllers/settings/getDefaultSettingsState.ts b/src/controllers/settings/getDefaultSettingsState.ts index 4f9397440ae8ca0af31641688a0c0129a52327d6..088a756a4d8fdc75e18c842ac82fda060722f8e2 100644 --- a/src/controllers/settings/getDefaultSettingsState.ts +++ b/src/controllers/settings/getDefaultSettingsState.ts @@ -68,19 +68,28 @@ export function getDefaultSettingsState(): SettingsState { comfyWorkflowForSound: "{}", comfyWorkflowForMusic: "{}", - // 
"HuggingFaceH4/zephyr-7b-beta" - // "mistralai/Mixtral-8x7B-Instruct-v0.1", - huggingFaceModelForAssistant: "mistralai/Mixtral-8x7B-Instruct-v0.1", - huggingFaceModelForImage: "sd-community/sdxl-flash", - huggingFaceModelForVideo: "", + // now how we prefix everything with "models" + // that's because it will be possible at some point to also + // call a space eg. spaces/openai/sora (this one is just a silly example, of course) + // "models/HuggingFaceH4/zephyr-7b-beta" + // "models/mistralai/Mixtral-8x7B-Instruct-v0.1", + huggingFaceModelForAssistant: "models/mistralai/Mixtral-8x7B-Instruct-v0.1", + huggingFaceModelForImage: "models/sd-community/sdxl-flash", + + // huggingFaceModelForVideo: "spaces/multimodalart/hallo", + huggingFaceModelForVideo: "spaces/hpcai-tech/open-sora", + huggingFaceModelForVoice: "", huggingFaceModelForSound: "", huggingFaceModelForMusic: "", + // those are not designed for Hugging Face specifically, + // but to be compatible with any Gradio API URL that the + // user would set manually (eg. 
running on localhost) gradioApiUrlForAssistant: "", gradioApiUrlForImage: "", gradioApiUrlForVideo: "", - gradioApiUrlForSpeech: "", + gradioApiUrlForVoice: "", gradioApiUrlForSound: "", gradioApiUrlForMusic: "", diff --git a/src/controllers/settings/types.ts b/src/controllers/settings/types.ts index 65277c7b7a773e60a1dac6dec2c4e90906d6d42a..3e936f2e86d675d5d4ede924b836e071747a8082 100644 --- a/src/controllers/settings/types.ts +++ b/src/controllers/settings/types.ts @@ -81,7 +81,7 @@ export type SettingsState = { gradioApiUrlForAssistant: string gradioApiUrlForImage: string gradioApiUrlForVideo: string - gradioApiUrlForSpeech: string + gradioApiUrlForVoice: string gradioApiUrlForSound: string gradioApiUrlForMusic: string @@ -210,7 +210,7 @@ export type SettingsControls = { setGradioApiUrlForAssistant: (gradioApiUrlForAssistant?: string) => void setGradioApiUrlForImage: (gradioApiUrlForImage?: string) => void setGradioApiUrlForVideo: (gradioApiUrlForVideo?: string) => void - setGradioApiUrlForSpeech: (gradioApiUrlForSpeech?: string) => void + setGradioApiUrlForVoice: (gradioApiUrlForVoice?: string) => void setGradioApiUrlForSound: (gradioApiUrlForSound?: string) => void setGradioApiUrlForMusic: (gradioApiUrlForMusic?: string) => void diff --git a/src/controllers/settings/useSettings.ts b/src/controllers/settings/useSettings.ts index e37c4369a7b3ead6512627de3aea4b7b827c0003..575551f4412e0c2feddc25b521a074f4e7958c84 100644 --- a/src/controllers/settings/useSettings.ts +++ b/src/controllers/settings/useSettings.ts @@ -245,8 +245,8 @@ export const useSettings = create()( setGradioApiUrlForVideo: (gradioApiUrlForVideo?: string) => { set({ gradioApiUrlForVideo: getValidString(gradioApiUrlForVideo, getDefaultSettingsState().gradioApiUrlForVideo) }) }, - setGradioApiUrlForSpeech: (gradioApiUrlForSpeech?: string) => { - set({ gradioApiUrlForSpeech: getValidString(gradioApiUrlForSpeech, getDefaultSettingsState().gradioApiUrlForSpeech) }) + setGradioApiUrlForVoice: 
(gradioApiUrlForVoice?: string) => { + set({ gradioApiUrlForVoice: getValidString(gradioApiUrlForVoice, getDefaultSettingsState().gradioApiUrlForVoice) }) }, setGradioApiUrlForSound: (gradioApiUrlForSound?: string) => { set({ gradioApiUrlForSound: getValidString(gradioApiUrlForSound, getDefaultSettingsState().gradioApiUrlForSound) }) @@ -449,7 +449,7 @@ export const useSettings = create()( gradioApiUrlForAssistant: state.gradioApiUrlForAssistant || defaultSettings.gradioApiUrlForAssistant, gradioApiUrlForImage: state.gradioApiUrlForImage || defaultSettings.gradioApiUrlForImage, gradioApiUrlForVideo: state.gradioApiUrlForVideo || defaultSettings.gradioApiUrlForVideo, - gradioApiUrlForSpeech: state.gradioApiUrlForSpeech || defaultSettings.gradioApiUrlForSpeech, + gradioApiUrlForVoice: state.gradioApiUrlForVoice || defaultSettings.gradioApiUrlForVoice, gradioApiUrlForSound: state.gradioApiUrlForSound || defaultSettings.gradioApiUrlForSound, gradioApiUrlForMusic: state.gradioApiUrlForMusic || defaultSettings.gradioApiUrlForMusic, replicateModelForImage: state.replicateModelForImage || defaultSettings.replicateModelForImage, diff --git a/src/lib/ffmpeg/getMediaInfo.ts b/src/lib/ffmpeg/getMediaInfo.ts new file mode 100644 index 0000000000000000000000000000000000000000..e971772d67aaf1bf31bdf923f0662b24904e015d --- /dev/null +++ b/src/lib/ffmpeg/getMediaInfo.ts @@ -0,0 +1,83 @@ +import { tmpdir } from "node:os" +import { writeFile, rm } from "node:fs/promises" +import { join } from "node:path" + +import ffmpeg from "fluent-ffmpeg" + +export type MediaMetadata = { + durationInSec: number; + durationInMs: number; + hasAudio: boolean; +}; + +/** + * Get the media info of a base64 or file path + * @param input + * @returns + */ +export async function getMediaInfo(input: string): Promise { + // If the input is a base64 string + if (input.startsWith("data:")) { + // Extract the base64 content + // Extract the base64 content + const [head, tail] = input.split(";base64,") + if 
(!tail) { + throw new Error("Invalid base64 data"); + } + + const extension = head.split("/").pop() || "" + const base64Content = tail || "" + + // Decode the base64 content to a buffer + const buffer = Buffer.from(base64Content, 'base64') + + // Generate a temporary file name + const tempFileName = join(tmpdir(), `temp-media-${Date.now()}.${extension}`); + + // Write the buffer to a temporary file + await writeFile(tempFileName, buffer); + + // Get metadata from the temporary file then delete the file + try { + return await getMetaDataFromPath(tempFileName); + } finally { + await rm(tempFileName); + } + } + + // If the input is a path to the file + return await getMetaDataFromPath(input); +} + +async function getMetaDataFromPath(filePath: string): Promise { + return new Promise((resolve, reject) => { + ffmpeg.ffprobe(filePath, (err, metadata) => { + + let results = { + durationInSec: 0, + durationInMs: 0, + hasAudio: false, + } + + if (err) { + console.error("getMediaInfo(): failed to analyze the source (might happen with empty files)", err) + // reject(err); + resolve(results); + return; + } + + try { + results.durationInSec = metadata?.format?.duration || 0; + results.durationInMs = results.durationInSec * 1000; + results.hasAudio = (metadata?.streams || []).some((stream) => stream.codec_type === 'audio'); + + } catch (err) { + console.error(`getMediaInfo(): failed to analyze the source (might happen with empty files)`, err) + results.durationInSec = 0 + results.durationInMs = 0 + results.hasAudio = false + } + resolve(results); + }); + }); +} \ No newline at end of file diff --git a/src/lib/hf/adapter/README.md b/src/lib/hf/adapter/README.md new file mode 100644 index 0000000000000000000000000000000000000000..f41c5fb3c5a6228f7a6e7d2e45837e8ab878d97e --- /dev/null +++ b/src/lib/hf/adapter/README.md @@ -0,0 +1,3 @@ +Important: if you add a new field, +please make sure you check all the functions inside adapter/*.ts files +to support it \ No newline at end of file 
diff --git a/src/lib/hf/adapter/adaptAnyInputsToGradioInputs.ts b/src/lib/hf/adapter/adaptAnyInputsToGradioInputs.ts new file mode 100644 index 0000000000000000000000000000000000000000..bc9076806a10631dbdf20ccaa6463b57e999eba1 --- /dev/null +++ b/src/lib/hf/adapter/adaptAnyInputsToGradioInputs.ts @@ -0,0 +1,65 @@ +import { GradioApiInfo, SupportedFields } from "../types" +import { identifyField } from "./identifyField" +import { getDefaultFields } from "./getDefaultFields" +import { findMainGradioEndpoint } from "./findMainGradioEndpoint" + +/** + * This function try to adapt arbitrary inputs to strict gradio inputs + * + * @param param0 + * @returns + */ +export function adaptAnyInputsToGradioInputs({ + inputs, + gradioApiInfo, +}: { + inputs: Record, + gradioApiInfo: GradioApiInfo +}): { + endpoint: string + inputs: Array + } { + + const mainGradioEndpoint = findMainGradioEndpoint({ gradioApiInfo }) + + if (!mainGradioEndpoint) { + throw new Error(`couldn't find a suitable endpoint`) + } + + // input fields passed by the parent calling function + let inputFields: Record> = {} + let allInputFields = getDefaultFields() + for (const [key, value] of Object.entries(inputs)) { + const inputField = identifyField(key, value) + inputFields[key] = inputField + allInputFields = {...allInputFields, ...inputField} + } + + // the gradio input array + const gradioInputs: any[] = [] + + for (const parameter of mainGradioEndpoint.endpoint.parameters) { + let gradioInputValue: any = undefined + + const fields = mainGradioEndpoint.fields[parameter.parameter_name] + + // TODO: rewrite this in a better way maybe + // until then, please don't blame me if you forget to update those! 
+ if (fields.hasPositiveTextPrompt) { gradioInputValue = allInputFields.inputPositiveTextPrompt } + if (fields.hasNegativeTextPrompt) { gradioInputValue = allInputFields.inputNegativeTextPrompt } + if (fields.hasInputImage) { gradioInputValue = allInputFields.inputImage } + if (fields.hasInputAudio) { gradioInputValue = allInputFields.inputAudio } + if (fields.hasInputWidth) { gradioInputValue = allInputFields.inputWidth } + if (fields.hasInputHeight) { gradioInputValue = allInputFields.inputHeight } + if (fields.hasInputSteps) { gradioInputValue = allInputFields.inputSteps } + if (fields.hasInputGuidance) { gradioInputValue = allInputFields.inputGuidance } + if (fields.hasInputSeed) { gradioInputValue = allInputFields.inputSeed } + + gradioInputs.push(gradioInputValue) + } + + return { + endpoint: mainGradioEndpoint.name, + inputs: gradioInputs + } +} \ No newline at end of file diff --git a/src/lib/hf/adapter/findMainGradioEndpoint.ts b/src/lib/hf/adapter/findMainGradioEndpoint.ts new file mode 100644 index 0000000000000000000000000000000000000000..6393b91264b5f6dc9109366f92fd6ab8689b4847 --- /dev/null +++ b/src/lib/hf/adapter/findMainGradioEndpoint.ts @@ -0,0 +1,57 @@ +import { GradioApiInfo, GradioEndpoint, SupportedFields } from "../types" +import { identifyField } from "./identifyField" +import { getDefaultFields } from "./getDefaultFields" +import { getAdaptationScore } from "./getAdaptationScore" + +/** + * Find the main entrypoint (main entry endpoint) of a Gradio API + */ +export function findMainGradioEndpoint({ + gradioApiInfo, +}: { + gradioApiInfo: GradioApiInfo +}): GradioEndpoint | undefined { + const endpoints: GradioEndpoint[] = [ + ...Object.entries(gradioApiInfo.named_endpoints) + .map(([name, endpoint]) => ({ isNamed: true, name, endpoint, fields: {}, score: 0 })), + ...Object.entries(gradioApiInfo.unnamed_endpoints) + .map(([name, endpoint]) => ({ isNamed: true, name, endpoint, fields: {}, score: 0 })), + ] + + // generally the main entry 
point will be called "/run", "/call", "/predict" etc + // and contain stuff we usually expect: a text prompt, or image etc + const sortableEndpoints = endpoints.map(({ isNamed, name, endpoint, score }) => { + console.log(`found endpoint: ${name}`) + + const isContinuous = !!endpoint.type?.continuous + const isGenerator = !!endpoint.type?.generator + const canCancel = !!endpoint.type?.cancel + + let gradioFields: Record> = {} + let allGradioFields = getDefaultFields() + for (const gradioParameter of endpoint.parameters) { + const gradioParameterField = identifyField( + gradioParameter.parameter_name, + gradioParameter.parameter_default + ) + gradioFields[gradioParameter.parameter_name] = gradioParameterField + allGradioFields = { ...allGradioFields, ...gradioParameterField } + } + + score = getAdaptationScore(allGradioFields) + console.log(`allGradioFields:`, allGradioFields) + console.log(`score:`, score) + + return { + isNamed, + name, + endpoint, + fields: gradioFields, + score, + } + }) + + return sortableEndpoints.sort((a, b) => { + return b.score - a.score + }).at(0) +} \ No newline at end of file diff --git a/src/lib/hf/adapter/getAdaptationScore.ts b/src/lib/hf/adapter/getAdaptationScore.ts new file mode 100644 index 0000000000000000000000000000000000000000..6840e900b558a85ec3931d6904641b11237c83c0 --- /dev/null +++ b/src/lib/hf/adapter/getAdaptationScore.ts @@ -0,0 +1,18 @@ +import { SupportedFields } from "../types" + +export function getAdaptationScore(supportedFields: SupportedFields) { + let score = 0 + + // TODO: rewrite this in a better way maybe + // until then, please don't blame me if you forget to update those! + score += supportedFields.hasPositiveTextPrompt ? 1 : 0 + score += supportedFields.hasNegativeTextPrompt ? 1 : 0 + score += supportedFields.hasInputImage ? 1 : 0 + score += supportedFields.hasInputAudio ? 1 : 0 + score += supportedFields.hasInputWidth ? 1 : 0 + score += supportedFields.hasInputHeight ? 
1 : 0 + score += supportedFields.hasInputSteps ? 1 : 0 + score += supportedFields.hasInputGuidance ? 1 : 0 + score += supportedFields.inputSeed ? 1 : 0 + return score +} \ No newline at end of file diff --git a/src/lib/hf/adapter/getDefaultFields.ts b/src/lib/hf/adapter/getDefaultFields.ts new file mode 100644 index 0000000000000000000000000000000000000000..dcac2d4695a8384f98a834562aa748c6314f65a0 --- /dev/null +++ b/src/lib/hf/adapter/getDefaultFields.ts @@ -0,0 +1,24 @@ +import { SupportedFields } from "../types" + +export function getDefaultFields(): SupportedFields { + return { + inputPositiveTextPrompt: "", + hasPositiveTextPrompt: false, + inputNegativeTextPrompt: "", + hasNegativeTextPrompt: false, + inputImage: "", + hasInputImage: false, + inputAudio: "", + hasInputAudio: false, + inputWidth: 1024, + hasInputWidth: false, + inputHeight: 574, + hasInputHeight: false, + inputSteps: 8, + hasInputSteps: false, + inputGuidance: 7, + hasInputGuidance: false, + inputSeed: 0, + hasInputSeed: false + } +} \ No newline at end of file diff --git a/src/lib/hf/adapter/identifyField.ts b/src/lib/hf/adapter/identifyField.ts new file mode 100644 index 0000000000000000000000000000000000000000..ec667a8e5b1500e962fa3c0f200913c2ff813402 --- /dev/null +++ b/src/lib/hf/adapter/identifyField.ts @@ -0,0 +1,218 @@ +import { SupportedFields } from "../types" + +export function identifyField(key: string, value?: any, index?: number): Partial { + const normalizedKey = key.toLowerCase().trim() + switch (normalizedKey) { + + case "width": + let strWidth = "" + let numWidth = 0 + if (typeof value === "string" && value.length) { + strWidth = value + } + let maybeNumWidth = Number(strWidth) + if (typeof maybeNumWidth === "number" && isFinite(maybeNumWidth) && !isNaN(maybeNumWidth) && maybeNumWidth) { + numWidth = maybeNumWidth + return { + hasInputWidth: true, + inputWidth: numWidth + } + } else if (strWidth) { + return { + hasInputWidth: true, + inputWidth: strWidth + } + } else { + 
return { + hasInputWidth: true, + // indexInputWidth: index, + } + } + + case "height": + let strHeight = "" + let numHeight = 0 + if (typeof value === "string" && value.length) { + strHeight = value + } + let maybeNumHeight = Number(strHeight) + if (typeof maybeNumHeight === "number" && isFinite(maybeNumHeight) && !isNaN(maybeNumHeight) && maybeNumHeight) { + numHeight = maybeNumHeight + return { + hasInputHeight: true, + inputHeight: numHeight + } + } else if (strHeight) { + return { + hasInputHeight: true, + inputHeight: strHeight + } + } else { + return { + hasInputHeight: true, + // indexInputHeight: index, + } + } + + case "seed": + let strSeed = "" + let numSeed = 0 + if (typeof value === "string" && value.length) { + strSeed = value + } + let maybeNumSeed = Number(strSeed) + if (typeof maybeNumSeed === "number" && isFinite(maybeNumSeed) && !isNaN(maybeNumSeed) && maybeNumSeed) { + numSeed = maybeNumSeed + return { + hasInputSeed: true, + inputSeed: numSeed + } + } else if (strSeed) { + return { + hasInputSeed: true, + inputSeed: strSeed + } + } else { + return { + hasInputSeed: true, + // indexInputSeed: index, + } + } + + case "steps": + case "n_steps": + case "nb_steps": + case "num_steps": + case "step_count": + case "inference_steps": + case "n_inference_steps": + case "nb_inference_steps": + case "num_inference_steps": + let strSteps = "" + let numSteps = 0 + if (typeof value === "string" && value.length) { + strSteps = value + } + let maybeNumSteps = Number(strSteps) + if (typeof maybeNumSteps === "number" && isFinite(maybeNumSteps) && !isNaN(maybeNumSteps) && maybeNumSteps) { + numSteps = maybeNumSteps + return { + hasInputSteps: true, + inputSteps: numSteps + } + } else if (strSteps) { + return { + hasInputSteps: true, + inputSteps: strSteps + } + } else { + return { + hasInputSteps: true, + // indexInputSteps: index, + } + } + + // note: what we have to choose depends on what Gradio expects + // steps = numSteps + break; + + case "guidance": + case 
"guidance_scale": + case "guidancescale": + let strGuidanceScale = "" + let numGuidanceScale = 0 + if (typeof value === "string" && value.length) { + strGuidanceScale = value + } + let maybeNumGuidanceScale = Number(strGuidanceScale) + if (typeof maybeNumGuidanceScale === "number" && isFinite(maybeNumGuidanceScale) && !isNaN(maybeNumGuidanceScale) && maybeNumGuidanceScale) { + numGuidanceScale = maybeNumGuidanceScale + return { + hasInputGuidance: true, + inputGuidance: numGuidanceScale + } + } else if (strGuidanceScale) { + return { + hasInputGuidance: true, + inputGuidance: strGuidanceScale + } + } else { + return { + hasInputGuidance: true, + // indexInputGuidance: index, + } + } + + case "negative": + case "negativeprompt": + case "negative_prompt": + if (typeof value === "string" && value.length) { + return { + hasNegativeTextPrompt: true, + inputNegativeTextPrompt: value, + } + } else { + return { + hasNegativeTextPrompt: true, + // indexNegativeTextPrompt: index, + } + } + + case "source_image": + case "input_image": + case "image_input": + case "image": + case "image": + if (typeof value === "string" && value.length) { + return { + hasInputImage: true, + inputImage: value + } + } else { + return { + hasInputImage: true, + // indexPositiveTextPrompt: index, + } + } + + case "source_audio": + case "input_audio": + case "audio_input": + case "driving_audio": + case "voice": + case "audio": + if (typeof value === "string" && value.length) { + return { + hasInputAudio: true, + inputAudio: value + } + } else { + return { + hasInputAudio: true, + // indexPositiveTextPrompt: index, + } + } + + case "prompt": + case "positive": + case "positiveprompt": + case "positive_prompt": + case "input_prompt": + case "input_text": + case "prompt_text": + case "text_prompt": + case "text": + if (typeof value === "string" && value.length) { + return { + hasPositiveTextPrompt: true, + inputPositiveTextPrompt: value + } + } else { + return { + hasPositiveTextPrompt: true, + // 
indexPositiveTextPrompt: index, + } + } + } + return {} +} \ No newline at end of file diff --git a/src/lib/hf/callGradioApi.ts b/src/lib/hf/callGradioApi.ts new file mode 100644 index 0000000000000000000000000000000000000000..4cf02e84a0588f98931fbdc33bea2fe7d5092ae8 --- /dev/null +++ b/src/lib/hf/callGradioApi.ts @@ -0,0 +1,77 @@ +import { Client } from "@gradio/client" + +import { getGradioApiInfo } from "./getGradioApiInfo" +import { parseHuggingFaceHubId } from "./parseHuggingFaceHubId" +import { adaptAnyInputsToGradioInputs } from "./adapter/adaptAnyInputsToGradioInputs" +import { getCurrentOwner } from "./getCurrentOwner" + +/** + * + * @param param0 + * @returns + */ +export async function callGradioApi({ + url, + inputs, + apiKey +}: { + url: string + inputs: Record + apiKey?: string +}): Promise { + + // console.log(`callGradioApi called on: `, { url, apiKey }) + // we can support either a call to the original space, or to the current user space + + const { owner: previousOwner, id } = parseHuggingFaceHubId(url, "spaces") + + // console.log(`then: `, { previousOwner, id }) + + const owner = apiKey ? (await getCurrentOwner(apiKey)) : previousOwner + + const ownerAndId = `${owner}/${id}` + // console.log(`then: `, { owner, ownerAndId }) + // TODO: if the user doesn't have forked the space yet we should ask the user to do sp + + /* + // first step is to check if the user already has this space + const gradioSpaces = await getSpaces({ apiKey, sdk: "gradio" }) + const gradioSpace = gradioSpaces.find(s => s.name === id) + + if (gradioSpace) { + // good, there is already a space for that + console.log(`good, user did the homework and forked the space to their own account`) + } else { + // bad, there is no space for that + console.log(`bad, user should fork the space`) + throw new Error(`Couldn't find "${ownerAndId}". 
Please make sure you created or duplicated the space correctly.`) + } + */ + + const gradioApiInfo = await getGradioApiInfo({ + url: ownerAndId, + apiKey + }) + + // console.log(`gradioApiInfo: `, gradioApiInfo) + + const gradioEndpointInputs = adaptAnyInputsToGradioInputs({ + inputs, + gradioApiInfo + }) + + // console.log(`gradioEndpointInputs: `, gradioEndpointInputs) + + const app = await Client.connect(ownerAndId, { + hf_token: apiKey as any + }) + // console.log(`app: `, app) + + const output = await app.predict( + gradioEndpointInputs.endpoint, + gradioEndpointInputs.inputs + ) + console.log(`output: `, output) + + return output.data as unknown as T +} \ No newline at end of file diff --git a/src/lib/hf/cloneSpace.ts b/src/lib/hf/cloneSpace.ts new file mode 100644 index 0000000000000000000000000000000000000000..c48d8c1f959f2298a35cdd953286f14a63c68322 --- /dev/null +++ b/src/lib/hf/cloneSpace.ts @@ -0,0 +1,11 @@ +import { listSpaces, Credentials, whoAmI, SpaceSdk } from "@huggingface/hub" + +export async function cloneSpace({ + id, + apiKey +}: { + id: string + apiKey: string +}) { + +} \ No newline at end of file diff --git a/src/lib/hf/getCurrentOwner.ts b/src/lib/hf/getCurrentOwner.ts new file mode 100644 index 0000000000000000000000000000000000000000..887c3148bdd2e0947cadd15785f8e4eb89ac9499 --- /dev/null +++ b/src/lib/hf/getCurrentOwner.ts @@ -0,0 +1,25 @@ +import { Credentials, whoAmI } from "@huggingface/hub" + +export async function getCurrentOwner(apiKey: string): Promise { + + const accessToken = apiKey || "" + + if (!accessToken) { + throw new Error(`cannot list spaces without a Hugging Face access token`) + } + + const credentials: Credentials = { accessToken } + + let username = "" + try { + const { name } = await whoAmI({ credentials }) + username = name + if (!username) { + throw new Error(`returned username is empty`) + } + } catch (err) { + throw new Error(`cannot list spaces: ${err}`) + } + + return username +} \ No newline at end of file 
diff --git a/src/lib/hf/getGradioApiInfo.ts b/src/lib/hf/getGradioApiInfo.ts new file mode 100644 index 0000000000000000000000000000000000000000..94e1882936e5ee4bdb3a5c3596d8d2309a8fe53c --- /dev/null +++ b/src/lib/hf/getGradioApiInfo.ts @@ -0,0 +1,20 @@ +import { Client } from "@gradio/client" + +import { GradioApiInfo } from "./types" +import { parseHuggingFaceHubId } from "./parseHuggingFaceHubId" + +export async function getGradioApiInfo({ + url, + apiKey, +}: { + url: string + apiKey?: string +}): Promise { + const { ownerAndId } = parseHuggingFaceHubId(url, "spaces") + + const app = await Client.connect(ownerAndId, { + hf_token: apiKey as any + }) + const apiInfo: GradioApiInfo = await app.view_api() + return apiInfo +} \ No newline at end of file diff --git a/src/lib/hf/getSpaceStatus.ts b/src/lib/hf/getSpaceStatus.ts new file mode 100644 index 0000000000000000000000000000000000000000..119c99e0726f66001d86194aa200be8ecd29df73 --- /dev/null +++ b/src/lib/hf/getSpaceStatus.ts @@ -0,0 +1,34 @@ +import { HFSpaceStatus } from "./types" +import { parseHuggingFaceHubId } from "./parseHuggingFaceHubId" + + +export async function getSpaceStatus({ + id, + apiKey, +}: { + id: string + apiKey: string +}): Promise { + + const { category, ownerAndId } = parseHuggingFaceHubId(id) + if (category !== "spaces") { + throw new Error(`cannot get the running status of ${category} "${ownerAndId}": this is not a space!`) + } + const res = await fetch(`https://huggingface.co/api/spaces/${ownerAndId}`, { + method: "GET", + headers: { + Authorization: `Bearer ${apiKey}` + } + }) + + if (res.status !== 200) { + throw new Error("failed to get the space data") + } + + try { + const data = await res.json() as HFSpaceStatus + return data + } catch (err) { + throw new Error(`failed to parse space data: ${err}`) + } +} diff --git a/src/lib/hf/getMyGradioSpaces.ts b/src/lib/hf/getSpaces.ts similarity index 55% rename from src/lib/hf/getMyGradioSpaces.ts rename to src/lib/hf/getSpaces.ts index 
5138cf0743383a5e2ae3305c033e3b0af12569cc..4dbd99730d36f214b20b580966b2a996e4b69391 100644 --- a/src/lib/hf/getMyGradioSpaces.ts +++ b/src/lib/hf/getSpaces.ts @@ -1,13 +1,15 @@ -import { listSpaces, Credentials, whoAmI, SpaceEntry } from "@huggingface/hub" -import { GradioSpace } from "./types" +import { listSpaces, Credentials, whoAmI, SpaceSdk } from "@huggingface/hub" +import { HFSpace } from "./types" -export async function getMyGradioSpaces({ - huggingFaceApiKey +export async function getSpaces({ + apiKey, + sdk = "gradio" }: { - huggingFaceApiKey: string -}): Promise { + apiKey: string + sdk?: SpaceSdk +}): Promise { - const accessToken = huggingFaceApiKey || "" + const accessToken = apiKey || "" if (!accessToken) { throw new Error(`cannot list spaces without a Hugging Face access token`) @@ -26,9 +28,7 @@ export async function getMyGradioSpaces({ throw new Error(`cannot list spaces: ${err}`) } - - let maxNbSpaces = 10 - let gradioSpaces: GradioSpace[] = [] + let results: HFSpace[] = [] for await (const space of listSpaces({ search: { @@ -42,14 +42,10 @@ export async function getMyGradioSpaces({ ], credentials })) { - if ( - space.sdk !== "gradio" - ) { continue } - - console.log("MySpace:", gradioSpaces) - gradioSpaces.push(space) + if (sdk && space.sdk != sdk) { continue } + results.push(space) } - return gradioSpaces + return results } \ No newline at end of file diff --git a/src/lib/hf/parseHuggingFaceHubId.ts b/src/lib/hf/parseHuggingFaceHubId.ts new file mode 100644 index 0000000000000000000000000000000000000000..3c47c9d3358d13936032f50ea8e61596904c8491 --- /dev/null +++ b/src/lib/hf/parseHuggingFaceHubId.ts @@ -0,0 +1,35 @@ +import { HFHubCategory } from "./types" + +export function parseHuggingFaceHubId(input?: any, defaultCategory: HFHubCategory = "models"): { + category: HFHubCategory + owner: string + id: string + categoryAndOwnerAndId: string + ownerAndId: string +} { + let inputStr = `${input || ""}` + + if (inputStr.includes(".co/")) { + inputStr 
= inputStr.split(".co/").pop() || "" + } else if (inputStr.includes(".com/")) { + inputStr = inputStr.split(".com/").pop() || "" + } + + let parts = inputStr.split("/") + if (parts.length < 2 || parts.length > 3) { throw new Error(`input seems invalid, cannot extract chunks`) } + + if (parts.length === 2) { + parts = [defaultCategory, parts[0], parts[1]] + } + + const [category, owner, id] = parts + + + return { + category: category as HFHubCategory, + owner, + id, + categoryAndOwnerAndId: `${category}/${owner}/${id}`, + ownerAndId: `${owner}/${id}`, + } +} \ No newline at end of file diff --git a/src/lib/hf/runSpace.ts b/src/lib/hf/runSpace.ts new file mode 100644 index 0000000000000000000000000000000000000000..1c3d11212962722c916624de580cb31b3c23bd8c --- /dev/null +++ b/src/lib/hf/runSpace.ts @@ -0,0 +1,27 @@ +import { getSpaces } from "./getSpaces" +import { parseHuggingFaceHubId } from "./parseHuggingFaceHubId" + +export async function runGradioSpace({ + url, + params, + apiKey, + sleepAfter = "hour" +}: { + url: string + params?: I + apiKey: string + sleepAfter?: "hour" | "day" +}): Promise { + const { id } = parseHuggingFaceHubId(url) + + let gradioSpaces = await getSpaces({ apiKey, sdk: "gradio" }) + + if (gradioSpaces.find(s => s.name === id)) { + console.log("runGradioSpace: good, we already have cloned the space") + } else { + console.log("runGradioSpace: hm, we need to clone the space") + console.log("runGradioSpace: we might want to ask the user for confirmation here") + + } + return {} as O +} diff --git a/src/lib/hf/types.ts b/src/lib/hf/types.ts index ac71c9f7736bab3fa4129cb5a62bdf88622b877f..2dce4c2fbe25dd92738e2955e7480a91f43f2fb8 100644 --- a/src/lib/hf/types.ts +++ b/src/lib/hf/types.ts @@ -1,5 +1,65 @@ -import { SpaceEntry} from "@huggingface/hub" +import { SpaceEntry, SpaceRuntime } from "@huggingface/hub" import { ApiSpaceInfo } from "@huggingface/hub/dist/src/types/api/api-space" +import { ApiInfo, EndpointInfo, JsApiData } from 
"@gradio/client/dist/types" -export type GradioSpace = - SpaceEntry & Pick \ No newline at end of file +export type HFSpace = + SpaceEntry & Pick + +export interface HFSpaceStatus { + _id: string + id: string + author: string + sha: string + lastModified: string + private: boolean + gated: boolean + disabled: boolean + host: string + subdomain: string + tags: string[] + likes: number + sdk: string + runtime: SpaceRuntime + createdAt: string +} + +export type HFHubCategory = "spaces" | "models" + +export type GradioApiInfo = ApiInfo + +export type SupportedFields = { + inputPositiveTextPrompt: string + hasPositiveTextPrompt: boolean + + inputNegativeTextPrompt: string + hasNegativeTextPrompt: boolean + + inputImage: string + hasInputImage: boolean + + inputAudio: string + hasInputAudio: boolean + + inputWidth: string | number + hasInputWidth: boolean + + inputHeight: string | number + hasInputHeight: boolean + + inputSteps: string | number + hasInputSteps: boolean + + inputGuidance: string | number + hasInputGuidance: boolean + + inputSeed: string | number + hasInputSeed: boolean +} + +export type GradioEndpoint = { + isNamed: boolean + name: string + endpoint: EndpointInfo + fields: Record> + score: number +} diff --git a/src/lib/hf/useMyGradioSpaces.ts b/src/lib/hf/useMyGradioSpaces.ts index 0d6e7d3bcc1d5b6fd07cd895893a8460953d5943..fcdf205a11f092e3058754d2e4b346a6090f1e50 100644 --- a/src/lib/hf/useMyGradioSpaces.ts +++ b/src/lib/hf/useMyGradioSpaces.ts @@ -2,18 +2,18 @@ import { useEffect, useState } from "react" import { useSettings } from "@/controllers/settings" -import { GradioSpace } from "./types" -import { getMyGradioSpaces } from "./getMyGradioSpaces" +import { getSpaces } from "./getSpaces" +import { HFSpace } from "./types" -export function useMyGradioSpaces() { - const [gradioSpaces, setGradioSpaces] = useState([]) +export function useMyGradioSpaces(): HFSpace[] { + const [gradioSpaces, setGradioSpaces] = useState([]) const huggingFaceApiKey = 
useSettings(s => s.huggingFaceApiKey) useEffect(() => { const fn = async () => { try { - const newSpaces = await getMyGradioSpaces({ huggingFaceApiKey }) + const newSpaces = await getSpaces({ apiKey: huggingFaceApiKey, sdk: "gradio" }) setGradioSpaces(newSpaces) } catch (err) { console.error(`failed to load the Gradio spaces (most likely your HF token is invalid)`, err) diff --git a/src/lib/utils/getResolveRequestPrompts.ts b/src/lib/utils/getResolveRequestPrompts.ts deleted file mode 100644 index bb6408af16eef6ee57d9bb436d9af5830d3a619c..0000000000000000000000000000000000000000 --- a/src/lib/utils/getResolveRequestPrompts.ts +++ /dev/null @@ -1,67 +0,0 @@ -import { ClapEntity, ClapSegment, ClapSegmentCategory } from "@aitube/clap" -import { getVideoPrompt } from "@aitube/engine" - -import { SettingsState } from "@/controllers/settings" - -export function getResolveRequestPrompts({ - settings, - segment, - segments, - entities, -}: { - settings: SettingsState - segment: ClapSegment - segments: ClapSegment[] - entities: Record -}): { - positivePrompt: string - negativePrompt: string -} { - - const videoPrompt = getVideoPrompt( - segments, - entities - ) - - const positivePrompt = [ - segment.category === ClapSegmentCategory.VIDEO - ? settings.videoPromptPrefix - : segment.category === ClapSegmentCategory.STORYBOARD - ? settings.imagePromptPrefix - : "", - videoPrompt, - segment.category === ClapSegmentCategory.VIDEO - ? settings.videoPromptSuffix - : segment.category === ClapSegmentCategory.STORYBOARD - ? settings.imagePromptSuffix - : "" - ].map(x => x.trim()).filter(x => x).join(", ") - - const negativePrompt = [ - segment.category === ClapSegmentCategory.VIDEO - ? settings.videoNegativePrompt - : segment.category === ClapSegmentCategory.STORYBOARD - ? 
settings.imageNegativePrompt - : "" - ].map(x => x.trim()).filter(x => x).join(", ") - - /* - console.log(`getResolveRequestPrompts:`, { - segments: segments.map(s => ({ - ...s, - assetUrl: "" - })), - videoPrompt, - positivePrompt, - negativePrompt, - // entity: entities['6c1e99b5-02af-47fc-8e6a-0a5bcb9ee8b1'], - }) - throw new Error("uh uh") - */ - - - return { - positivePrompt, - negativePrompt - } -} \ No newline at end of file diff --git a/src/lib/utils/getTypeAndExtension.ts b/src/lib/utils/getTypeAndExtension.ts new file mode 100644 index 0000000000000000000000000000000000000000..f68c5608299dd978e930fb5dcbc4f33bbac0da9a --- /dev/null +++ b/src/lib/utils/getTypeAndExtension.ts @@ -0,0 +1,47 @@ +import { ClapOutputType } from "@aitube/clap" + +/** + * break a base64 string into sub-components + */ +export function getTypeAndExtension(base64: string = ""): { + + // category eg. video, audio, text + category: string + + // file format eg. video/mp4 text/html audio/wave + assetFileFormat: string + + // file extension eg. 
.mp4 .html .wav + extension: string + + outputType: ClapOutputType +} { + // Regular expression to extract the MIME type and the base64 data + const matches = base64.match(/^data:([A-Za-z-+0-9/]+);base64,(.+)$/) + + if (!matches || matches.length !== 3) { + throw new Error("Invalid base64 string") + } + + const assetFileFormat = matches[1] || "" + + // this should be enough for most media formats (jpeg, png, webp, mp4) + const [category, extension] = assetFileFormat.split("/") + + let outputType = ClapOutputType.TEXT + + if (category === "audio") { + outputType = ClapOutputType.AUDIO + } else if (category === "image") { + outputType = ClapOutputType.IMAGE + } else if (category === "video") { + outputType = ClapOutputType.VIDEO + } + + return { + category, + assetFileFormat, + extension, + outputType, + } +} \ No newline at end of file diff --git a/src/types.ts b/src/types.ts index f4a86d96e1fb8e403d8baa8ad04a644f88a0a66c..893d0ad60aed04ffd2fab0f5e3a59156f31f17ea 100644 --- a/src/types.ts +++ b/src/types.ts @@ -39,6 +39,9 @@ export enum ComputeProvider { LUMALABS = "LUMALABS", KUAISHOU = "KUAISHOU", RUNWAYML = "RUNWAYML", + HEDRA = "HEDRA", + LEONARDOAI = "LEONARDOAI", + EVERARTAI = "EVERARTAI", } export enum ComfyIcuAccelerator { @@ -50,6 +53,39 @@ export enum ComfyIcuAccelerator { H100 = "H100" } +export type ResolveRequestPrompts = { + image: { + // the positive prompt - elements we want in the scene + positive: string + + // the positive prompt - elements we don't want in the scene + negative: string + + + // the "identification picture" of the character, if available + identity: string + + // TODO: add LoRAs etc.. 
for location consistency + } + video: { + // input image to use for the video generation + image: string + + // input voice sample to use for the video generation + voice: string + } + voice: { + // the "identification voiceprint" of the character, if available + identity: string + + // the positive prompt - elements we want in the voice + positive: string + + // the positive prompt - elements we don't want in the voice + negative: string + } +} + export type ResolveRequest = { settings: SettingsState @@ -67,6 +103,8 @@ export type ResolveRequest = { mainCharacterEntity?: ClapEntity meta: ClapMeta + + prompts: ResolveRequestPrompts } export type AssistantRequest = { @@ -160,49 +198,3 @@ export interface ImageSegment { label: string; score: number; } - -export enum SegmentVisibility { - // the segment is visible, and the user explicitly requested to render it before the others - DEMANDED = "DEMANDED", - - // TODO: add some implicit intermediary priority options - // such as SELECTED, HOVERED.. - - // the segment (or at least a portion of it) is currently visible in the sliding window - VISIBLE = "VISIBLE", - - // the segment is hidden, but not too far from the sliding window - BUFFERED = "BUFFERED", - - // fully hidden, far from the sliding window - HIDDEN = "HIDDEN" -} - -// used for sort -export const SegmentVisibilityPriority: Record = { - // the segment is visible, and the user explicitly requested to render it before the others - [SegmentVisibility.DEMANDED]: 3, - - // TODO: add some implicit intermediary priority options - // such as SELECTED, HOVERED.. - - // the segment (or at least a portion of it) is currently visible in the sliding window - [SegmentVisibility.VISIBLE]: 2, - - // the segment is hidden, but not too far from the sliding window - [SegmentVisibility.BUFFERED]: 1, - - // fully hidden, far from the sliding window - [SegmentVisibility.HIDDEN]: 0 -} - -// some data can only exist inside a browser session (eg. 
AudioBuffer) -// or at least data that only make sense on client side -// we could put things like a mouse hover or selected state in here -export type BrowserOnlySegmentData = { - audioBuffer?: AudioBuffer - - visibility?: SegmentVisibility -} - -export type RuntimeSegment = ClapSegment & BrowserOnlySegmentData \ No newline at end of file