Commit • 7fd5ad1
Parent(s): 2068834

various improvements
- next.config.js +26 -1
- package-lock.json +54 -52
- package.json +2 -2
- src/app/api/{render → resolve}/providers/comfy-comfyicu/index.ts +2 -2
- src/app/api/{render → resolve}/providers/comfy-huggingface/index.ts +2 -2
- src/app/api/{render → resolve}/providers/comfy-replicate/index.ts +2 -2
- src/app/api/{render → resolve}/providers/comfy-replicate/runWorkflow.ts +0 -0
- src/app/api/{render → resolve}/providers/comfy/getComfyWorkflow.ts +2 -2
- src/app/api/{render → resolve}/providers/falai/index.ts +24 -17
- src/app/api/{render → resolve}/providers/falai/types.ts +0 -0
- src/app/api/{render → resolve}/providers/huggingface/index.ts +4 -4
- src/app/api/{render → resolve}/providers/modelslab/index.ts +2 -2
- src/app/api/{render → resolve}/providers/replicate/index.ts +4 -4
- src/app/api/{render → resolve}/route.ts +19 -19
- src/app/favicon.ico +0 -0
- src/app/favicon_square.ico +0 -0
- src/app/icon.png +0 -0
- src/app/icon_square.png +0 -0
- src/components/core/timeline/index.tsx +11 -59
- src/components/monitor/DynamicPlayer/VideoClipBuffer.tsx +61 -0
- src/components/monitor/DynamicPlayer/index.tsx +113 -0
- src/components/monitor/UniversalPlayer/index.tsx +12 -6
- src/components/monitor/icons/single-icon.tsx +1 -1
- src/controllers/audio/getDefaultAudioState.ts +3 -0
- src/controllers/audio/startAudioSourceNode.ts +4 -3
- src/controllers/audio/types.ts +14 -0
- src/controllers/audio/useAudio.ts +94 -0
- src/controllers/monitor/getDefaultMonitorState.ts +1 -0
- src/controllers/monitor/types.ts +3 -0
- src/controllers/monitor/useMonitor.ts +28 -7
- src/controllers/renderer/constants.ts +12 -0
- src/controllers/renderer/getDefaultBufferedSegments.ts +18 -0
- src/controllers/renderer/getDefaultRendererState.ts +11 -0
- src/controllers/renderer/getSegmentCacheKey.ts +13 -0
- src/controllers/renderer/index.ts +17 -0
- src/controllers/renderer/types.ts +46 -0
- src/controllers/renderer/useRenderLoop.ts +23 -0
- src/controllers/renderer/useRenderer.ts +118 -0
- src/controllers/resolver/constants.ts +3 -0
- src/controllers/resolver/getDefaultResolverState.ts +26 -0
- src/controllers/resolver/types.ts +42 -0
- src/controllers/resolver/useResolver.ts +366 -0
- src/lib/core/constants.ts +1 -1
- src/lib/utils/convertToJpeg.ts +20 -0
- src/lib/utils/decodeOutput.ts +16 -4
- src/lib/utils/{getRenderRequestPrompts.ts → getResolveRequestPrompts.ts} +4 -3
- src/types.ts +38 -1
next.config.js
CHANGED
@@ -5,7 +5,32 @@ const nextConfig = {
     // a clap file can be quite large - but that's OK
     bodySizeLimit: '32mb'
   }
-}
+  },
+  async headers() {
+    return [
+      {
+        // matching all API routes
+        source: "/api/:path*",
+        headers: [
+          { key: "Access-Control-Allow-Credentials", value: "true" },
+          { key: "Access-Control-Allow-Origin", value: "*" }, // replace this with your actual origin
+          { key: "Access-Control-Allow-Methods", value: "GET,DELETE,PATCH,POST,PUT" },
+          { key: "Access-Control-Allow-Headers", value: "X-CSRF-Token, X-Requested-With, Accept, Accept-Version, Content-Length, Content-MD5, Content-Type, Date, X-Api-Version" },
+        ]
+      },
+      {
+        // matching ALL routes
+        source: "/:path*",
+        headers: [
+          // for security reasons, performance.now() is not precise unless we enable cross-origin isolation
+          // for more context about why, please check the Security requirements paragraph here:
+          // https://developer.mozilla.org/en-US/docs/Web/API/Performance/now#security_requirements
+          { key: "Cross-Origin-Opener-Policy", value: "same-origin" },
+          { key: "Cross-Origin-Embedder-Policy", value: "require-corp" }
+        ]
+      }
+    ]
+  }
 }
 
 module.exports = nextConfig
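The two Cross-Origin-* headers opt every page into cross-origin isolation, which is what browsers require before they stop coarsening performance.now() — and the dynamic player added later in this commit relies on performance.now() for cursor timing. A minimal way to verify the headers took effect, offered only as a sketch (crossOriginIsolated is a standard browser global; the log messages are illustrative):

// Runs in the browser: checks whether COOP/COEP actually enabled cross-origin isolation.
export function checkIsolation(): void {
  if (crossOriginIsolated) {
    // performance.now() now has its full (browser-dependent) resolution
    console.log("cross-origin isolated: high-resolution timers available")
  } else {
    // timers are deliberately coarsened to mitigate Spectre-style attacks
    console.log("not isolated: check the Cross-Origin-Opener-Policy / Cross-Origin-Embedder-Policy headers")
  }
}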
package-lock.json
CHANGED
@@ -1,16 +1,16 @@
 {
-  "name": "@
-  "version": "0.0.
+  "name": "@aitube/clapper",
+  "version": "0.0.0",
   "lockfileVersion": 3,
   "requires": true,
   "packages": {
     "": {
-      "name": "@
-      "version": "0.0.
+      "name": "@aitube/clapper",
+      "version": "0.0.0",
       "dependencies": {
         "@aitube/clap": "0.0.25",
-        "@aitube/engine": "0.0.
-        "@aitube/timeline": "0.0.
+        "@aitube/engine": "0.0.17",
+        "@aitube/timeline": "0.0.20",
         "@fal-ai/serverless-client": "^0.10.3",
         "@huggingface/hub": "^0.15.1",
         "@huggingface/inference": "^2.7.0",
@@ -106,17 +106,17 @@
       }
     },
     "node_modules/@aitube/engine": {
-      "version": "0.0.
-      "resolved": "https://registry.npmjs.org/@aitube/engine/-/engine-0.0.
-      "integrity": "sha512-
+      "version": "0.0.17",
+      "resolved": "https://registry.npmjs.org/@aitube/engine/-/engine-0.0.17.tgz",
+      "integrity": "sha512-1uC7diXvU1nzpOu5C6SdngerCe2Pn9xi5SEV4VQBbm8plYuiuWeDgzpwqCDcoa6lDz0o3ExQaovBL7nkGre0sg==",
       "peerDependencies": {
         "@aitube/clap": "0.0.25"
       }
     },
     "node_modules/@aitube/timeline": {
-      "version": "0.0.
-      "resolved": "https://registry.npmjs.org/@aitube/timeline/-/timeline-0.0.
-      "integrity": "sha512-
+      "version": "0.0.20",
+      "resolved": "https://registry.npmjs.org/@aitube/timeline/-/timeline-0.0.20.tgz",
+      "integrity": "sha512-gzis4zAIOXdfcwh8azhWuv4NT+2E8g3/XE33mOxxZEzQIRLTJg/RnoKliFzl6gwPIrt6Z/Z1lW7/kK3HMGdTRQ==",
       "dependencies": {
         "date-fns": "^3.6.0",
         "react-virtualized-auto-sizer": "^1.0.24"
@@ -764,14 +764,15 @@
       }
     },
     "node_modules/@huggingface/tasks": {
-      "version": "0.10.
-      "resolved": "https://registry.npmjs.org/@huggingface/tasks/-/tasks-0.10.
-      "integrity": "sha512-
+      "version": "0.10.14",
+      "resolved": "https://registry.npmjs.org/@huggingface/tasks/-/tasks-0.10.14.tgz",
+      "integrity": "sha512-8Q3aqTO+ldTTqtK4OfMz/h5DiiMBzUnZKdV0Dq2+JX+UXvqnTDVOk+bJd0QVytJYyNeZgKsj7XQHvEQGyo9cFg=="
     },
     "node_modules/@humanwhocodes/config-array": {
       "version": "0.11.14",
       "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz",
       "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==",
+      "deprecated": "Use @eslint/config-array instead",
       "dependencies": {
         "@humanwhocodes/object-schema": "^2.0.2",
         "debug": "^4.3.1",
@@ -796,7 +797,8 @@
     "node_modules/@humanwhocodes/object-schema": {
       "version": "2.0.3",
       "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz",
-      "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA=="
+      "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==",
+      "deprecated": "Use @eslint/object-schema instead"
     },
     "node_modules/@img/sharp-darwin-arm64": {
       "version": "0.33.4",
@@ -3995,9 +3997,9 @@
       }
     },
     "node_modules/caniuse-lite": {
-      "version": "1.0.
-      "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.
-      "integrity": "sha512-
+      "version": "1.0.30001632",
+      "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001632.tgz",
+      "integrity": "sha512-udx3o7yHJfUxMLkGohMlVHCvFvWmirKh9JAH/d7WOLPetlH+LTL5cocMZ0t7oZx/mdlOWXti97xLZWc8uURRHg==",
       "funding": [
         {
           "type": "opencollective",
@@ -6676,9 +6678,9 @@
       }
     },
     "node_modules/jiti": {
-      "version": "1.21.
-      "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.
-      "integrity": "sha512-
+      "version": "1.21.6",
+      "resolved": "https://registry.npmjs.org/jiti/-/jiti-1.21.6.tgz",
+      "integrity": "sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w==",
       "bin": {
         "jiti": "bin/jiti.js"
       }
@@ -6776,9 +6778,9 @@
       }
     },
     "node_modules/langsmith": {
-      "version": "0.1.
-      "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.1.
-      "integrity": "sha512-
+      "version": "0.1.31",
+      "resolved": "https://registry.npmjs.org/langsmith/-/langsmith-0.1.31.tgz",
+      "integrity": "sha512-G9zi+84RvUZ7UP/ZC0dx/9SYHk8Bhe9GywUeVBzEyt8M4QeU6FPWT7TEjDSqp/XuPJf5o59Z2QlmNJgMnpUd8Q==",
       "dependencies": {
         "@types/uuid": "^9.0.1",
         "commander": "^10.0.1",
@@ -7475,9 +7477,9 @@
       }
     },
     "node_modules/openai": {
-      "version": "4.
-      "resolved": "https://registry.npmjs.org/openai/-/openai-4.
-      "integrity": "sha512-
+      "version": "4.50.0",
+      "resolved": "https://registry.npmjs.org/openai/-/openai-4.50.0.tgz",
+      "integrity": "sha512-2ADkNIU6Q589oYHr5pn9k7SbUcrBTK9X0rIXrYqwMVSoqOj1yK9/1OO0ExaWsqOOpD7o58UmRjeKlx9gKAcuKQ==",
       "dependencies": {
         "@types/node": "^18.11.18",
         "@types/node-fetch": "^2.6.4",
@@ -7878,9 +7880,9 @@
       }
     },
     "node_modules/postcss-load-config/node_modules/lilconfig": {
-      "version": "3.1.
-      "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.
-      "integrity": "sha512-
+      "version": "3.1.2",
+      "resolved": "https://registry.npmjs.org/lilconfig/-/lilconfig-3.1.2.tgz",
+      "integrity": "sha512-eop+wDAvpItUys0FWkHIKeC9ybYrTGbU41U5K7+bttZZeohvnY7M9dZ5kB21GNWiFT2q1OoPTvncPCgSOVO5ow==",
       "engines": {
         "node": ">=14"
       },
@@ -8421,9 +8423,9 @@
       }
     },
     "node_modules/replicate": {
-      "version": "0.30.
-      "resolved": "https://registry.npmjs.org/replicate/-/replicate-0.30.
-      "integrity": "sha512-
+      "version": "0.30.2",
+      "resolved": "https://registry.npmjs.org/replicate/-/replicate-0.30.2.tgz",
+      "integrity": "sha512-U3yDrlwAV/zhaCd5Tcb8IPXqwkzvfXbN+YABM6RGPugtmuo8W5uwxoEQAetWD+xjS5jWLzISBYR5gPNBZPLOQw==",
       "engines": {
         "git": ">=2.11.0",
         "node": ">=18.0.0",
@@ -9796,35 +9798,35 @@
       "integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg=="
     },
     "node_modules/web-audio-beat-detector": {
-      "version": "8.2.
-      "resolved": "https://registry.npmjs.org/web-audio-beat-detector/-/web-audio-beat-detector-8.2.
-      "integrity": "sha512-
+      "version": "8.2.11",
+      "resolved": "https://registry.npmjs.org/web-audio-beat-detector/-/web-audio-beat-detector-8.2.11.tgz",
+      "integrity": "sha512-FJ3xvB42aPAV0KekVCasghzFPeLdIky3qQYzAObksPquqybzJE558KZQm1OdETWL7S/Rc1mL4K+Je+gwZp2Vzw==",
       "dependencies": {
-        "@babel/runtime": "^7.24.
-        "tslib": "^2.6.
-        "web-audio-beat-detector-broker": "^4.1.
-        "web-audio-beat-detector-worker": "^5.2.
+        "@babel/runtime": "^7.24.7",
+        "tslib": "^2.6.3",
+        "web-audio-beat-detector-broker": "^4.1.10",
+        "web-audio-beat-detector-worker": "^5.2.53"
       }
     },
     "node_modules/web-audio-beat-detector-broker": {
-      "version": "4.1.
-      "resolved": "https://registry.npmjs.org/web-audio-beat-detector-broker/-/web-audio-beat-detector-broker-4.1.
-      "integrity": "sha512-
+      "version": "4.1.10",
+      "resolved": "https://registry.npmjs.org/web-audio-beat-detector-broker/-/web-audio-beat-detector-broker-4.1.10.tgz",
+      "integrity": "sha512-mSsZGXtWEp2cC+21GMZ8CPK583rwHtUHxCae84rIrh6i1DCXUuL5ETQZEaIEw2B+3I0WTnEF6mrnHrIjAYvfuw==",
       "dependencies": {
-        "@babel/runtime": "^7.24.
+        "@babel/runtime": "^7.24.7",
         "fast-unique-numbers": "^9.0.5",
         "standardized-audio-context": "^25.3.72",
-        "tslib": "^2.6.
-        "web-audio-beat-detector-worker": "^5.2.
+        "tslib": "^2.6.3",
+        "web-audio-beat-detector-worker": "^5.2.53"
      }
     },
     "node_modules/web-audio-beat-detector-worker": {
-      "version": "5.2.
-      "resolved": "https://registry.npmjs.org/web-audio-beat-detector-worker/-/web-audio-beat-detector-worker-5.2.
-      "integrity": "sha512
+      "version": "5.2.53",
+      "resolved": "https://registry.npmjs.org/web-audio-beat-detector-worker/-/web-audio-beat-detector-worker-5.2.53.tgz",
+      "integrity": "sha512-K6gOGN/mxglO5A/IqkjdnKSOs7u8ltb8paVXN9x6DUhSWHJoMta9HqBlFiHZgIKNqDzmYTRS2SiKE+uaukZWzw==",
       "dependencies": {
-        "@babel/runtime": "^7.24.
-        "tslib": "^2.6.
+        "@babel/runtime": "^7.24.7",
+        "tslib": "^2.6.3"
       }
     },
     "node_modules/web-streams-polyfill": {
package.json
CHANGED
@@ -11,8 +11,8 @@
   },
   "dependencies": {
     "@aitube/clap": "0.0.25",
-    "@aitube/engine": "0.0.
-    "@aitube/timeline": "0.0.
+    "@aitube/engine": "0.0.17",
+    "@aitube/timeline": "0.0.20",
     "@fal-ai/serverless-client": "^0.10.3",
     "@huggingface/hub": "^0.15.1",
     "@huggingface/inference": "^2.7.0",
src/app/api/{render → resolve}/providers/comfy-comfyicu/index.ts
RENAMED
@@ -1,10 +1,10 @@
 import Replicate from 'replicate'
 
-import { RenderRequest } from "@/types"
+import { ResolveRequest } from "@/types"
 import { ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap"
 import { getVideoPrompt } from "@aitube/engine"
 
-export async function renderSegment(request: RenderRequest): Promise<ClapSegment> {
+export async function resolveSegment(request: ResolveRequest): Promise<ClapSegment> {
   if (!request.settings.comfyIcuApiKey) {
     throw new Error(`Missing API key for "Comfy.icu"`)
   }
src/app/api/{render → resolve}/providers/comfy-huggingface/index.ts
RENAMED
@@ -1,8 +1,8 @@
-import { RenderRequest } from "@/types"
+import { ResolveRequest } from "@/types"
 import { ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap"
 import { getVideoPrompt } from "@aitube/engine"
 
-export async function renderSegment(request: RenderRequest): Promise<ClapSegment> {
+export async function resolveSegment(request: ResolveRequest): Promise<ClapSegment> {
   if (!request.settings.huggingFaceApiKey) {
     throw new Error(`Missing API key for "Hugging Face"`)
   }
src/app/api/{render → resolve}/providers/comfy-replicate/index.ts
RENAMED
@@ -1,10 +1,10 @@
 import { ClapSegment, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap"
 
-import { RenderRequest } from "@/types"
+import { ResolveRequest } from "@/types"
 import { getComfyWorkflow } from "../comfy/getComfyWorkflow"
 import { runWorkflow } from "./runWorkflow"
 
-export async function renderSegment(request: RenderRequest): Promise<ClapSegment> {
+export async function resolveSegment(request: ResolveRequest): Promise<ClapSegment> {
   if (!request.settings.replicateApiKey) {
     throw new Error(`Missing API key for "Replicate.com"`)
   }
src/app/api/{render → resolve}/providers/comfy-replicate/runWorkflow.ts
RENAMED
File without changes
src/app/api/{render → resolve}/providers/comfy/getComfyWorkflow.ts
RENAMED
@@ -1,10 +1,10 @@
 import { ClapSegmentCategory } from "@aitube/clap"
 import { getVideoPrompt } from "@aitube/engine"
 
-import { ComfyNode, RenderRequest } from "@/types"
+import { ComfyNode, ResolveRequest } from "@/types"
 
 // TODO move this to @aitube/engine or @aitube/engine-comfy
-export function getComfyWorkflow(request: RenderRequest) {
+export function getComfyWorkflow(request: ResolveRequest) {
 
   let comfyWorkflow = "{}"
 
src/app/api/{render → resolve}/providers/falai/index.ts
RENAMED
@@ -1,13 +1,13 @@
 import * as fal from '@fal-ai/serverless-client'
 
-import { FalAiImageSize, RenderRequest } from "@/types"
+import { FalAiImageSize, ResolveRequest } from "@/types"
 import { ClapMediaOrientation, ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap"
 import { getVideoPrompt } from "@aitube/engine"
 import { decodeOutput } from '@/lib/utils/decodeOutput'
 import { FalAiAudioResponse, FalAiImageResponse, FalAiSpeechResponse, FalAiVideoResponse } from './types'
-import { getRenderRequestPrompts } from '@/lib/utils/getRenderRequestPrompts'
+import { getResolveRequestPrompts } from '@/lib/utils/getResolveRequestPrompts'
 
-export async function renderSegment(request: RenderRequest): Promise<ClapSegment> {
+export async function resolveSegment(request: ResolveRequest): Promise<ClapSegment> {
   if (!request.settings.falAiApiKey) {
     throw new Error(`Missing API key for "Fal.ai"`)
   }
@@ -20,8 +20,8 @@ export async function renderSegment(request: RenderRequest): Promise<ClapSegment
 
   let content = ''
 
-  const prompts = getRenderRequestPrompts(request)
-
+  const prompts = getResolveRequestPrompts(request)
+
   try {
 
     // for doc see:
@@ -29,10 +29,11 @@ export async function renderSegment(request: RenderRequest): Promise<ClapSegment
 
   if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
 
-
-
-
-
+
+    if (!prompts.positivePrompt) {
+      console.error(`resolveSegment: cannot resolve a storyboard with an empty prompt`)
+      return segment
+    }
 
     const imageSize =
       request.meta.orientation === ClapMediaOrientation.SQUARE
@@ -43,10 +44,22 @@ export async function renderSegment(request: RenderRequest): Promise<ClapSegment
 
     let result: FalAiImageResponse | undefined = undefined
 
-    if (request.settings.falAiModelForImage === "fal-ai/pulid"
+    if (request.settings.falAiModelForImage === "fal-ai/pulid") {
+      if (!request.mainCharacterEntity?.imageId) {
+        // throw new Error(`you selected model ${request.settings.falAiModelForImage}, but no character was found, so skipping`)
+        // console.log(`warning: user selected model ${request.settings.falAiModelForImage}, but no character was found. Falling back to fal-ai/fast-sdxl`)
+
+        // dirty fix to fallback to a non-face model
+        request.settings.falAiModelForImage = "fal-ai/fast-sdxl"
+      }
+    }
+
+    if (request.settings.falAiModelForImage === "fal-ai/pulid") {
       result = await fal.run(request.settings.falAiModelForImage, {
         input: {
-
+          reference_images: [{
+            image_url: request.mainCharacterEntity?.imageId
+          }],
           image_size: imageSize,
           num_images: 1,
           sync_mode: true,
@@ -55,12 +68,6 @@ export async function renderSegment(request: RenderRequest): Promise<ClapSegment
       }) as FalAiImageResponse
 
     } else {
-      //throw new Error(`you selected model ${request.settings.falAiModelForImage}, but no character was found, so skipping`)
-      console.log(`warning: user selected model ${request.settings.falAiModelForImage}, but no character was found. Falling back to fal-ai/fast-sdxl`)
-
-      // dirty fix to fallback to a non-face model
-      request.settings.falAiModelForImage = "fal-ai/fast-sdxl"
-
      result = await fal.run(request.settings.falAiModelForImage, {
         input: {
           prompt: prompts.positivePrompt,
src/app/api/{render → resolve}/providers/falai/types.ts
RENAMED
File without changes
src/app/api/{render → resolve}/providers/huggingface/index.ts
RENAMED
@@ -1,13 +1,13 @@
 import { HfInference, HfInferenceEndpoint } from "@huggingface/inference"
 
-import { RenderRequest } from "@/types"
+import { ResolveRequest } from "@/types"
 import { ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap"
 import { getVideoPrompt } from "@aitube/engine"
 import { blobToBase64DataUri } from "@/lib/utils/blobToBase64DataUri"
-import { getRenderRequestPrompts } from "@/lib/utils/getRenderRequestPrompts"
+import { getResolveRequestPrompts } from "@/lib/utils/getResolveRequestPrompts"
 import { decodeOutput } from "@/lib/utils/decodeOutput"
 
-export async function renderSegment(request: RenderRequest): Promise<ClapSegment> {
+export async function resolveSegment(request: ResolveRequest): Promise<ClapSegment> {
 
   if (!request.settings.huggingFaceApiKey) {
     throw new Error(`Missing API key for "Hugging Face"`)
@@ -22,7 +22,7 @@ export async function renderSegment(request: RenderRequest): Promise<ClapSegment
 
   const segment: ClapSegment = { ...request.segment }
 
-  const prompts = getRenderRequestPrompts(request)
+  const prompts = getResolveRequestPrompts(request)
 
   try {
     const blob: Blob = await hf.textToImage({
src/app/api/{render → resolve}/providers/modelslab/index.ts
RENAMED
@@ -1,8 +1,8 @@
-import { RenderRequest } from "@/types"
+import { ResolveRequest } from "@/types"
 import { ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap"
 import { getVideoPrompt } from "@aitube/engine"
 
-export async function renderSegment(request: RenderRequest): Promise<ClapSegment> {
+export async function resolveSegment(request: ResolveRequest): Promise<ClapSegment> {
   if (!request.settings.modelsLabApiKey) {
     throw new Error(`Missing API key for "ModelsLab.com"`)
   }
src/app/api/{render → resolve}/providers/replicate/index.ts
RENAMED
@@ -1,13 +1,13 @@
 import Replicate from 'replicate'
 
-import { RenderRequest } from "@/types"
+import { ResolveRequest } from "@/types"
 import { ClapSegment, ClapSegmentCategory, ClapSegmentStatus, getClapAssetSourceType } from "@aitube/clap"
 import { getVideoPrompt } from "@aitube/engine"
 import { fetchContentToBase64 } from '@/lib/utils/fetchContentToBase64'
-import { getRenderRequestPrompts } from '@/lib/utils/getRenderRequestPrompts'
+import { getResolveRequestPrompts } from '@/lib/utils/getResolveRequestPrompts'
 import { decodeOutput } from '@/lib/utils/decodeOutput'
 
-export async function renderSegment(request: RenderRequest): Promise<ClapSegment> {
+export async function resolveSegment(request: ResolveRequest): Promise<ClapSegment> {
   if (!request.settings.replicateApiKey) {
     throw new Error(`Missing API key for "Replicate.com"`)
   }
@@ -19,7 +19,7 @@ export async function renderSegment(request: RenderRequest): Promise<ClapSegment
 
   const segment: ClapSegment = { ...request.segment }
 
-  const prompts = getRenderRequestPrompts(request)
+  const prompts = getResolveRequestPrompts(request)
 
   try {
 
src/app/api/{render → resolve}/route.ts
RENAMED
@@ -1,22 +1,22 @@
 import { NextResponse, NextRequest } from "next/server"
 
-import { renderSegment as renderSegmentUsingHuggingFace } from "./providers/huggingface"
-import { renderSegment as renderSegmentUsingComfyReplicate } from "./providers/comfy-replicate"
-import { renderSegment as renderSegmentUsingReplicate } from "./providers/replicate"
-import { renderSegment as renderSegmentUsingComfyComfyIcu } from "./providers/comfy-comfyicu"
-import { renderSegment as renderSegmentUsingFalAi } from "./providers/falai"
-import { renderSegment as renderSegmentUsingModelsLab } from "./providers/modelslab"
+import { resolveSegment as resolveSegmentUsingHuggingFace } from "./providers/huggingface"
+import { resolveSegment as resolveSegmentUsingComfyReplicate } from "./providers/comfy-replicate"
+import { resolveSegment as resolveSegmentUsingReplicate } from "./providers/replicate"
+import { resolveSegment as resolveSegmentUsingComfyComfyIcu } from "./providers/comfy-comfyicu"
+import { resolveSegment as resolveSegmentUsingFalAi } from "./providers/falai"
+import { resolveSegment as resolveSegmentUsingModelsLab } from "./providers/modelslab"
 
-import { ComputeProvider, RenderRequest } from "@/types"
+import { ComputeProvider, ResolveRequest } from "@/types"
 import { ClapSegmentCategory } from "@aitube/clap"
 
 export async function POST(req: NextRequest) {
   // do we really need to secure it?
   // I mean.. in the end, the user is using their own credentials,
   // so they cannot siphon free OpenAI, HF, Replicate tokens
-  console.log(`TODO Julian: secure the endpoint`)
+  // console.log(`TODO Julian: secure the endpoint`)
   // await throwIfInvalidToken(req.headers.get("Authorization"))
-  const request = (await req.json()) as RenderRequest
+  const request = (await req.json()) as ResolveRequest
 
   const provider =
     request.segment.category === ClapSegmentCategory.STORYBOARD
@@ -33,25 +33,25 @@
 
   if (!provider) { throw new Error(`Segments of category ${request.segment.category} are not supported yet`)}
 
-  // console.log(`API RenderRequest = `, request.settings)
-  const renderSegment =
+  // console.log(`API ResolveRequest = `, request.settings)
+  const resolveSegment =
     provider === ComputeProvider.HUGGINGFACE
-    ? renderSegmentUsingHuggingFace
+    ? resolveSegmentUsingHuggingFace
     : provider === ComputeProvider.COMFY_HUGGINGFACE
-    ? renderSegmentUsingComfyReplicate
+    ? resolveSegmentUsingComfyReplicate
     : provider === ComputeProvider.REPLICATE
-    ? renderSegmentUsingReplicate
+    ? resolveSegmentUsingReplicate
     : provider === ComputeProvider.COMFY_COMFYICU
-    ? renderSegmentUsingComfyComfyIcu
+    ? resolveSegmentUsingComfyComfyIcu
     : provider === ComputeProvider.FALAI
-    ? renderSegmentUsingFalAi
+    ? resolveSegmentUsingFalAi
     : provider === ComputeProvider.MODELSLAB
-    ? renderSegmentUsingModelsLab
+    ? resolveSegmentUsingModelsLab
     : null
 
-  if (!renderSegment) { throw new Error(`Provider ${provider} is not supported yet`)}
+  if (!resolveSegment) { throw new Error(`Provider ${provider} is not supported yet`)}
 
-  const segment = await renderSegment(request)
+  const segment = await resolveSegment(request)
 
   return NextResponse.json(segment)
 }
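The chained ternary above simply maps a ComputeProvider value to the matching resolveSegment implementation. An equivalent, table-driven formulation of the same dispatch, shown only as a hypothetical sketch (it reuses the resolveSegmentUsing* imports from the route file; the SegmentResolverFn alias is introduced here for illustration):

import { ComputeProvider, ResolveRequest } from "@/types"
import { ClapSegment } from "@aitube/clap"

type SegmentResolverFn = (request: ResolveRequest) => Promise<ClapSegment>

// hypothetical refactor: a lookup table keyed by provider
const resolvers: Partial<Record<ComputeProvider, SegmentResolverFn>> = {
  [ComputeProvider.HUGGINGFACE]: resolveSegmentUsingHuggingFace,
  // note: the diff above maps COMFY_HUGGINGFACE to the comfy-replicate resolver
  [ComputeProvider.COMFY_HUGGINGFACE]: resolveSegmentUsingComfyReplicate,
  [ComputeProvider.REPLICATE]: resolveSegmentUsingReplicate,
  [ComputeProvider.COMFY_COMFYICU]: resolveSegmentUsingComfyComfyIcu,
  [ComputeProvider.FALAI]: resolveSegmentUsingFalAi,
  [ComputeProvider.MODELSLAB]: resolveSegmentUsingModelsLab,
}

const resolveSegment = resolvers[provider]
if (!resolveSegment) { throw new Error(`Provider ${provider} is not supported yet`) }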
src/app/favicon.ico
CHANGED
src/app/favicon_square.ico
ADDED
src/app/icon.png
CHANGED
src/app/icon_square.png
ADDED
src/components/core/timeline/index.tsx
CHANGED
@@ -1,69 +1,16 @@
-import { useEffect, useTransition } from "react"
-import { ClapSegment } from "@aitube/clap"
-import { ClapTimeline, useTimeline, SegmentRenderer } from "@aitube/timeline"
+import { useEffect } from "react"
+import { ClapTimeline, useTimeline, SegmentResolver } from "@aitube/timeline"
 
 import { cn } from "@/lib/utils"
-import { useSettings } from "@/controllers/settings"
-import { RenderRequest } from "@/types"
 import { useMonitor } from "@/controllers/monitor/useMonitor"
+import { useResolver } from "@/controllers/resolver/useResolver"
 
-const segmentRenderer: SegmentRenderer = async ({
-  segment,
-  segments,
-  entities,
-  speakingCharactersIds,
-  generalCharactersIds,
-  mainCharacterId,
-  mainCharacterEntity,
-  meta,
-}) => {
-
-  const settings = useSettings.getState().getSettings()
-
-  const request: RenderRequest = {
-    settings,
-    segment,
-    segments,
-    entities,
-    speakingCharactersIds,
-    generalCharactersIds,
-    mainCharacterId,
-    mainCharacterEntity,
-    meta,
-  }
-
-  const res = await fetch("/api/render", {
-    method: "POST",
-    headers: {
-      "Content-Type": "application/json",
-    },
-    body: JSON.stringify(request)
-  })
-  console.log(`res:`, res)
-  const newSegment = (await res.json()) as ClapSegment
-  console.log(`newSegment:`, newSegment)
-  return newSegment
-}
 
 export function Timeline() {
-  const [_isPending, startTransition] = useTransition()
-
   const isReady = useTimeline(s => s.isReady)
 
-  const imageRenderingStrategy = useSettings(s => s.imageRenderingStrategy)
-  const setImageRenderingStrategy = useTimeline(s => s.setImageRenderingStrategy)
-  useEffect(() => {
-    if (isReady) setImageRenderingStrategy(imageRenderingStrategy)
-  }, [isReady, setImageRenderingStrategy, imageRenderingStrategy])
-
-  const videoRenderingStrategy = useSettings(s => s.videoRenderingStrategy)
-  const setVideoRenderingStrategy = useTimeline(s => s.setVideoRenderingStrategy)
-  useEffect(() => {
-    if (isReady) setVideoRenderingStrategy(videoRenderingStrategy)
-  }, [isReady, setVideoRenderingStrategy, videoRenderingStrategy])
-
-  const getSettings = useSettings(s => s.getSettings)
-  const setSegmentRenderer = useTimeline(s => s.setSegmentRenderer)
+  const resolveSegment: SegmentResolver = useResolver(s => s.resolveSegment)
+  const setSegmentResolver = useTimeline(s => s.setSegmentResolver)
 
   const jumpAt = useMonitor(s => s.jumpAt)
   const checkIfPlaying = useMonitor(s => s.checkIfPlaying)
@@ -73,14 +20,19 @@ export function Timeline() {
   const setIsPlaying = useTimeline(s => s.setIsPlaying)
   const setTogglePlayback = useTimeline(s => s.setTogglePlayback)
 
+  const startLoop = useResolver(s => s.startLoop)
+
   // this is important: we connect the monitor to the timeline
   useEffect(() => {
-    setSegmentRenderer(segmentRenderer)
+    if (!isReady) { return }
+    setSegmentResolver(resolveSegment)
     setJumpAt(jumpAt)
     setIsPlaying(checkIfPlaying)
     setTogglePlayback(togglePlayback)
+    startLoop()
   }, [isReady])
 
+
   return (
     <ClapTimeline
       showFPS={false}
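The segment resolver that used to be defined inline here (POSTing to /api/render) now comes from the new resolver controller, src/controllers/resolver/useResolver.ts (+366 lines in this commit, whose body is not shown on this page). A minimal sketch of what such a resolver plausibly looks like — this is an assumption mirroring the removed inline code against the renamed endpoint, not the actual useResolver implementation:

import { ClapSegment } from "@aitube/clap"
import { ResolveRequest } from "@/types"

// hypothetical: the same POST round-trip as the old segmentRenderer,
// but targeting the renamed /api/resolve route
export async function resolveSegment(request: ResolveRequest): Promise<ClapSegment> {
  const res = await fetch("/api/resolve", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(request),
  })
  return (await res.json()) as ClapSegment
}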
src/components/monitor/DynamicPlayer/VideoClipBuffer.tsx
ADDED
@@ -0,0 +1,61 @@
+import { useEffect, useRef } from "react"
+
+import { cn } from "@/lib/utils"
+
+export function VideoClipBuffer({
+  src,
+  className,
+  muted = true,
+  isPlaying = false,
+}: {
+  src?: string;
+  className?: string;
+  muted?: boolean;
+  isPlaying?: boolean;
+}) {
+  const ref = useRef<HTMLVideoElement>(null)
+
+  const togglePlayVideo = (play: boolean) => {
+    const player = ref.current
+    if (!player) { return }
+    // console.log(`togglePlayVideo(${play}) (current status = ${player.paused ? "paused" : "playing"})`)
+    if (play && player.paused) {
+      // console.log("playing video..")
+      player.play()
+    } else if (!play && !player.paused) {
+      // console.log("pausing video..")
+      player.pause()
+    }
+  }
+
+  useEffect(() => {
+    const player = ref.current
+    if (!player) { return }
+    togglePlayVideo(isPlaying)
+  }, [isPlaying])
+
+  if (!src) { return null }
+
+  return (
+    <video
+      ref={ref}
+      autoPlay={isPlaying}
+      controls={false}
+      playsInline
+      muted={muted}
+      loop
+      className={cn(
+        `absolute`,
+        `h-full w-full rounded-md overflow-hidden`,
+
+        // ideally we could only use the ease-out and duration-150
+        // to avoid a weird fade to grey,
+        // but the ease-out also depends on which video is on top of the other,
+        // in terms of z-index, so we should also invert this
+        `transition-all duration-100 ease-out`,
+        className
+      )}
+      src={src}
+    />
+  )
+}
src/components/monitor/DynamicPlayer/index.tsx
ADDED
@@ -0,0 +1,113 @@
+"use client"
+
+import { useEffect, useRef, useState } from "react"
+
+import { cn } from "@/lib/utils"
+import { useMonitor } from "@/controllers/monitor/useMonitor"
+import { useTimeline } from "@aitube/timeline"
+
+import { useRequestAnimationFrame } from "@/lib/hooks"
+import { MonitoringMode } from "@/controllers/monitor/types"
+import { VideoClipBuffer } from "./VideoClipBuffer"
+import { useRenderLoop } from "@/controllers/renderer/useRenderLoop"
+import { useRenderer } from "@/controllers/renderer/useRenderer"
+
+export const DynamicPlayer = ({
+  className,
+}: {
+  className?: string
+}) => {
+  const isPlaying = useMonitor(s => s.isPlaying)
+  const setMonitoringMode = useMonitor(s => s.setMonitoringMode)
+
+  const setCursorTimestampAtInMs = useTimeline(s => s.setCursorTimestampAtInMs)
+
+  // this should only be called once and at only one place in the project!
+  useRenderLoop()
+
+  const { activeVideoSegment, upcomingVideoSegment } = useRenderer(s => s.bufferedSegments)
+
+  const currentVideoUrl = activeVideoSegment?.assetUrl || ""
+
+  // the upcoming video we want to preload (note: we just want to preload it, not display it just yet)
+  const preloadVideoUrl = upcomingVideoSegment?.assetUrl || ""
+
+  const [buffer1Value, setBuffer1Value] = useState("")
+  const [buffer2Value, setBuffer2Value] = useState("")
+  const [activeBufferNumber, setActiveBufferNumber] = useState(1)
+
+  const timeoutRef = useRef<NodeJS.Timeout>()
+
+  const fadeDurationInMs = 300
+
+  useEffect(() => {
+    setMonitoringMode(MonitoringMode.DYNAMIC)
+  }, [])
+
+  useRequestAnimationFrame(() => {
+    const { isPlaying, lastTimelineUpdateAtInMs, setLastTimelineUpdateAtInMs } = useMonitor.getState()
+    if (!isPlaying) { return }
+    const { cursorTimestampAtInMs } = useTimeline.getState()
+    const newTimelineUpdateAtInMs = performance.now()
+    const elapsedTimeInMs = newTimelineUpdateAtInMs - lastTimelineUpdateAtInMs
+    // console.log(`TODO: move the timeline cursor according to the elapsed time`)
+    setCursorTimestampAtInMs(cursorTimestampAtInMs + elapsedTimeInMs)
+    setLastTimelineUpdateAtInMs(newTimelineUpdateAtInMs)
+  })
+
+  useEffect(() => {
+    // trivial case: we are at the initial state
+    if (!buffer1Value && !buffer2Value) {
+      setBuffer1Value(currentVideoUrl)
+      setBuffer2Value(preloadVideoUrl)
+      setActiveBufferNumber(1)
+    }
+  }, [buffer1Value, currentVideoUrl, preloadVideoUrl])
+
+
+  // console.log("cursorInSteps:", cursorInSteps)
+  useEffect(() => {
+    /*
+    console.log("ATTENTION: something changed among those: ", {
+      currentVideoUrl, preloadVideoUrl
+    })
+    */
+
+    clearTimeout(timeoutRef.current)
+
+    const newActiveBufferNumber = activeBufferNumber === 1 ? 2 : 1
+    // console.log(`our pre-loaded video should already be available in buffer ${newActiveBufferNumber}`)
+
+    setActiveBufferNumber(newActiveBufferNumber)
+
+    timeoutRef.current = setTimeout(() => {
+      // by now one buffer should be visible, and the other should be hidden
+      // so let's update the invisible one
+      if (newActiveBufferNumber === 2) {
+        setBuffer1Value(preloadVideoUrl)
+      } else {
+        setBuffer2Value(preloadVideoUrl)
+      }
+    }, fadeDurationInMs + 200) // let's add some security in here
+
+    return () => {
+      clearTimeout(timeoutRef.current)
+    }
+  }, [currentVideoUrl, preloadVideoUrl])
+
+  return (
+    <div className={cn(`@container flex flex-col flex-grow w-full`, className)}>
+      <VideoClipBuffer
+        src={buffer1Value}
+        isPlaying={isPlaying}
+        className={cn(activeBufferNumber === 1 ? `opacity-100` : `opacity-0`)}
+      />
+      <VideoClipBuffer
+        src={buffer2Value}
+        isPlaying={isPlaying}
+        className={cn(activeBufferNumber === 2 ? `opacity-100` : `opacity-0`)}
+      />
+    </div>
+  )
+}
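DynamicPlayer is a classic double-buffer: two stacked video elements, one visible while the other silently preloads the next clip; toggling which one gets opacity-100 produces the crossfade, and the now-hidden buffer's src is only swapped once the fade has finished. The cursor advance in the useRequestAnimationFrame callback follows the usual wall-clock pattern; as a standalone sketch (names are illustrative, not part of the component):

// Generic wall-clock cursor advance — the same pattern as the rAF callback above.
let cursorInMs = 0
let lastTickAtInMs = performance.now()

function tick(nowInMs: number): void {
  // advance the cursor by however much real time elapsed since the last frame,
  // so playback speed stays correct even when frames are dropped
  cursorInMs += nowInMs - lastTickAtInMs
  lastTickAtInMs = nowInMs
  requestAnimationFrame(tick)
}

requestAnimationFrame(tick)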
src/components/monitor/UniversalPlayer/index.tsx
CHANGED
@@ -2,6 +2,7 @@ import { ClapSegment } from "@aitube/clap"
 import { useTimeline } from "@aitube/timeline"
 
 import { StaticPlayer } from "../../monitor/StaticPlayer"
+import { DynamicPlayer } from "../DynamicPlayer"
 
 // TODO: put this in a separate component eg @aitube-player or @aitube/monitor
 export function UniversalPlayer() {
@@ -9,8 +10,12 @@ export function UniversalPlayer() {
 
   const assetUrl: string = finalVideo?.assetUrl || ""
 
-  console.log('finalVideo:', finalVideo)
+  // console.log('finalVideo:', finalVideo)
 
+  // note: I think we are going to only display the final video in specific cases,
+  // and use the dynamic player 99% of the time
+
+  /*
   if (assetUrl) {
     return (
       <div className="
@@ -28,16 +33,17 @@ export function UniversalPlayer() {
       </div>
     )
   }
+  */
 
-  console.log(`TODO: render the scene dynamically`)
 
   return (
     <div className="
-      flex flex-col
+      flex flex-col flex-grow
       items-center justify-center
-      w-full h-
-
-
+      w-full h-[calc(100%-60px)]
+    ">
+      <DynamicPlayer
+      />
     </div>
   )
 }
src/components/monitor/icons/single-icon.tsx
CHANGED
@@ -25,7 +25,7 @@ export function SingleIcon({
 
       // icons is a bit too fat, let's thin them out
       // for a bit of flair we increase the stroke width on group hover
-      thickOnHover ? `stroke-
+      thickOnHover ? `stroke-[0.5] group-hover:stroke-[1]` : ``,
       className,
     )}
   />
src/controllers/audio/getDefaultAudioState.ts
CHANGED
@@ -11,7 +11,10 @@ export function getDefaultAudioState(): AudioState {
   }
 
   const state: AudioState = {
+    isPlaying: false,
     isMuted: false,
+    userDefinedGain: 1.0,
+    currentGain: 1.0,
     audioContext,
     currentlyPlaying: [],
   }
src/controllers/audio/startAudioSourceNode.ts
CHANGED
@@ -14,7 +14,7 @@ import { CurrentlyPlayingAudioSource } from "./types"
 export function startAudioSourceNode({
   audioContext,
   segment,
-
+  cursorTimestampAtInMs,
   onEnded
 }: {
   /**
@@ -32,7 +32,7 @@ export function startAudioSourceNode({
    *
    * This is the position of the playback cursor in the project, in milliseconds (eg. 20000ms)
    */
-
+  cursorTimestampAtInMs: number
 
   /**
    * Called whenever the audio source will finish playing
@@ -73,7 +73,7 @@
   gainNode.connect(audioContext.destination)
 
   // make sure we play the segment at a specific time
-  const startTimeInMs =
+  const startTimeInMs = cursorTimestampAtInMs - segment.startTimeInMs
 
   // convert milliseconds to seconds by dividing by 1000
   source.start(audioContext.currentTime, startTimeInMs >= 1000 ? (startTimeInMs / 1000) : 0)
@@ -82,6 +82,7 @@
     sourceId: UUID(),
     segmentId: segment.id,
     sourceNode: source,
+    originalGain: segment.outputGain,
    gainNode: gainNode,
   }
src/controllers/audio/types.ts
CHANGED
@@ -1,3 +1,4 @@
+import { RuntimeSegment } from "@/types"
 
 export type AudioAnalysis = {
   audioBuffer: AudioBuffer
@@ -7,12 +8,22 @@ export type AudioAnalysis = {
 }
 
 export type AudioState = {
+  isPlaying: boolean
   isMuted: boolean
+  userDefinedGain: number
+  currentGain: number
   audioContext: AudioContext // we keep a single audio context
   currentlyPlaying: CurrentlyPlayingAudioSource[]
 }
 
 export type AudioControls = {
+  play: () => void
+  stop: () => void
+  setUserDefinedGain: (userDefinedGain: number) => void
+  setCurrentGain: (currentGain: number) => void
+  mute: () => void
+  unmute: () => void
+  syncAudioToCurrentCursorPosition: (activeAudioSegments: RuntimeSegment[]) => void
 }
 
 export type AudioStore = AudioState & AudioControls
@@ -39,6 +50,9 @@ export type CurrentlyPlayingAudioSource = {
    */
   sourceNode: AudioScheduledSourceNode
 
+  // the original value that was set to the segment
+  originalGain: number
+
   /**
    * The gain node, to control the volume
    *
src/controllers/audio/useAudio.ts
CHANGED
@@ -1,10 +1,104 @@
|
|
1 |
"use client"
|
2 |
|
3 |
import { create } from "zustand"
|
|
|
|
|
|
|
4 |
|
5 |
import { AudioStore } from "./types"
|
6 |
import { getDefaultAudioState } from "./getDefaultAudioState"
|
|
|
7 |
|
8 |
export const useAudio = create<AudioStore>((set, get) => ({
|
9 |
...getDefaultAudioState(),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
10 |
}))
|
|
|
1 |
"use client"
|
2 |
|
3 |
 import { create } from "zustand"
+import { TimelineStore, useTimeline } from "@aitube/timeline"
+
+import { RuntimeSegment } from "@/types"
 
 import { AudioStore } from "./types"
 import { getDefaultAudioState } from "./getDefaultAudioState"
+import { startAudioSourceNode } from "./startAudioSourceNode"
 
 export const useAudio = create<AudioStore>((set, get) => ({
   ...getDefaultAudioState(),
+
+  play: () => {
+    // console.log("useAudio: play()")
+    const { isPlaying, currentlyPlaying } = get()
+    if (isPlaying) { return }
+    currentlyPlaying.forEach(p => { p.sourceNode.start() })
+  },
+  stop: () => {
+    // console.log("useAudio: stop()")
+    const { isPlaying, currentlyPlaying } = get()
+    // fixed: the guard was `if (isPlaying) { return }`, which made stop() a no-op
+    // precisely when audio was playing; we want to bail out when nothing is playing
+    if (!isPlaying) { return }
+    currentlyPlaying.forEach(p => { p.sourceNode.stop() })
+    // no need to update currentlyPlaying, it will be automatic
+    // see function playAudioSegment(), below the "source.sourceNode.onended = () => {"
+  },
+
+  setUserDefinedGain: (userDefinedGain: number) => {
+    // console.log(`useAudio: setUserDefinedGain(${userDefinedGain})`)
+    const { setCurrentGain } = get()
+    set({ userDefinedGain })
+    setCurrentGain(userDefinedGain)
+  },
+
+  setCurrentGain: (currentGain: number) => {
+    // console.log(`useAudio: setCurrentGain(${currentGain})`)
+    const { currentlyPlaying } = get()
+    set({ currentGain, isMuted: currentGain === 0 })
+    currentlyPlaying.forEach(p => {
+      p.gainNode.gain.value = p.originalGain * currentGain
+    })
+  },
+
+  mute: () => {
+    // console.log("useAudio: mute()")
+    const { setCurrentGain } = get()
+    setCurrentGain(0)
+  },
+
+  unmute: () => {
+    // console.log("useAudio: unmute()")
+    const { setCurrentGain, userDefinedGain } = get()
+    setCurrentGain(userDefinedGain)
+  },
+
+  /**
+   * This makes sure we are playing what should be played
+   *
+   * @returns
+   */
+  syncAudioToCurrentCursorPosition: (activeAudioSegments: RuntimeSegment[]) => {
+    // console.log("useAudio: syncAudioToCurrentCursorPosition()")
+    const { audioContext, currentlyPlaying } = get()
+
+    const timelineStore: TimelineStore = useTimeline.getState()
+    const { cursorTimestampAtInMs } = timelineStore
+
+    const segments: RuntimeSegment[] = activeAudioSegments.filter(s =>
+      !currentlyPlaying.some(p => p.segmentId === s.id)
+    )
+
+    if (!segments.length) {
+      return
+    }
+    // console.log("useAudio: found audio segments that should be playing")
+
+    const newlyStartedAudioSourceNodes = segments.map((segment: RuntimeSegment) =>
+      startAudioSourceNode({
+        audioContext,
+        segment,
+        cursorTimestampAtInMs,
+        onEnded: (sourceId) => {
+          // console.log("useAudio: removing the old source node from the list of playing")
+          // since this callback might be called 30 sec, 3 min or 60 min later,
+          // it is vital to read a fresh store state (using get())
+          set({
+            // then we can finally remove the source from the list, synchronously
+            currentlyPlaying: get().currentlyPlaying.filter(p => p.sourceId !== sourceId)
+          })
+        }
+      })
+    )
+
+    set({
+      currentlyPlaying: [
+        // let's not forget to keep the current samples!
+        ...currentlyPlaying,
+        ...newlyStartedAudioSourceNodes
+      ]
+    })
+  },
 }))
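For orientation, here is a minimal sketch of how a UI control could drive this store (the component and its markup are hypothetical; only the store fields and actions come from the file above):

function VolumeSlider() {
  // subscribe to the user-defined gain with a plain zustand selector
  const userDefinedGain = useAudio(s => s.userDefinedGain)
  const setUserDefinedGain = useAudio(s => s.setUserDefinedGain)
  return (
    <input
      type="range" min={0} max={1} step={0.01}
      value={userDefinedGain}
      onChange={e => setUserDefinedGain(Number(e.target.value))}
    />
  )
}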
src/controllers/monitor/getDefaultMonitorState.ts
CHANGED
@@ -3,6 +3,7 @@ import { MonitoringMode, MonitorState } from "./types"
 export function getDefaultMonitorState(): MonitorState {
   const state: MonitorState = {
     mode: MonitoringMode.NONE,
+    lastTimelineUpdateAtInMs: 0,
     isPlaying: false,
     staticVideoRef: undefined
   }
src/controllers/monitor/types.ts
CHANGED
@@ -6,6 +6,7 @@ export enum MonitoringMode {
 
 export type MonitorState = {
   mode: MonitoringMode
+  lastTimelineUpdateAtInMs: number
   isPlaying: boolean
   staticVideoRef?: HTMLVideoElement
 }
@@ -34,5 +35,7 @@ export type MonitorControls = {
   * @returns
   */
  jumpAt: (timeInMs?: number) => void
+
+  setLastTimelineUpdateAtInMs: (lastTimelineUpdateAtInMs: number) => void
 }
 export type MonitorStore = MonitorState & MonitorControls
src/controllers/monitor/useMonitor.ts
CHANGED
@@ -1,8 +1,11 @@
 "use client"
 
 import { create } from "zustand"
+import { ClapSegment } from "@aitube/clap"
 import { useTimeline } from "@aitube/timeline"
 
+
+import { useAudio } from "../audio/useAudio"
 import { MonitoringMode, MonitorStore } from "./types"
 import { getDefaultMonitorState } from "./getDefaultMonitorState"
 
@@ -34,8 +37,13 @@ export const useMonitor = create<MonitorStore>((set, get) => ({
   isPlaying: boolean
 } => {
   const { isPlaying: wasPlaying, mode, staticVideoRef } = get()
+  const { play, stop } = useAudio.getState()
 
   if (mode === MonitoringMode.NONE) {
+    set({
+      isPlaying: false,
+      lastTimelineUpdateAtInMs: performance.now()
+    })
     return {
       wasPlaying: false,
       isPlaying: false
@@ -43,21 +51,29 @@ export const useMonitor = create<MonitorStore>((set, get) => ({
   }
 
   const isPlaying = typeof forcePlaying === "boolean" ? forcePlaying : !wasPlaying
 
-  set({
-    isPlaying
-  })
 
   if (mode === MonitoringMode.STATIC && staticVideoRef) {
     if (isPlaying) {
-      console.log(`previous value = ` + staticVideoRef.currentTime)
+      // console.log(`previous value = ` + staticVideoRef.currentTime)
       staticVideoRef.play()
     } else {
       staticVideoRef.pause()
     }
   } else if (mode === MonitoringMode.DYNAMIC) {
-    console.log(`TODO Julian: implement dynamic mode`)
+    // console.log(`TODO Julian: implement dynamic mode`)
+    if (isPlaying) {
+      // restart audio
+      play()
+    } else {
+      stop()
+    }
   }
+
+  set({
+    isPlaying,
+    lastTimelineUpdateAtInMs: performance.now()
+  })
 
   return {
     wasPlaying,
@@ -79,8 +95,13 @@ export const useMonitor = create<MonitorStore>((set, get) => ({
       // console.log("resetting static video current time")
       staticVideoRef.currentTime = timeInMs / 1000
     } else if (mode === MonitoringMode.DYNAMIC) {
-      console.log(`TODO Julian: implement jump`)
+      // console.log(`TODO Julian: implement jump`)
+      // for audio I think it will be automatic
     }
   },
 
+  setLastTimelineUpdateAtInMs: (lastTimelineUpdateAtInMs: number) => {
+    set({ lastTimelineUpdateAtInMs })
+  },
+
 }))
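The name of the toggle function edited above is cut off by the diff context, so the action name below is an assumption; a play/pause button would then use the store roughly like this:

function PlayPauseButton() {
  const isPlaying = useMonitor(s => s.isPlaying)
  // hypothetical action name; the hunks above only show the function body
  const togglePlayback = useMonitor(s => s.togglePlayback)
  return <button onClick={() => togglePlayback()}>{isPlaying ? "Pause" : "Play"}</button>
}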
src/controllers/renderer/constants.ts
ADDED

import { DEFAULT_COLUMNS_PER_SLICE, DEFAULT_DURATION_IN_MS_PER_STEP } from "@aitube/timeline"

// how far we look ahead into the future
export const blockSizeInMs =
  DEFAULT_COLUMNS_PER_SLICE // this equals 4 at the time of writing, but it might become dynamic
  * DEFAULT_DURATION_IN_MS_PER_STEP // this equals 500 at the time of writing

// max refresh rate (eg. 100 ms)
// if the user has a fast device you can try faster rates
export const ACTIVE_SEGMENTS_REFRESH_RATE_IN_MS = 100
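Plugging in the values mentioned in the comments (a sanity check, assuming the two constants still equal 4 and 500):

// blockSizeInMs = DEFAULT_COLUMNS_PER_SLICE * DEFAULT_DURATION_IN_MS_PER_STEP
//               = 4 * 500
//               = 2000 ms
// i.e. anything within 2 seconds of the cursor counts as "upcoming" for the renderer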
src/controllers/renderer/getDefaultBufferedSegments.ts
ADDED

import { BufferedSegments } from "./types"

export function getDefaultBufferedSegments(): BufferedSegments {
  const result: BufferedSegments = {
    activeSegmentsCacheKey: "",
    activeSegments: [],
    activeVideoSegment: undefined,
    activeStoryboardSegment: undefined,
    activeAudioSegments: [],

    upcomingSegmentsCacheKey: "",
    upcomingSegments: [],
    upcomingVideoSegment: undefined,
    upcomingStoryboardSegment: undefined,
    upcomingAudioSegments: [],
  }
  return result
}
src/controllers/renderer/getDefaultRendererState.ts
ADDED

import { getDefaultBufferedSegments } from "./getDefaultBufferedSegments"
import { RendererState } from "./types"

export function getDefaultRendererState(): RendererState {
  const state: RendererState = {
    bufferedSegments: getDefaultBufferedSegments(),

    // put more stuff here
  }
  return state
}
src/controllers/renderer/getSegmentCacheKey.ts
ADDED

import { ClapSegment } from "@aitube/clap"

export function getSegmentCacheKey(segment: ClapSegment, prefix = "") {

  // we have to be smart here: we can't take the full base64 assetUrl (it might be huge),
  // so we only use a portion of it

  return `${prefix}:${
    segment.id
  }_${
    segment.assetUrl.slice(0, 1024)
  }`
}
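Since the previous key is passed back in as the prefix, repeated calls chain into one composite key per buffer; a sketch with hypothetical segments:

let cacheKey = ""
cacheKey = getSegmentCacheKey(videoSegment, cacheKey) // ":<id>_<first 1024 chars of assetUrl>"
cacheKey = getSegmentCacheKey(audioSegment, cacheKey) // the previous key becomes the prefix
// any change of segment id or asset payload yields a different composite key,
// which is how the render loop detects that the buffered segments changed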
src/controllers/renderer/index.ts
ADDED

export {
  blockSizeInMs,
  ACTIVE_SEGMENTS_REFRESH_RATE_IN_MS
} from "./constants"
export { getDefaultBufferedSegments } from "./getDefaultBufferedSegments"
export { getDefaultRendererState } from "./getDefaultRendererState"
export { getSegmentCacheKey } from "./getSegmentCacheKey"
export type {
  ActiveSegments,
  UpcomingSegments,
  BufferedSegments,
  RendererState,
  RendererControls,
  RendererStore
} from "./types"
export { useRenderer } from "./useRenderer"
export { useRenderLoop } from "./useRenderLoop"
src/controllers/renderer/types.ts
ADDED

import { ClapSegment } from "@aitube/clap"

export type ActiveSegments = {
  activeSegmentsCacheKey: string
  activeSegments: ClapSegment[]
  activeVideoSegment?: ClapSegment
  activeStoryboardSegment?: ClapSegment
  activeAudioSegments: ClapSegment[]
}

export type UpcomingSegments = {
  upcomingSegmentsCacheKey: string
  upcomingSegments: ClapSegment[]
  upcomingVideoSegment?: ClapSegment
  upcomingStoryboardSegment?: ClapSegment
  upcomingAudioSegments: ClapSegment[]
}

export type BufferedSegments = ActiveSegments & UpcomingSegments

export type RendererState = {
  bufferedSegments: BufferedSegments
}

export type RendererControls = {
  // this will be called at 60 FPS - and yes, it is expensive
  // we could probably improve things by using a temporal tree index

  /**
   * Cycle through the segments to see which ones are crossing the current cursor,
   * then update the internal buffer of segments
   * (this has side effects as it modifies the internal state)
   *
   * @returns
   */
  renderLoop: () => BufferedSegments

  /**
   * Cycle through the segments to see which ones are crossing the current cursor,
   * and return them (this doesn't change any state, but it reads the state from various stores)
   * @returns
   */
  computeBufferedSegments: () => BufferedSegments
}

export type RendererStore = RendererState & RendererControls
src/controllers/renderer/useRenderLoop.ts
ADDED

import { useRequestAnimationFrame } from "@/lib/hooks"
import { useRenderer } from "./useRenderer"
import { useAudio } from "@/controllers/audio/useAudio"
import { useMonitor } from "../monitor/useMonitor"

/**
 * Runs a rendering loop
 *
 * Should only be called once!!
 * @returns
 */
export function useRenderLoop(): void {
  useRequestAnimationFrame(() => {
    if (!useMonitor.getState().isPlaying) { return }
    // this updates the internal state of the renderer to make it hold
    // all the currently visible or audible items
    const { activeAudioSegments } = useRenderer.getState().renderLoop()

    // now all we need to do is to update the audio
    // (well, we also need to update the visuals, but that is done in <DynamicPlayer />)
    useAudio.getState().syncAudioToCurrentCursorPosition(activeAudioSegments)
  })
}
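A sketch of the intended mounting, given the "call once" warning above (the component is hypothetical):

function Monitor() {
  useRenderLoop() // drives the audio sync; visuals are handled by <DynamicPlayer />
  return <DynamicPlayer />
}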
src/controllers/renderer/useRenderer.ts
ADDED

"use client"

import { create } from "zustand"
import { ClapOutputType, ClapSegmentCategory } from "@aitube/clap"
import { TimelineStore, useTimeline } from "@aitube/timeline"

import { RuntimeSegment } from "@/types"

import { BufferedSegments, RendererStore } from "./types"
import { getDefaultRendererState } from "./getDefaultRendererState"
import { getSegmentCacheKey } from "./getSegmentCacheKey"
import { blockSizeInMs } from "./constants"
import { getDefaultBufferedSegments } from "./getDefaultBufferedSegments"

export const useRenderer = create<RendererStore>((set, get) => ({
  ...getDefaultRendererState(),

  // this will be called at 60 FPS - and yes, it is expensive
  // we could probably improve things by using a temporal tree index
  renderLoop: (): BufferedSegments => {

    const { computeBufferedSegments, bufferedSegments } = get()

    // note: although useRequestAnimationFrame is called at 60 FPS,
    // computeBufferedSegments has a throttle since it is expensive
    const maybeNewBufferedSegments = computeBufferedSegments()

    const activeSegmentsChanged = maybeNewBufferedSegments.activeSegmentsCacheKey !== bufferedSegments.activeSegmentsCacheKey
    const upcomingSegmentsChanged = maybeNewBufferedSegments.upcomingSegmentsCacheKey !== bufferedSegments.upcomingSegmentsCacheKey

    if (activeSegmentsChanged || upcomingSegmentsChanged) {

      set({ bufferedSegments: maybeNewBufferedSegments })

      return maybeNewBufferedSegments
    }

    return bufferedSegments
  },

  computeBufferedSegments: (): BufferedSegments => {
    const timelineState: TimelineStore = useTimeline.getState()
    const { cursorTimestampAtInMs, segments: clapSegments } = timelineState
    const segments = clapSegments as RuntimeSegment[]

    const results: BufferedSegments = getDefaultBufferedSegments()
    // console.log("useRenderer: computeBufferedSegments() called")

    // we could use a temporal index to keep things efficient here:
    // there is a relatively recent algorithm, the IB+ tree,
    // which seems to be good for what we want to do
    // https://www.npmjs.com/package/i2bplustree
    // another solution could be to filter the segments into multiple arrays by category,
    // although we will have to see when those segments are re-computed / synced
    for (const segment of segments) {
      const inActiveShot = segment.startTimeInMs <= cursorTimestampAtInMs && cursorTimestampAtInMs < segment.endTimeInMs

      if (inActiveShot) {
        const isActiveVideo = segment.category === ClapSegmentCategory.VIDEO && segment.assetUrl
        // const isActiveStoryboard = segment.category === ClapSegmentCategory.STORYBOARD && segment.assetUrl
        if (isActiveVideo) {
          results.activeSegments.push(segment)
          results.activeVideoSegment = segment
          results.activeSegmentsCacheKey = getSegmentCacheKey(segment, results.activeSegmentsCacheKey)
        }

        const isActiveAudio =
          // IF this is an audio segment
          segment.outputType === ClapOutputType.AUDIO &&

          // AND there is an actual audio buffer attached to it
          segment.audioBuffer

        if (isActiveAudio) {
          results.activeSegments.push(segment)
          results.activeAudioSegments.push(segment)
          results.activeSegmentsCacheKey = getSegmentCacheKey(segment, results.activeSegmentsCacheKey)
        }
      }

      const inUpcomingShot =
        (segment.startTimeInMs <= (cursorTimestampAtInMs + blockSizeInMs))
        &&
        ((cursorTimestampAtInMs + blockSizeInMs) < segment.endTimeInMs)

      if (inUpcomingShot) {
        const isUpcomingVideo = segment.category === ClapSegmentCategory.VIDEO && segment.assetUrl
        // const isUpcomingStoryboard = segment.category === ClapSegmentCategory.STORYBOARD && segment.assetUrl
        if (isUpcomingVideo) {
          results.upcomingSegments.push(segment)
          results.upcomingVideoSegment = segment
          results.upcomingSegmentsCacheKey = getSegmentCacheKey(segment, results.upcomingSegmentsCacheKey)
        }

        const isUpcomingAudio =
          // IF this is an audio segment
          segment.outputType === ClapOutputType.AUDIO &&

          // AND there is an actual audio buffer attached to it
          segment.audioBuffer

        if (isUpcomingAudio) {
          results.upcomingSegments.push(segment)
          results.upcomingAudioSegments.push(segment)
          results.upcomingSegmentsCacheKey = getSegmentCacheKey(segment, results.upcomingSegmentsCacheKey)
        }
      }

    }

    // console.log("useRenderer: computeBufferedSegments() returning:", results)

    return results
  },
}))
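Consumers can then subscribe to slices of the buffer with plain zustand selectors; for instance, a player component that only cares about the active video (a sketch, not part of the diff):

const activeVideoSegment = useRenderer(s => s.bufferedSegments.activeVideoSegment)
// this only re-renders when the cache-key comparison above produced a new bufferedSegments object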
src/controllers/resolver/constants.ts
ADDED

// TODO: we should take into account the rate limit of our provider,
// but I think this can be the responsibility of the parent app
export const DEFAULT_WAIT_TIME_IF_NOTHING_TO_DO_IN_MS = 500
src/controllers/resolver/getDefaultResolverState.ts
ADDED

import { ResolverState } from "./types"

export function getDefaultResolverState(): ResolverState {
  const state: ResolverState = {
    isRunning: false,

    defaultParallelismQuotas: {
      video: 1,
      image: 1,
      voice: 1,
      sound: 1,
      music: 1,
    },

    currentParallelismQuotas: {
      video: 1,
      image: 1,
      voice: 1,
      sound: 1,
      music: 1,
    },

    nbRequestsRunningInParallel: 0
  }
  return state
}
src/controllers/resolver/types.ts
ADDED

import { ClapSegment } from "@aitube/clap"

export type ResolverState = {
  isRunning: boolean

  defaultParallelismQuotas: {
    video: number
    image: number
    voice: number
    sound: number
    music: number
  }

  // used for UI display, to show some metrics
  currentParallelismQuotas: {
    video: number
    image: number
    voice: number
    sound: number
    music: number
  }

  // used for UI display, to show some metrics
  nbRequestsRunningInParallel: number
}

export type ResolverControls = {
  startLoop: () => void
  runLoop: () => Promise<void>

  /**
   * This resolves a segment
   *
   * Note: while we return a clap segment, the original will be replaced, too
   *
   * @param segment
   * @returns
   */
  resolveSegment: (segment: ClapSegment) => Promise<ClapSegment>
}

export type ResolverStore = ResolverState & ResolverControls
src/controllers/resolver/useResolver.ts
ADDED

"use client"

import { create } from "zustand"
import { ClapEntity, ClapSegment, ClapSegmentCategory, ClapSegmentFilteringMode, ClapSegmentStatus, filterSegments } from "@aitube/clap"
import { RenderingStrategy, TimelineStore, useTimeline } from "@aitube/timeline"

import { ResolveRequest, RuntimeSegment, SegmentVisibility, SegmentVisibilityPriority } from "@/types"

import { getDefaultResolverState } from "./getDefaultResolverState"
import { useSettings } from "../settings"
import { DEFAULT_WAIT_TIME_IF_NOTHING_TO_DO_IN_MS } from "./constants"
import { ResolverStore } from "./types"


export const useResolver = create<ResolverStore>((set, get) => ({
  ...getDefaultResolverState(),

  startLoop: () => {
    const {
      isRunning,
      runLoop
    } = get()

    console.log(`useResolver.startLoop() isRunning: ${isRunning}`)

    if (isRunning) { return }

    set({ isRunning: true })

    setTimeout(() => {
      runLoop()
    }, 0)
  },

  /**
   * A loop which reconstructs the queue at each cycle
   *
   * this has to be dynamic, since the user might be moving around
   * inside the timeline
   * @returns
   */
  runLoop: async (): Promise<void> => {

    const {
      imageRenderingStrategy,
      videoRenderingStrategy,
      soundRenderingStrategy,
      voiceRenderingStrategy,
      musicRenderingStrategy,
    } = useSettings.getState()

    const runLoopAgain = (waitTimeIfNothingToDoInMs = DEFAULT_WAIT_TIME_IF_NOTHING_TO_DO_IN_MS) => {
      setTimeout(() => {
        get().runLoop()
      }, waitTimeIfNothingToDoInMs)
    }

    // ------- trivial case: maybe we have nothing to do? ------

    const allStrategiesAreOnDemand =
      imageRenderingStrategy === RenderingStrategy.ON_DEMAND &&
      videoRenderingStrategy === RenderingStrategy.ON_DEMAND &&
      soundRenderingStrategy === RenderingStrategy.ON_DEMAND &&
      voiceRenderingStrategy === RenderingStrategy.ON_DEMAND &&
      // fixed: the original line stopped at `musicRenderingStrategy`, which only
      // checked that the value was truthy; the comparison below is the intended check
      musicRenderingStrategy === RenderingStrategy.ON_DEMAND

    // nothing to do
    if (allStrategiesAreOnDemand) {
      // console.log(`useResolver.runLoop(): all strategies are on-demand only`)
      return runLoopAgain()
    }

    // ---------- end of the very trivial case ----------------


    // console.log(`useResolver.runLoop()`)
    const timelineState: TimelineStore = useTimeline.getState()
    const { visibleSegments, loadedSegments, segments: allSegments, resolveSegment } = timelineState

    // ------------------------------------------------------------------------------------------------
    //
    // - we modify the original object in-place to add the visibility setting
    // - there is a priority order: the info that a segment is "visible" (on screen)
    //   is more important, which is why it is done after processing the "loaded" segments
    //   (the ones that are buffered, because they are near the sliding window)
    const tmp: Record<string, RuntimeSegment> = {} // note: currently unused
    for (const s of loadedSegments) { (s as RuntimeSegment).visibility = SegmentVisibility.BUFFERED }
    for (const s of visibleSegments) { (s as RuntimeSegment).visibility = SegmentVisibility.VISIBLE }

    // sort segments by visibility:
    // segments visible on screen are shown first,
    // then those nearby, then the hidden ones
    const segments: RuntimeSegment[] = ([...allSegments] as RuntimeSegment[]).sort((segment1, segment2) => {
      const priority1 = SegmentVisibilityPriority[segment1.visibility || SegmentVisibility.HIDDEN] || 0
      const priority2 = SegmentVisibilityPriority[segment2.visibility || SegmentVisibility.HIDDEN] || 0

      return priority2 - priority1
    })
    //
    // -------------------------------------------------------------------------

    const { defaultParallelismQuotas } = get()

    // note that we need to create a copy here, so that we can modify it
    const parallelismQuotas = {
      ...defaultParallelismQuotas,
    }

    // console.log(`useResolver.runLoop() parallelismQuotas = `, parallelismQuotas)

    // we do not need to get currentParallelismQuotas,
    // as we are going to re-compute it
    // (currentParallelismQuotas is only used in the UI
    // to display the parallel request counter)

    const segmentsToRender: ClapSegment[] = []

    // the following loop isn't the prettiest, but I think it presents
    // the dynamic generation logic in a clear way, so let's keep it for now
    for (const s of segments) {

      if (s.category === ClapSegmentCategory.VIDEO) {

        if (s.status !== ClapSegmentStatus.TO_GENERATE) {

          // this is important: we found an in-progress task!
          // it is thus vital to deduct it from the parallelism quota,
          // to avoid triggering quota limits on the provider's side
          if (s.status === ClapSegmentStatus.IN_PROGRESS) {
            parallelismQuotas.video = Math.max(0, parallelismQuotas.video - 1)
          }

          continue
        }

        if (videoRenderingStrategy === RenderingStrategy.ON_DEMAND) {
          continue
        }

        if (parallelismQuotas.video > 0) {
          parallelismQuotas.video = Math.max(0, parallelismQuotas.video - 1)
          segmentsToRender.push(s)
        }
      } else if (s.category === ClapSegmentCategory.STORYBOARD) {

        // console.log(`useResolver.runLoop(): found a storyboard segment`)

        if (s.status !== ClapSegmentStatus.TO_GENERATE) {
          // console.log(`useResolver.runLoop(): found a storyboard segment that is not to_generate`)

          // this is important: we found an in-progress task!
          // it is thus vital to deduct it from the parallelism quota,
          // to avoid triggering quota limits on the provider's side
          if (s.status === ClapSegmentStatus.IN_PROGRESS) {
            parallelismQuotas.image = Math.max(0, parallelismQuotas.image - 1)
          }

          continue
        }
        // console.log(`useResolver.runLoop(): found a storyboard segment that has to be generated`)

        if (imageRenderingStrategy === RenderingStrategy.ON_DEMAND) {
          continue
        }

        // console.log(`useResolver.runLoop(): strategy is good to go`)

        if (parallelismQuotas.image > 0) {
          // console.log(`useResolver.runLoop(): quota is good to go`)
          parallelismQuotas.image = Math.max(0, parallelismQuotas.image - 1)
          segmentsToRender.push(s)
        }
      } else if (s.category === ClapSegmentCategory.DIALOGUE) {

        if (s.status !== ClapSegmentStatus.TO_GENERATE) {

          // this is important: we found an in-progress task!
          // it is thus vital to deduct it from the parallelism quota,
          // to avoid triggering quota limits on the provider's side
          if (s.status === ClapSegmentStatus.IN_PROGRESS) {
            parallelismQuotas.voice = Math.max(0, parallelismQuotas.voice - 1)
          }

          continue
        }

        if (voiceRenderingStrategy === RenderingStrategy.ON_DEMAND) {
          continue
        }
        if (parallelismQuotas.voice > 0) {
          parallelismQuotas.voice = Math.max(0, parallelismQuotas.voice - 1)
          segmentsToRender.push(s)
        }
      } else if (s.category === ClapSegmentCategory.SOUND) {

        if (s.status !== ClapSegmentStatus.TO_GENERATE) {

          // this is important: we found an in-progress task!
          // it is thus vital to deduct it from the parallelism quota,
          // to avoid triggering quota limits on the provider's side
          if (s.status === ClapSegmentStatus.IN_PROGRESS) {
            parallelismQuotas.sound = Math.max(0, parallelismQuotas.sound - 1)
          }

          continue
        }
        if (soundRenderingStrategy === RenderingStrategy.ON_DEMAND) {
          continue
        }
        if (parallelismQuotas.sound > 0) {
          parallelismQuotas.sound = Math.max(0, parallelismQuotas.sound - 1)
          segmentsToRender.push(s)
        }
      } else if (s.category === ClapSegmentCategory.MUSIC) {

        if (s.status !== ClapSegmentStatus.TO_GENERATE) {

          // this is important: we found an in-progress task!
          // it is thus vital to deduct it from the parallelism quota,
          // to avoid triggering quota limits on the provider's side
          if (s.status === ClapSegmentStatus.IN_PROGRESS) {
            parallelismQuotas.music = Math.max(0, parallelismQuotas.music - 1)
          }

          continue
        }

        if (musicRenderingStrategy === RenderingStrategy.ON_DEMAND) {
          continue
        }
        if (parallelismQuotas.music > 0) {
          parallelismQuotas.music = Math.max(0, parallelismQuotas.music - 1)
          segmentsToRender.push(s)
        }
      } // else continue
    }

    if (!segmentsToRender.length) {
      // nothing to do - this will be the most common case
      return runLoopAgain()
    }

    // console.log(`useResolver.runLoop(): firing and forgetting ${segmentsToRender.length} new resolveSegment promises`)
    // we fire and forget
    segmentsToRender.forEach(segment => resolveSegment(segment))

    // we don't want to do something like this:
    // await Promise.allSettled(segmentsRenderingPromises)
    // because that would limit us in terms of parallelism.
    //
    // the idea here is that we don't want to wait for all segments
    // to finish before starting new ones.

    return runLoopAgain()
  },

  /**
   * This resolves a segment
   *
   * Note: while we return a clap segment, the original will be replaced, too
   *
   * @param segment
   * @returns
   */
  resolveSegment: async (segment: ClapSegment): Promise<ClapSegment> => {

    const settings = useSettings.getState().getSettings()

    const timelineState: TimelineStore = useTimeline.getState()

    // note: do NOT use the visibleSegments here,
    // because resolveSegment is 100% asynchronous,
    // meaning it might be called on invisible segments too!
    const { clap, segments: allSegments } = timelineState

    if (!clap?.meta || !allSegments.length) {
      return segment
      // throw new Error(`please call setSegmentRender(...) first`)
    }

    const shotSegments = filterSegments(
      ClapSegmentFilteringMode.ANY,
      segment,
      allSegments
    )

    if (segment.status === ClapSegmentStatus.IN_PROGRESS) {
      // console.log(`useResolver.resolveSegment(): warning: this segment is already being generated!`)
      return segment
    }

    segment.status = ClapSegmentStatus.IN_PROGRESS

    try {
      const entities = clap.entityIndex || {}

      const speakingCharactersIds = shotSegments.map(s =>
        s.category === ClapSegmentCategory.DIALOGUE ? s.entityId : null
      ).filter(id => id) as string[]

      const generalCharactersIds = shotSegments.map(s =>
        s.category === ClapSegmentCategory.CHARACTER ? s.entityId : null
      ).filter(id => id) as string[]

      const mainCharacterId: string | undefined = speakingCharactersIds.at(0) || generalCharactersIds.at(0) || undefined

      const mainCharacterEntity: ClapEntity | undefined = mainCharacterId ? (entities[mainCharacterId] || undefined) : undefined

      const request: ResolveRequest = {
        settings,
        segment,
        segments: shotSegments,
        entities,
        speakingCharactersIds,
        generalCharactersIds,
        mainCharacterId,
        mainCharacterEntity,
        meta: clap.meta,
      }

      const res = await fetch("/api/resolve", {
        method: "POST",
        headers: {
          "Content-Type": "application/json",
        },
        body: JSON.stringify(request)
      })
      // console.log(`useResolver.resolveSegment(): result from /api/resolve:`, res)

      const newSegmentData = (await res.json()) as ClapSegment
      // console.log(`useResolver.resolveSegment(): newSegmentData`, newSegmentData)

      const {
        id,
        assetUrl,
        assetDurationInMs,
        assetFileFormat,
        assetSourceType,
        status
      } = newSegmentData

      // note: this modifies the old object in-place
      const newSegment = Object.assign(segment, {
        id,
        assetUrl,
        assetDurationInMs,
        assetFileFormat,
        assetSourceType,
        status
      })

      return newSegment
    } catch (err) {
      console.error(`useResolver.resolveSegment(): error: ${err}`)

      // we could do that in a future version to improve error tracking
      // segment.status = ClapSegmentStatus.ERROR
    } finally {
      segment.status = ClapSegmentStatus.COMPLETED
    }

    return segment
  }

}))
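A sketch of how the loop might be started once on the client (the effect placement is an assumption; startLoop() itself guards against double starts through isRunning):

useEffect(() => {
  useResolver.getState().startLoop()
}, [])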
src/lib/core/constants.ts
CHANGED
@@ -4,7 +4,7 @@
 export const HARD_LIMIT_NB_MAX_ASSETS_TO_GENERATE_IN_PARALLEL = 32
 
 export const APP_NAME = "Clapper AI"
-export const APP_REVISION = "r2024-06-
+export const APP_REVISION = "r2024-06-11"
 
 export const APP_DOMAIN = "Clapper.app"
 export const APP_LINK = "https://clapper.app"
src/lib/utils/convertToJpeg.ts
ADDED

import sharp from 'sharp'

export function convertToJpeg(imageAsBase64DataUri: string): Promise<string> {
  const matches = imageAsBase64DataUri.match(/^data:image\/([A-Za-z-+\/]+);base64,(.+)$/)
  if (!matches || matches.length !== 3) {
    throw new Error('Invalid input string format')
  }

  const imageData = Buffer.from(matches[2], 'base64')

  return sharp(imageData)
    .jpeg({
      quality: 97
    })
    .toBuffer()
    .then(newImageData => {
      const base64Image = Buffer.from(newImageData).toString('base64')
      return `data:image/jpeg;base64,${base64Image}`
    })
}
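A quick usage sketch (the input is a hypothetical PNG data URI; sharp runs server-side, so this belongs in an API route):

const pngDataUri = "data:image/png;base64,iVBORw0KGgo..." // hypothetical payload
const jpegDataUri = await convertToJpeg(pngDataUri)
// => "data:image/jpeg;base64,/9j/..." re-encoded at quality 97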
src/lib/utils/decodeOutput.ts
CHANGED
@@ -1,11 +1,23 @@
 import { fetchContentToBase64 } from "./fetchContentToBase64"
+import { convertToJpeg } from "./convertToJpeg"
 
 export async function decodeOutput(input: any): Promise<string> {
   const urlOrBase64 = `${input || ''}`
 
   if (!urlOrBase64) { return '' }
 
+  const base64Url =
+    urlOrBase64.startsWith("data:")
+    ? urlOrBase64
+    : (await fetchContentToBase64(urlOrBase64))
+
+  // this step is important since some providers store data as PNG,
+  // which is unreasonable, since a few frames quickly add up to 10 MB:
+  // we can't afford to have a 20 GB .clap file
+  //
+  // if you really want to have a pro, Hollywood-grade storyboard storage,
+  // this isn't impossible, but then you need to use either file paths or remote URL paths
+  const jpegImageAsBase64 = await convertToJpeg(base64Url)
+
+  return jpegImageAsBase64
+}
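So whichever form a provider returns, the result is normalized to a JPEG data URI; a sketch with hypothetical inputs:

const a = await decodeOutput("https://example.com/frame.png")   // fetched, then re-encoded to JPEG
const b = await decodeOutput("data:image/webp;base64,UklGR...") // already inline, re-encoded only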
src/lib/utils/{getRenderRequestPrompts.ts → getResolveRequestPrompts.ts}
RENAMED
@@ -3,7 +3,7 @@ import { getVideoPrompt } from "@aitube/engine"
 
 import { SettingsState } from "@/controllers/settings"
 
-export function getRenderRequestPrompts({
+export function getResolveRequestPrompts({
   settings,
   segment,
   segments,
@@ -17,6 +17,7 @@ export function getRenderRequestPrompts({
   positivePrompt: string
   negativePrompt: string
 } {
+
   const videoPrompt = getVideoPrompt(
     segments,
     entities
@@ -34,7 +35,7 @@ export function getRenderRequestPrompts({
     : segment.category === ClapSegmentCategory.STORYBOARD
     ? settings.imagePromptSuffix
     : ""
-  ].map(x => x.trim()).join(", ")
+  ].map(x => x.trim()).filter(x => x).join(", ")
 
   const negativePrompt = [
     segment.category === ClapSegmentCategory.VIDEO
@@ -42,7 +43,7 @@ export function getRenderRequestPrompts({
     : segment.category === ClapSegmentCategory.STORYBOARD
     ? settings.imageNegativePrompt
     : ""
-  ].map(x => x.trim()).join(", ")
+  ].map(x => x.trim()).filter(x => x).join(", ")
 
   return {
     positivePrompt,
src/types.ts
CHANGED
@@ -37,7 +37,7 @@ export enum ComfyIcuAccelerator {
   H100 = "H100"
 }
 
-export type RenderRequest = {
+export type ResolveRequest = {
   settings: SettingsState
 
   // the reference segment to render (eg. storyboard or video)
@@ -128,11 +128,48 @@ export interface ImageSegment {
   score: number;
 }
 
+export enum SegmentVisibility {
+  // the segment is visible, and the user explicitly requested to render it before the others
+  DEMANDED = "DEMANDED",
+
+  // TODO: add some implicit intermediary priority options
+  // such as SELECTED, HOVERED..
+
+  // the segment (or at least a portion of it) is currently visible in the sliding window
+  VISIBLE = "VISIBLE",
+
+  // the segment is hidden, but not too far from the sliding window
+  BUFFERED = "BUFFERED",
+
+  // fully hidden, far from the sliding window
+  HIDDEN = "HIDDEN"
+}
+
+// used for sorting
+export const SegmentVisibilityPriority: Record<SegmentVisibility, number> = {
+  // the segment is visible, and the user explicitly requested to render it before the others
+  [SegmentVisibility.DEMANDED]: 3,
+
+  // TODO: add some implicit intermediary priority options
+  // such as SELECTED, HOVERED..
+
+  // the segment (or at least a portion of it) is currently visible in the sliding window
+  [SegmentVisibility.VISIBLE]: 2,
+
+  // the segment is hidden, but not too far from the sliding window
+  [SegmentVisibility.BUFFERED]: 1,
+
+  // fully hidden, far from the sliding window
+  [SegmentVisibility.HIDDEN]: 0
+}
+
 // some data can only exist inside a browser session (eg. AudioBuffer)
 // or at least data that only makes sense on the client side
 // we could put things like a mouse hover or selected state in here
 export type BrowserOnlySegmentData = {
   audioBuffer?: AudioBuffer
+
+  visibility?: SegmentVisibility
 }
 
 export type RuntimeSegment = ClapSegment & BrowserOnlySegmentData