diff --git a/.env b/.env
index 8443be9ff582d3200bc967518837e930b83710df..7498243cc012e841aeb745fc0247394fc8403ed6 100644
--- a/.env
+++ b/.env
@@ -17,6 +17,11 @@ WINNERS=""
AUTH_ALCHEMY_API_KEY=""
+AUTH_OPENAI_API_KEY=""
+
+VIDEOCHAIN_API_URL=""
+VIDEOCHAIN_API_KEY=""
+
# ----------- CENSORSHIP -------
ENABLE_CENSORSHIP=
FINGERPRINT_KEY=
diff --git a/next.config.js b/next.config.js
index 5cd8cc341fdc321d3ffde0907685dfbec1db0686..d3b8458ff2b2f7f712d20ec2700b66c30bc7744e 100644
--- a/next.config.js
+++ b/next.config.js
@@ -1,6 +1,15 @@
/** @type {import('next').NextConfig} */
const nextConfig = {
output: 'standalone',
+ experimental: {
+ serverActions: {
+
+ // Necessary because we generate Clap files on the server side.
+ // However, we only generate text (no binary assets), so payloads should be
+ // lightweight — usually below 2 MB.
+ bodySizeLimit: '4mb',
+ },
+ }
}
module.exports = nextConfig
diff --git a/package-lock.json b/package-lock.json
index bbae859dfa9b334df3de1bbfa141c79c3f81bdc2..7fd23854763eb47efa3b729c7f41c51b06755135 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -9,7 +9,9 @@
"version": "0.0.0",
"dependencies": {
"@huggingface/hub": "0.12.3-oauth",
+ "@huggingface/inference": "^2.6.7",
"@jcoreio/async-throttle": "^1.6.0",
+ "@mediapipe/tasks-vision": "^0.10.12",
"@photo-sphere-viewer/core": "^5.7.2",
"@photo-sphere-viewer/equirectangular-video-adapter": "^5.7.2",
"@photo-sphere-viewer/gyroscope-plugin": "^5.7.2",
@@ -38,6 +40,7 @@
"@radix-ui/react-toast": "^1.1.5",
"@radix-ui/react-tooltip": "^1.0.7",
"@react-spring/web": "^9.7.3",
+ "@tailwindcss/container-queries": "^0.1.1",
"@types/lodash.debounce": "^4.0.9",
"@types/node": "20.4.2",
"@types/react": "18.2.15",
@@ -62,10 +65,12 @@
"markdown-yaml-metadata-parser": "^3.0.0",
"minisearch": "^6.3.0",
"next": "^14.1.4",
+ "openai": "^4.36.0",
"photo-sphere-viewer-lensflare-plugin": "^2.1.2",
"pick": "^0.0.1",
"postcss": "8.4.38",
"qs": "^6.12.0",
+ "query-string": "^9.0.0",
"react": "18.2.0",
"react-circular-progressbar": "^2.1.0",
"react-copy-to-clipboard": "^5.1.0",
@@ -77,6 +82,7 @@
"react-tuby": "^0.1.24",
"react-virtualized-auto-sizer": "^1.0.20",
"react-window-infinite-loader": "^1.0.9",
+ "runcss": "^0.1.6",
"sbd": "^1.0.19",
"sentence-splitter": "^4.3.0",
"sharp": "^0.33.3",
@@ -924,6 +930,14 @@
"node": ">=18"
}
},
+ "node_modules/@huggingface/inference": {
+ "version": "2.6.7",
+ "resolved": "https://registry.npmjs.org/@huggingface/inference/-/inference-2.6.7.tgz",
+ "integrity": "sha512-vFBqvtU3LhxjufTs0jcRrDSc0nK+lah10bOgvlIn80lAH4JwMzHHPBQ4g4ECEdRD0PIt6EpTiidEZQq2sArb5Q==",
+ "engines": {
+ "node": ">=18"
+ }
+ },
"node_modules/@humanwhocodes/config-array": {
"version": "0.11.14",
"resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz",
@@ -1477,6 +1491,11 @@
"@jridgewell/sourcemap-codec": "^1.4.14"
}
},
+ "node_modules/@mediapipe/tasks-vision": {
+ "version": "0.10.12",
+ "resolved": "https://registry.npmjs.org/@mediapipe/tasks-vision/-/tasks-vision-0.10.12.tgz",
+ "integrity": "sha512-688Vukid7hvGmx+7hzS/EQ3Q4diz4eeX4/FYDw8f/t56UjFueD8LTvA2rX5BCIwvT0oy8QHKh5uKIyct1AOFtQ=="
+ },
"node_modules/@next/env": {
"version": "14.1.4",
"resolved": "https://registry.npmjs.org/@next/env/-/env-14.1.4.tgz",
@@ -2852,6 +2871,14 @@
"tslib": "^2.4.0"
}
},
+ "node_modules/@tailwindcss/container-queries": {
+ "version": "0.1.1",
+ "resolved": "https://registry.npmjs.org/@tailwindcss/container-queries/-/container-queries-0.1.1.tgz",
+ "integrity": "sha512-p18dswChx6WnTSaJCSGx6lTmrGzNNvm2FtXmiO6AuA1V4U5REyoqwmT6kgAsIMdjo07QdAfYXHJ4hnMtfHzWgA==",
+ "peerDependencies": {
+ "tailwindcss": ">=3.2.0"
+ }
+ },
"node_modules/@textlint/ast-node-types": {
"version": "13.4.1",
"resolved": "https://registry.npmjs.org/@textlint/ast-node-types/-/ast-node-types-13.4.1.tgz",
@@ -2905,6 +2932,15 @@
"resolved": "https://registry.npmjs.org/@types/node/-/node-20.4.2.tgz",
"integrity": "sha512-Dd0BYtWgnWJKwO1jkmTrzofjK2QXXcai0dmtzvIBhcA+RsG5h8R3xlyta0kGOZRNfL9GuRtb1knmPEhQrePCEw=="
},
+ "node_modules/@types/node-fetch": {
+ "version": "2.6.11",
+ "resolved": "https://registry.npmjs.org/@types/node-fetch/-/node-fetch-2.6.11.tgz",
+ "integrity": "sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g==",
+ "dependencies": {
+ "@types/node": "*",
+ "form-data": "^4.0.0"
+ }
+ },
"node_modules/@types/prop-types": {
"version": "15.7.12",
"resolved": "https://registry.npmjs.org/@types/prop-types/-/prop-types-15.7.12.tgz",
@@ -3101,6 +3137,17 @@
"crypto-js": "^4.2.0"
}
},
+ "node_modules/abort-controller": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/abort-controller/-/abort-controller-3.0.0.tgz",
+ "integrity": "sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==",
+ "dependencies": {
+ "event-target-shim": "^5.0.0"
+ },
+ "engines": {
+ "node": ">=6.5"
+ }
+ },
"node_modules/acorn": {
"version": "8.11.3",
"resolved": "https://registry.npmjs.org/acorn/-/acorn-8.11.3.tgz",
@@ -3133,6 +3180,17 @@
"resolved": "https://registry.npmjs.org/aes-js/-/aes-js-3.0.0.tgz",
"integrity": "sha512-H7wUZRn8WpTq9jocdxQ2c8x2sKo9ZVmzfRE13GiNJXfp7NcKYEdvl3vspKjXox6RIG2VtaRe4JFvxG4rqp2Zuw=="
},
+ "node_modules/agentkeepalive": {
+ "version": "4.5.0",
+ "resolved": "https://registry.npmjs.org/agentkeepalive/-/agentkeepalive-4.5.0.tgz",
+ "integrity": "sha512-5GG/5IbQQpC9FpkRGsSvZI5QYeSCzlJHdpBQntCsuTOxhKD8lqKhrleg2Yi7yvMIf82Ycmmqln9U8V9qwEiJew==",
+ "dependencies": {
+ "humanize-ms": "^1.2.1"
+ },
+ "engines": {
+ "node": ">= 8.0.0"
+ }
+ },
"node_modules/ajv": {
"version": "6.12.6",
"resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz",
@@ -4021,6 +4079,14 @@
}
}
},
+ "node_modules/decode-uri-component": {
+ "version": "0.4.1",
+ "resolved": "https://registry.npmjs.org/decode-uri-component/-/decode-uri-component-0.4.1.tgz",
+ "integrity": "sha512-+8VxcR21HhTy8nOt6jf20w0c9CADrw1O8d+VZ/YzzCt4bJ3uBjw+D1q2osAB8RnpwwaeYBxy0HyKQxD5JBMuuQ==",
+ "engines": {
+ "node": ">=14.16"
+ }
+ },
"node_modules/deep-is": {
"version": "0.1.4",
"resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz",
@@ -4879,6 +4945,14 @@
"es5-ext": "~0.10.14"
}
},
+ "node_modules/event-target-shim": {
+ "version": "5.0.1",
+ "resolved": "https://registry.npmjs.org/event-target-shim/-/event-target-shim-5.0.1.tgz",
+ "integrity": "sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==",
+ "engines": {
+ "node": ">=6"
+ }
+ },
"node_modules/eventemitter3": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/eventemitter3/-/eventemitter3-5.0.1.tgz",
@@ -4977,6 +5051,17 @@
"node": ">=8"
}
},
+ "node_modules/filter-obj": {
+ "version": "5.1.0",
+ "resolved": "https://registry.npmjs.org/filter-obj/-/filter-obj-5.1.0.tgz",
+ "integrity": "sha512-qWeTREPoT7I0bifpPUXtxkZJ1XJzxWtfoWWkdVGqa+eCr3SHW/Ocp89o8vLvbUuQnadybJpjOKu4V+RwO6sGng==",
+ "engines": {
+ "node": ">=14.16"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/find-up": {
"version": "5.0.0",
"resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz",
@@ -5065,6 +5150,31 @@
"node": ">= 6"
}
},
+ "node_modules/form-data-encoder": {
+ "version": "1.7.2",
+ "resolved": "https://registry.npmjs.org/form-data-encoder/-/form-data-encoder-1.7.2.tgz",
+ "integrity": "sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A=="
+ },
+ "node_modules/formdata-node": {
+ "version": "4.4.1",
+ "resolved": "https://registry.npmjs.org/formdata-node/-/formdata-node-4.4.1.tgz",
+ "integrity": "sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==",
+ "dependencies": {
+ "node-domexception": "1.0.0",
+ "web-streams-polyfill": "4.0.0-beta.3"
+ },
+ "engines": {
+ "node": ">= 12.20"
+ }
+ },
+ "node_modules/formdata-node/node_modules/web-streams-polyfill": {
+ "version": "4.0.0-beta.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-4.0.0-beta.3.tgz",
+ "integrity": "sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==",
+ "engines": {
+ "node": ">= 14"
+ }
+ },
"node_modules/fraction.js": {
"version": "4.3.7",
"resolved": "https://registry.npmjs.org/fraction.js/-/fraction.js-4.3.7.tgz",
@@ -5411,6 +5521,14 @@
"entities": "^4.4.0"
}
},
+ "node_modules/humanize-ms": {
+ "version": "1.2.1",
+ "resolved": "https://registry.npmjs.org/humanize-ms/-/humanize-ms-1.2.1.tgz",
+ "integrity": "sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==",
+ "dependencies": {
+ "ms": "^2.0.0"
+ }
+ },
"node_modules/ignore": {
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz",
@@ -6277,6 +6395,43 @@
"node": "^10 || ^12 || >=14"
}
},
+ "node_modules/node-domexception": {
+ "version": "1.0.0",
+ "resolved": "https://registry.npmjs.org/node-domexception/-/node-domexception-1.0.0.tgz",
+ "integrity": "sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==",
+ "funding": [
+ {
+ "type": "github",
+ "url": "https://github.com/sponsors/jimmywarting"
+ },
+ {
+ "type": "github",
+ "url": "https://paypal.me/jimmywarting"
+ }
+ ],
+ "engines": {
+ "node": ">=10.5.0"
+ }
+ },
+ "node_modules/node-fetch": {
+ "version": "2.7.0",
+ "resolved": "https://registry.npmjs.org/node-fetch/-/node-fetch-2.7.0.tgz",
+ "integrity": "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==",
+ "dependencies": {
+ "whatwg-url": "^5.0.0"
+ },
+ "engines": {
+ "node": "4.x || >=6.0.0"
+ },
+ "peerDependencies": {
+ "encoding": "^0.1.0"
+ },
+ "peerDependenciesMeta": {
+ "encoding": {
+ "optional": true
+ }
+ }
+ },
"node_modules/node-gyp-build": {
"version": "4.8.0",
"resolved": "https://registry.npmjs.org/node-gyp-build/-/node-gyp-build-4.8.0.tgz",
@@ -6440,6 +6595,32 @@
"wrappy": "1"
}
},
+ "node_modules/openai": {
+ "version": "4.36.0",
+ "resolved": "https://registry.npmjs.org/openai/-/openai-4.36.0.tgz",
+ "integrity": "sha512-AtYrhhWY64LhB9P6f3H0nV8nTSaQJ89mWPnfNU5CnYg81zlYaV8nkyO+aTNfprdqP/9xv10woNNUgefXINT4Dg==",
+ "dependencies": {
+ "@types/node": "^18.11.18",
+ "@types/node-fetch": "^2.6.4",
+ "abort-controller": "^3.0.0",
+ "agentkeepalive": "^4.2.1",
+ "form-data-encoder": "1.7.2",
+ "formdata-node": "^4.3.2",
+ "node-fetch": "^2.6.7",
+ "web-streams-polyfill": "^3.2.1"
+ },
+ "bin": {
+ "openai": "bin/cli"
+ }
+ },
+ "node_modules/openai/node_modules/@types/node": {
+ "version": "18.19.31",
+ "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.31.tgz",
+ "integrity": "sha512-ArgCD39YpyyrtFKIqMDvjz79jto5fcI/SVUs2HwB+f0dAzq68yqOdyaSivLiLugSziTpNXLQrVb7RZFmdZzbhA==",
+ "dependencies": {
+ "undici-types": "~5.26.4"
+ }
+ },
"node_modules/optionator": {
"version": "0.9.3",
"resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.3.tgz",
@@ -6803,6 +6984,22 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/query-string": {
+ "version": "9.0.0",
+ "resolved": "https://registry.npmjs.org/query-string/-/query-string-9.0.0.tgz",
+ "integrity": "sha512-4EWwcRGsO2H+yzq6ddHcVqkCQ2EFUSfDMEjF8ryp8ReymyZhIuaFRGLomeOQLkrzacMHoyky2HW0Qe30UbzkKw==",
+ "dependencies": {
+ "decode-uri-component": "^0.4.1",
+ "filter-obj": "^5.1.0",
+ "split-on-first": "^3.0.0"
+ },
+ "engines": {
+ "node": ">=18"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/queue-microtask": {
"version": "1.2.3",
"resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz",
@@ -7156,6 +7353,11 @@
"queue-microtask": "^1.2.2"
}
},
+ "node_modules/runcss": {
+ "version": "0.1.6",
+ "resolved": "https://registry.npmjs.org/runcss/-/runcss-0.1.6.tgz",
+ "integrity": "sha512-RyG7VVUxZi8+ynXA4YFH7N6uXK0QgULKTEEMmeUkK/uhfNGiQitWRKQp5b+1AEKu4/QTslFEx7wu971vQQk9Tg=="
+ },
"node_modules/safe-array-concat": {
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/safe-array-concat/-/safe-array-concat-1.1.2.tgz",
@@ -7391,6 +7593,17 @@
"node": ">=0.10.0"
}
},
+ "node_modules/split-on-first": {
+ "version": "3.0.0",
+ "resolved": "https://registry.npmjs.org/split-on-first/-/split-on-first-3.0.0.tgz",
+ "integrity": "sha512-qxQJTx2ryR0Dw0ITYyekNQWpz6f8dGd7vffGNflQQ3Iqj9NJ6qiZ7ELpZsJ/QBhIVAiDfXdag3+Gp8RvWa62AA==",
+ "engines": {
+ "node": ">=12"
+ },
+ "funding": {
+ "url": "https://github.com/sponsors/sindresorhus"
+ }
+ },
"node_modules/sprintf-js": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.0.3.tgz",
@@ -7885,6 +8098,11 @@
"resolved": "https://registry.npmjs.org/toggle-selection/-/toggle-selection-1.0.6.tgz",
"integrity": "sha512-BiZS+C1OS8g/q2RRbJmy59xpyghNBqrr6k5L/uKBGRsTfxmu3ffiRnd8mlGPUVayg8pvfi5urfnu8TU7DVOkLQ=="
},
+ "node_modules/tr46": {
+ "version": "0.0.3",
+ "resolved": "https://registry.npmjs.org/tr46/-/tr46-0.0.3.tgz",
+ "integrity": "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="
+ },
"node_modules/ts-interface-checker": {
"version": "0.1.13",
"resolved": "https://registry.npmjs.org/ts-interface-checker/-/ts-interface-checker-0.1.13.tgz",
@@ -8102,6 +8320,11 @@
"url": "https://github.com/sponsors/ljharb"
}
},
+ "node_modules/undici-types": {
+ "version": "5.26.5",
+ "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+ "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="
+ },
"node_modules/update-browserslist-db": {
"version": "1.0.13",
"resolved": "https://registry.npmjs.org/update-browserslist-db/-/update-browserslist-db-1.0.13.tgz",
@@ -8236,6 +8459,19 @@
"resolved": "https://registry.npmjs.org/v8-compile-cache-lib/-/v8-compile-cache-lib-3.0.1.tgz",
"integrity": "sha512-wa7YjyUGfNZngI/vtK0UHAN+lgDCxBPCylVXGp0zu59Fz5aiGtNXaq3DhIov063MorB+VfufLh3JlF2KdTK3xg=="
},
+ "node_modules/web-streams-polyfill": {
+ "version": "3.3.3",
+ "resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz",
+ "integrity": "sha512-d2JWLCivmZYTSIoge9MsgFCZrt571BikcWGYkjC1khllbTeDlGqZ2D8vD8E/lJa8WGWbb7Plm8/XJYV7IJHZZw==",
+ "engines": {
+ "node": ">= 8"
+ }
+ },
+ "node_modules/webidl-conversions": {
+ "version": "3.0.1",
+ "resolved": "https://registry.npmjs.org/webidl-conversions/-/webidl-conversions-3.0.1.tgz",
+ "integrity": "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="
+ },
"node_modules/websocket": {
"version": "1.0.34",
"resolved": "https://registry.npmjs.org/websocket/-/websocket-1.0.34.tgz",
@@ -8265,6 +8501,15 @@
"resolved": "https://registry.npmjs.org/ms/-/ms-2.0.0.tgz",
"integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A=="
},
+ "node_modules/whatwg-url": {
+ "version": "5.0.0",
+ "resolved": "https://registry.npmjs.org/whatwg-url/-/whatwg-url-5.0.0.tgz",
+ "integrity": "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==",
+ "dependencies": {
+ "tr46": "~0.0.3",
+ "webidl-conversions": "^3.0.0"
+ }
+ },
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
diff --git a/package.json b/package.json
index 8f4208024156a0b76aecbbb2ced088877d1fc882..6e43c7854c0fc4d73f179e7e861e97da11d436de 100644
--- a/package.json
+++ b/package.json
@@ -10,7 +10,9 @@
},
"dependencies": {
"@huggingface/hub": "0.12.3-oauth",
+ "@huggingface/inference": "^2.6.7",
"@jcoreio/async-throttle": "^1.6.0",
+ "@mediapipe/tasks-vision": "^0.10.12",
"@photo-sphere-viewer/core": "^5.7.2",
"@photo-sphere-viewer/equirectangular-video-adapter": "^5.7.2",
"@photo-sphere-viewer/gyroscope-plugin": "^5.7.2",
@@ -39,6 +41,7 @@
"@radix-ui/react-toast": "^1.1.5",
"@radix-ui/react-tooltip": "^1.0.7",
"@react-spring/web": "^9.7.3",
+ "@tailwindcss/container-queries": "^0.1.1",
"@types/lodash.debounce": "^4.0.9",
"@types/node": "20.4.2",
"@types/react": "18.2.15",
@@ -63,10 +66,12 @@
"markdown-yaml-metadata-parser": "^3.0.0",
"minisearch": "^6.3.0",
"next": "^14.1.4",
+ "openai": "^4.36.0",
"photo-sphere-viewer-lensflare-plugin": "^2.1.2",
"pick": "^0.0.1",
"postcss": "8.4.38",
"qs": "^6.12.0",
+ "query-string": "^9.0.0",
"react": "18.2.0",
"react-circular-progressbar": "^2.1.0",
"react-copy-to-clipboard": "^5.1.0",
@@ -78,6 +83,7 @@
"react-tuby": "^0.1.24",
"react-virtualized-auto-sizer": "^1.0.20",
"react-window-infinite-loader": "^1.0.9",
+ "runcss": "^0.1.6",
"sbd": "^1.0.19",
"sentence-splitter": "^4.3.0",
"sharp": "^0.33.3",
diff --git a/public/bubble.jpg b/public/bubble.jpg
deleted file mode 100644
index 22e44c049b61e7b56281e8a74504855959970617..0000000000000000000000000000000000000000
Binary files a/public/bubble.jpg and /dev/null differ
diff --git a/public/favicon.ico b/public/favicon.ico
index 060fa8ce26f545dd54e28b76401e5bc7a55b7c92..73beb52bd315adbbda0ee88ddb1e521dd830d96f 100644
Binary files a/public/favicon.ico and b/public/favicon.ico differ
diff --git a/public/favicon/favicon-114-precomposed.png b/public/favicon/favicon-114-precomposed.png
deleted file mode 100644
index be8953b99cc353a6ea9047e83a8c28a627cff46c..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-114-precomposed.png and /dev/null differ
diff --git a/public/favicon/favicon-120-precomposed.png b/public/favicon/favicon-120-precomposed.png
deleted file mode 100644
index 3aab950a1f0268f0642e87e8fec63ef8f064c4da..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-120-precomposed.png and /dev/null differ
diff --git a/public/favicon/favicon-144-precomposed.png b/public/favicon/favicon-144-precomposed.png
deleted file mode 100644
index e29c5d95a6d22dd36aea448c8eda8df03e875815..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-144-precomposed.png and /dev/null differ
diff --git a/public/favicon/favicon-152-precomposed.png b/public/favicon/favicon-152-precomposed.png
deleted file mode 100644
index 6201f9f8fe506e8562741ab990c33c7a39d714d9..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-152-precomposed.png and /dev/null differ
diff --git a/public/favicon/favicon-180-precomposed.png b/public/favicon/favicon-180-precomposed.png
deleted file mode 100644
index 241a60c82c96c8665d4935632a1db121b8a99387..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-180-precomposed.png and /dev/null differ
diff --git a/public/favicon/favicon-192.png b/public/favicon/favicon-192.png
deleted file mode 100644
index ecc6cbefdfc1232e92b40cc616cc34388b0360da..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-192.png and /dev/null differ
diff --git a/public/favicon/favicon-32.png b/public/favicon/favicon-32.png
deleted file mode 100644
index 4076fa1b3ea9a28897aeb9dbb37bc8bfa4f1a624..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-32.png and /dev/null differ
diff --git a/public/favicon/favicon-36.png b/public/favicon/favicon-36.png
deleted file mode 100644
index 4bb5a3262eff1c5ba29e3e06320b9d76a07565af..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-36.png and /dev/null differ
diff --git a/public/favicon/favicon-48.png b/public/favicon/favicon-48.png
deleted file mode 100644
index 69d6328355ed22cf0dca75f558351e1f585af8bb..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-48.png and /dev/null differ
diff --git a/public/favicon/favicon-57.png b/public/favicon/favicon-57.png
deleted file mode 100644
index 91ac87f90441ddbe723a45cf7e0b16eded515d21..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-57.png and /dev/null differ
diff --git a/public/favicon/favicon-60.png b/public/favicon/favicon-60.png
deleted file mode 100644
index cf5ee0bea0c30fea6addb1134d8bfeb7ca888e0d..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-60.png and /dev/null differ
diff --git a/public/favicon/favicon-72-precomposed.png b/public/favicon/favicon-72-precomposed.png
deleted file mode 100644
index 4e957de6a9137ec73ab51ba9317e8c13d1e45bf5..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-72-precomposed.png and /dev/null differ
diff --git a/public/favicon/favicon-72.png b/public/favicon/favicon-72.png
deleted file mode 100644
index 4e957de6a9137ec73ab51ba9317e8c13d1e45bf5..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-72.png and /dev/null differ
diff --git a/public/favicon/favicon-76.png b/public/favicon/favicon-76.png
deleted file mode 100644
index 7eb8efbd1087dc53784116053667c49073e08d3d..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-76.png and /dev/null differ
diff --git a/public/favicon/favicon-96.png b/public/favicon/favicon-96.png
deleted file mode 100644
index 5098aed1cda10c42eabcacb721d17b62dc5a8f3a..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon-96.png and /dev/null differ
diff --git a/public/favicon/favicon.ico b/public/favicon/favicon.ico
deleted file mode 100644
index 060fa8ce26f545dd54e28b76401e5bc7a55b7c92..0000000000000000000000000000000000000000
Binary files a/public/favicon/favicon.ico and /dev/null differ
diff --git a/public/favicon/manifest.json b/public/favicon/manifest.json
deleted file mode 100644
index d0d92afbc1530e91966f13b737cea8885bf5a111..0000000000000000000000000000000000000000
--- a/public/favicon/manifest.json
+++ /dev/null
@@ -1,41 +0,0 @@
-{
- "name": "pollo",
- "icons": [
- {
- "src": "\/favicon-36.png",
- "sizes": "36x36",
- "type": "image\/png",
- "density": 0.75
- },
- {
- "src": "\/favicon-48.png",
- "sizes": "48x48",
- "type": "image\/png",
- "density": 1
- },
- {
- "src": "\/favicon-72.png",
- "sizes": "72x72",
- "type": "image\/png",
- "density": 1.5
- },
- {
- "src": "\/favicon-96.png",
- "sizes": "96x96",
- "type": "image\/png",
- "density": 2
- },
- {
- "src": "\/favicon-144.png",
- "sizes": "144x144",
- "type": "image\/png",
- "density": 3
- },
- {
- "src": "\/favicon-192.png",
- "sizes": "192x192",
- "type": "image\/png",
- "density": 4
- }
- ]
-}
diff --git a/public/huggingface-avatar.jpeg b/public/huggingface-avatar.jpeg
deleted file mode 100644
index 54a5d6ef0a3cf061cf1602db61321c6a1626453c..0000000000000000000000000000000000000000
Binary files a/public/huggingface-avatar.jpeg and /dev/null differ
diff --git a/public/icon.png b/public/icon.png
index ecc6cbefdfc1232e92b40cc616cc34388b0360da..d1bcb2646f92b6f65ac4bab1570dc9520ac4f266 100644
Binary files a/public/icon.png and b/public/icon.png differ
diff --git a/public/mask.png b/public/mask.png
deleted file mode 100644
index 5a1047425e0047f0449aabee676019c801cd7cf3..0000000000000000000000000000000000000000
Binary files a/public/mask.png and /dev/null differ
diff --git a/public/next.svg b/public/next.svg
deleted file mode 100644
index 5174b28c565c285e3e312ec5178be64fbeca8398..0000000000000000000000000000000000000000
--- a/public/next.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/public/report.jpg b/public/report.jpg
deleted file mode 100644
index 3b61d4dd3994f57296b020e99dbf1e043cb5e98a..0000000000000000000000000000000000000000
Binary files a/public/report.jpg and /dev/null differ
diff --git a/public/vercel.svg b/public/vercel.svg
deleted file mode 100644
index d2f84222734f27b623d1c80dda3561b04d1284af..0000000000000000000000000000000000000000
--- a/public/vercel.svg
+++ /dev/null
@@ -1 +0,0 @@
-
\ No newline at end of file
diff --git a/src/app/server/actions/README.md b/src/app/api/actions/README.md
similarity index 100%
rename from src/app/server/actions/README.md
rename to src/app/api/actions/README.md
diff --git a/src/app/server/actions/ai-tube-hf/README.md b/src/app/api/actions/ai-tube-hf/README.md
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/README.md
rename to src/app/api/actions/ai-tube-hf/README.md
diff --git a/src/app/server/actions/ai-tube-hf/deleteFileFromDataset.ts b/src/app/api/actions/ai-tube-hf/deleteFileFromDataset.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/deleteFileFromDataset.ts
rename to src/app/api/actions/ai-tube-hf/deleteFileFromDataset.ts
diff --git a/src/app/server/actions/ai-tube-hf/deleteVideoRequest.ts b/src/app/api/actions/ai-tube-hf/deleteVideoRequest.ts
similarity index 87%
rename from src/app/server/actions/ai-tube-hf/deleteVideoRequest.ts
rename to src/app/api/actions/ai-tube-hf/deleteVideoRequest.ts
index 8650561b501689255b0a7232f7768a1f8f8c96e6..93a6e8d20e87f225629ee4f15a1818b4702a366b 100644
--- a/src/app/server/actions/ai-tube-hf/deleteVideoRequest.ts
+++ b/src/app/api/actions/ai-tube-hf/deleteVideoRequest.ts
@@ -2,7 +2,7 @@
import { MediaInfo } from "@/types/general"
import { deleteFileFromDataset } from "./deleteFileFromDataset"
-import { formatPromptFileName } from "../utils/formatPromptFileName"
+import { formatPromptFileName } from "../../utils/formatPromptFileName"
export async function deleteVideoRequest({
video,
diff --git a/src/app/server/actions/ai-tube-hf/downloadClapProject.ts b/src/app/api/actions/ai-tube-hf/downloadClapProject.ts
similarity index 94%
rename from src/app/server/actions/ai-tube-hf/downloadClapProject.ts
rename to src/app/api/actions/ai-tube-hf/downloadClapProject.ts
index 03c6b993b53b9df545cfafd484eba797d3714e68..954107756b911ccaa4b498d2d9c7eb9af4601a72 100644
--- a/src/app/server/actions/ai-tube-hf/downloadClapProject.ts
+++ b/src/app/api/actions/ai-tube-hf/downloadClapProject.ts
@@ -6,8 +6,8 @@ import { ChannelInfo, MediaInfo, VideoRequest } from "@/types/general"
import { defaultVideoModel } from "@/app/config"
import { parseClap } from "@/lib/clap/parseClap"
-import { parseVideoModelName } from "../utils/parseVideoModelName"
-import { computeOrientationProjectionWidthHeight } from "../utils/computeOrientationProjectionWidthHeight"
+import { parseVideoModelName } from "../../utils/parseVideoModelName"
+import { computeOrientationProjectionWidthHeight } from "../../utils/computeOrientationProjectionWidthHeight"
import { downloadFileAsBlob } from "./downloadFileAsBlob"
diff --git a/src/app/server/actions/ai-tube-hf/downloadFileAsBlob.ts b/src/app/api/actions/ai-tube-hf/downloadFileAsBlob.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/downloadFileAsBlob.ts
rename to src/app/api/actions/ai-tube-hf/downloadFileAsBlob.ts
diff --git a/src/app/server/actions/ai-tube-hf/downloadFileAsText.ts b/src/app/api/actions/ai-tube-hf/downloadFileAsText.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/downloadFileAsText.ts
rename to src/app/api/actions/ai-tube-hf/downloadFileAsText.ts
diff --git a/src/app/server/actions/ai-tube-hf/downloadPlainText.ts b/src/app/api/actions/ai-tube-hf/downloadPlainText.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/downloadPlainText.ts
rename to src/app/api/actions/ai-tube-hf/downloadPlainText.ts
diff --git a/src/app/server/actions/ai-tube-hf/extendVideosWithStats.ts b/src/app/api/actions/ai-tube-hf/extendVideosWithStats.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/extendVideosWithStats.ts
rename to src/app/api/actions/ai-tube-hf/extendVideosWithStats.ts
diff --git a/src/app/server/actions/ai-tube-hf/getChannel.ts b/src/app/api/actions/ai-tube-hf/getChannel.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/getChannel.ts
rename to src/app/api/actions/ai-tube-hf/getChannel.ts
diff --git a/src/app/server/actions/ai-tube-hf/getChannelVideos.ts b/src/app/api/actions/ai-tube-hf/getChannelVideos.ts
similarity index 95%
rename from src/app/server/actions/ai-tube-hf/getChannelVideos.ts
rename to src/app/api/actions/ai-tube-hf/getChannelVideos.ts
index 89422e26a262ac50e3e6ec62778db457f6c8ee1c..c6c4098458d426500492aa1d162afd0172b69d73 100644
--- a/src/app/server/actions/ai-tube-hf/getChannelVideos.ts
+++ b/src/app/api/actions/ai-tube-hf/getChannelVideos.ts
@@ -6,7 +6,7 @@ import { getVideoRequestsFromChannel } from "./getVideoRequestsFromChannel"
import { adminApiKey } from "../config"
import { getVideoIndex } from "./getVideoIndex"
import { extendVideosWithStats } from "./extendVideosWithStats"
-import { computeOrientationProjectionWidthHeight } from "../utils/computeOrientationProjectionWidthHeight"
+import { computeOrientationProjectionWidthHeight } from "../../utils/computeOrientationProjectionWidthHeight"
import { defaultVideoModel } from "@/app/config"
// return
diff --git a/src/app/server/actions/ai-tube-hf/getChannels.ts b/src/app/api/actions/ai-tube-hf/getChannels.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/getChannels.ts
rename to src/app/api/actions/ai-tube-hf/getChannels.ts
diff --git a/src/app/server/actions/ai-tube-hf/getCredentials.ts b/src/app/api/actions/ai-tube-hf/getCredentials.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/getCredentials.ts
rename to src/app/api/actions/ai-tube-hf/getCredentials.ts
diff --git a/src/app/server/actions/ai-tube-hf/getPrivateChannels.ts b/src/app/api/actions/ai-tube-hf/getPrivateChannels.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/getPrivateChannels.ts
rename to src/app/api/actions/ai-tube-hf/getPrivateChannels.ts
diff --git a/src/app/server/actions/ai-tube-hf/getTags.ts b/src/app/api/actions/ai-tube-hf/getTags.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/getTags.ts
rename to src/app/api/actions/ai-tube-hf/getTags.ts
diff --git a/src/app/server/actions/ai-tube-hf/getVideo.ts b/src/app/api/actions/ai-tube-hf/getVideo.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/getVideo.ts
rename to src/app/api/actions/ai-tube-hf/getVideo.ts
diff --git a/src/app/server/actions/ai-tube-hf/getVideoIndex.ts b/src/app/api/actions/ai-tube-hf/getVideoIndex.ts
similarity index 100%
rename from src/app/server/actions/ai-tube-hf/getVideoIndex.ts
rename to src/app/api/actions/ai-tube-hf/getVideoIndex.ts
diff --git a/src/app/server/actions/ai-tube-hf/getVideoRequestsFromChannel.ts b/src/app/api/actions/ai-tube-hf/getVideoRequestsFromChannel.ts
similarity index 95%
rename from src/app/server/actions/ai-tube-hf/getVideoRequestsFromChannel.ts
rename to src/app/api/actions/ai-tube-hf/getVideoRequestsFromChannel.ts
index 540bec49deb5981d03214e323c6cd0b8eba5e26c..a36b7ef2e7cb529f43d3fecfbd5c6a57955a2b21 100644
--- a/src/app/server/actions/ai-tube-hf/getVideoRequestsFromChannel.ts
+++ b/src/app/api/actions/ai-tube-hf/getVideoRequestsFromChannel.ts
@@ -3,10 +3,10 @@
import { ChannelInfo, VideoRequest } from "@/types/general"
import { getCredentials } from "./getCredentials"
import { listFiles } from "@/lib/huggingface/hub/src"
-import { parsePromptFileName } from "../utils/parsePromptFileName"
+import { parsePromptFileName } from "../../utils/parsePromptFileName"
import { downloadFileAsText } from "./downloadFileAsText"
-import { parseDatasetPrompt } from "../utils/parseDatasetPrompt"
-import { computeOrientationProjectionWidthHeight } from "../utils/computeOrientationProjectionWidthHeight"
+import { parseDatasetPrompt } from "../../utils/parseDatasetPrompt"
+import { computeOrientationProjectionWidthHeight } from "../../utils/computeOrientationProjectionWidthHeight"
import { downloadClapProject } from "./downloadClapProject"
/**
diff --git a/src/app/server/actions/ai-tube-hf/getVideos.ts b/src/app/api/actions/ai-tube-hf/getVideos.ts
similarity index 93%
rename from src/app/server/actions/ai-tube-hf/getVideos.ts
rename to src/app/api/actions/ai-tube-hf/getVideos.ts
index 5dac5b4bbb1201cfb7f2c67f67768a17aef8ee4c..479b521eed08423e6eaa8855f9d3f14dc440f3ce 100644
--- a/src/app/server/actions/ai-tube-hf/getVideos.ts
+++ b/src/app/api/actions/ai-tube-hf/getVideos.ts
@@ -7,8 +7,8 @@ import { MediaInfo } from "@/types/general"
import { getVideoIndex } from "./getVideoIndex"
import { extendVideosWithStats } from "./extendVideosWithStats"
-import { isHighQuality } from "../utils/isHighQuality"
-import { isAntisocial } from "../utils/isAntisocial"
+import { isHighQuality } from "../../utils/isHighQuality"
+import { isAntisocial } from "../../utils/isAntisocial"
const HARD_LIMIT = 100
@@ -19,7 +19,7 @@ export async function getVideos({
niceToHaveTags = [],
sortBy = "date",
ignoreVideoIds = [],
- maxVideos = HARD_LIMIT,
+ maxNbMedias = HARD_LIMIT,
neverThrow = false,
renewCache = true,
}: {
@@ -42,7 +42,7 @@ export async function getVideos({
// eg. videos already watched, or disliked etc
ignoreVideoIds?: string[]
- maxVideos?: number
+ maxNbMedias?: number
neverThrow?: boolean
@@ -118,9 +118,9 @@ export async function getVideos({
)
// if we don't have enough videos
- if (videosMatchingFilters.length < maxVideos) {
+ if (videosMatchingFilters.length < maxNbMedias) {
// count how many we need
- const nbMissingVideos = maxVideos - videosMatchingFilters.length
+ const nbMissingVideos = maxNbMedias - videosMatchingFilters.length
// then we try to fill the gap with valid videos from other topics
const videosToUseAsFiller = allPotentiallyValidVideos
@@ -138,7 +138,7 @@ export async function getVideos({
const sanitizedVideos = videosMatchingFilters.filter(v => !isAntisocial(v))
// we enforce the max limit of HARD_LIMIT (eg. 100)
- const limitedNumberOfVideos = sanitizedVideos.slice(0, Math.min(HARD_LIMIT, maxVideos))
+ const limitedNumberOfVideos = sanitizedVideos.slice(0, Math.min(HARD_LIMIT, maxNbMedias))
// we ask Redis for the freshest stats
const videosWithStats = await extendVideosWithStats(limitedNumberOfVideos)
diff --git a/src/app/server/actions/ai-tube-hf/parseChannel.ts b/src/app/api/actions/ai-tube-hf/parseChannel.ts
similarity index 97%
rename from src/app/server/actions/ai-tube-hf/parseChannel.ts
rename to src/app/api/actions/ai-tube-hf/parseChannel.ts
index bbbf60498c1956fce3298ad40582ab23957ae3d2..655716c0d064a450bf3c0bba7e6982d1c1493972 100644
--- a/src/app/server/actions/ai-tube-hf/parseChannel.ts
+++ b/src/app/api/actions/ai-tube-hf/parseChannel.ts
@@ -1,7 +1,7 @@
"use server"
import { Credentials, downloadFile, whoAmI } from "@/lib/huggingface/hub/src"
-import { parseDatasetReadme } from "@/app/server/actions/utils/parseDatasetReadme"
+import { parseDatasetReadme } from "@/app/api/utils/parseDatasetReadme"
import { ChannelInfo, VideoGenerationModel, VideoOrientation } from "@/types/general"
import { adminCredentials } from "../config"
diff --git a/src/app/server/actions/ai-tube-hf/uploadVideoRequestToDataset.ts b/src/app/api/actions/ai-tube-hf/uploadVideoRequestToDataset.ts
similarity index 94%
rename from src/app/server/actions/ai-tube-hf/uploadVideoRequestToDataset.ts
rename to src/app/api/actions/ai-tube-hf/uploadVideoRequestToDataset.ts
index e7aca9d4209f2c497f1ed65180e15813d319d7ab..ab23bd0ef6d2f32b9416d6425c04766e5943b40e 100644
--- a/src/app/server/actions/ai-tube-hf/uploadVideoRequestToDataset.ts
+++ b/src/app/api/actions/ai-tube-hf/uploadVideoRequestToDataset.ts
@@ -4,8 +4,8 @@ import { Blob } from "buffer"
import { Credentials, uploadFile, whoAmI } from "@/lib/huggingface/hub/src"
import { ChannelInfo, VideoGenerationModel, MediaInfo, VideoOrientation, VideoRequest } from "@/types/general"
-import { formatPromptFileName } from "../utils/formatPromptFileName"
-import { computeOrientationProjectionWidthHeight } from "../utils/computeOrientationProjectionWidthHeight"
+import { formatPromptFileName } from "../../utils/formatPromptFileName"
+import { computeOrientationProjectionWidthHeight } from "../../utils/computeOrientationProjectionWidthHeight"
/**
* Save the video request to the user's own dataset
diff --git a/src/app/server/actions/comments.ts b/src/app/api/actions/comments.ts
similarity index 100%
rename from src/app/server/actions/comments.ts
rename to src/app/api/actions/comments.ts
diff --git a/src/app/server/actions/config.ts b/src/app/api/actions/config.ts
similarity index 100%
rename from src/app/server/actions/config.ts
rename to src/app/api/actions/config.ts
diff --git a/src/app/server/actions/redis.ts b/src/app/api/actions/redis.ts
similarity index 100%
rename from src/app/server/actions/redis.ts
rename to src/app/api/actions/redis.ts
diff --git a/src/app/server/actions/stats.ts b/src/app/api/actions/stats.ts
similarity index 100%
rename from src/app/server/actions/stats.ts
rename to src/app/api/actions/stats.ts
diff --git a/src/app/server/actions/submitVideoRequest.ts b/src/app/api/actions/submitVideoRequest.ts
similarity index 100%
rename from src/app/server/actions/submitVideoRequest.ts
rename to src/app/api/actions/submitVideoRequest.ts
diff --git a/src/app/server/actions/users.ts b/src/app/api/actions/users.ts
similarity index 100%
rename from src/app/server/actions/users.ts
rename to src/app/api/actions/users.ts
diff --git a/src/app/api/generators/clap/addLatentScenesToClap.ts b/src/app/api/generators/clap/addLatentScenesToClap.ts
new file mode 100644
index 0000000000000000000000000000000000000000..461106cb78d04bc034d267dcab9a5522b56f7735
--- /dev/null
+++ b/src/app/api/generators/clap/addLatentScenesToClap.ts
@@ -0,0 +1,105 @@
+"use server"
+
+import { newClap } from "@/lib/clap/newClap"
+import { newSegment } from "@/lib/clap/newSegment"
+
+import { LatentScenes } from "./types"
+import { serializeClap } from "@/lib/clap/serializeClap"
+import { getEmptyClap } from "@/lib/clap/emptyClap"
+import { ClapProject } from "@/lib/clap/types"
+
+let defaultSegmentDurationInMs = 2000
+
+/**
+ * This generates a fully valid Clap blob (compressed archive)
+ *
+ * @param param0
+ * @returns
+ */
+export async function addLatentScenesToClap({
+ scenes = [],
+ clap,
+ debug = false
+}: {
+ scenes?: LatentScenes
+ clap: ClapProject
+ debug?: boolean
+}): Promise<ClapProject> {
+
+ if (!Array.isArray(scenes) || !scenes?.length) {
+ return clap
+ }
+
+ let startTimeInMs = 0
+ let endTimeInMs = defaultSegmentDurationInMs
+
+ clap.segments.push(newSegment({
+ track: 0,
+ startTimeInMs,
+ endTimeInMs,
+ category: "interface",
+ prompt: "",
+ label: "fish",
+ outputType: "interface",
+ }))
+
+ for (const { characters, locations, actions } of scenes) {
+
+ startTimeInMs = endTimeInMs
+ endTimeInMs = startTimeInMs + defaultSegmentDurationInMs
+ let track = 0
+
+ for (const character of characters) {
+ clap.segments.push(newSegment({
+ track: track++,
+ startTimeInMs,
+ endTimeInMs,
+ category: "characters",
+ prompt: character,
+ label: character,
+ outputType: "text",
+ }))
+ }
+
+ for (const location of locations) {
+ clap.segments.push(newSegment({
+ track: track++,
+ startTimeInMs,
+ endTimeInMs,
+ category: "location",
+ prompt: location,
+ label: location,
+ outputType: "text",
+ }))
+ }
+
+ for (const action of actions) {
+ clap.segments.push(newSegment({
+ track: track++,
+ startTimeInMs,
+ endTimeInMs,
+ category: "action",
+ prompt: action,
+ label: action,
+ outputType: "text",
+ }))
+ }
+
+ clap.segments.push(newSegment({
+ track: track++,
+ startTimeInMs,
+ endTimeInMs,
+ category: "video",
+ prompt: "video",
+ label: "video",
+ outputType: "video",
+ }))
+ }
+
+ if (debug) {
+ console.log("latentScenesToClap: unpacked Clap content = ", JSON.stringify(clap, null, 2))
+ }
+
+
+ return clap
+}
\ No newline at end of file
diff --git a/src/app/api/generators/clap/continueClap.ts b/src/app/api/generators/clap/continueClap.ts
new file mode 100644
index 0000000000000000000000000000000000000000..8e2c8708bbc15651e2a646d4fce5f3e0529cd2cb
--- /dev/null
+++ b/src/app/api/generators/clap/continueClap.ts
@@ -0,0 +1,66 @@
+"use server"
+
+
+import { LatentScenes } from "./types"
+import { addLatentScenesToClap } from "./addLatentScenesToClap"
+import { getLatentScenes } from "./getLatentScenes"
+import { serializeClap } from "@/lib/clap/serializeClap"
+import { newClap } from "@/lib/clap/newClap"
+import { getEmptyClap } from "@/lib/clap/emptyClap"
+import { ClapProject } from "@/lib/clap/types"
+
+/**
+ * Imagine the continuity of a Clap file
+ *
+ * This serves multiple purposes, such as being able to create
+ * long stories in a more streamed way
+ *
+ * This should integrate multiple factors such as the event history, actions etc
+ *
+ * Be careful however as the context will grow at the same time as the story
+ * (it's the same issue as in the AI Comic Factory)
+ * so it may become harder and/or slower to perform the query
+ */
+export async function continueClap({
+ clap,
+ mode = "replace", // "append"
+ debug = false
+}: {
+ clap: ClapProject
+
+ // whether to replace or append the content
+ // replacing is the most efficient way to do things (smaller files)
+ // so it is the default mode
+ mode: "replace" | "append"
+
+ debug?: boolean
+}): Promise<Blob> {
+
+ // TODO a prompt like "imagine the next steps from.."
+ const prompt = ""
+
+ const scenes: LatentScenes = await getLatentScenes({
+ prompt,
+ debug,
+ })
+
+ // by default we always replace the content,
+ // so we need to remove the previous one
+ if (mode !== "append") {
+ clap.scenes = []
+ }
+
+ clap = await addLatentScenesToClap({
+ clap,
+ scenes,
+ debug,
+ })
+
+ // a Clap must always be transported as a zipped file
+ // technically, it could also be transported as text
+ // (and gzipped automatically between the HTTP server and browser)
+ // but I think it is better to keep the idea of a dedicated file format
+ const archive = await serializeClap(clap)
+
+ return archive
+}
\ No newline at end of file
diff --git a/src/app/api/generators/clap/generateClap.ts b/src/app/api/generators/clap/generateClap.ts
new file mode 100644
index 0000000000000000000000000000000000000000..ac4faef7c552c1aa33dba8d7cd56716393c1d52d
--- /dev/null
+++ b/src/app/api/generators/clap/generateClap.ts
@@ -0,0 +1,61 @@
+"use server"
+
+import { serializeClap } from "@/lib/clap/serializeClap"
+import { newClap } from "@/lib/clap/newClap"
+import { getEmptyClap } from "@/lib/clap/emptyClap"
+
+import { LatentScenes } from "./types"
+import { addLatentScenesToClap } from "./addLatentScenesToClap"
+import { getLatentScenes } from "./getLatentScenes"
+
+/**
+ * Generate a Clap file from scratch using a prompt
+ */
+export async function generateClap({
+ prompt = "",
+ debug = false
+}: {
+ prompt?: string
+ debug?: boolean
+} = {
+ prompt: "",
+ debug: false,
+}): Promise<Blob> {
+
+ const empty = await getEmptyClap()
+
+ if (!prompt?.length) {
+ return empty
+ }
+
+ let clap = newClap({
+ meta: {
+ title: "Latent content", // TODO: derive the title from the user prompt
+ description: "",
+ licence: "non commercial",
+ orientation: "landscape",
+ width: 1024,
+ height: 576,
+ defaultVideoModel: "SDXL",
+ extraPositivePrompt: [],
+ screenplay: "",
+ streamType: "interactive"
+ }
+ })
+
+ const scenes: LatentScenes = await getLatentScenes({
+ prompt,
+ debug,
+ })
+
+
+ clap = await addLatentScenesToClap({
+ clap,
+ scenes,
+ debug,
+ })
+
+ const archive = await serializeClap(clap)
+
+ return archive
+}
\ No newline at end of file
diff --git a/src/app/api/generators/clap/getLatentScenes.ts b/src/app/api/generators/clap/getLatentScenes.ts
new file mode 100644
index 0000000000000000000000000000000000000000..3726f964684eaa063e7818953432789ec16e89ae
--- /dev/null
+++ b/src/app/api/generators/clap/getLatentScenes.ts
@@ -0,0 +1,58 @@
+"use server"
+
+import YAML from "yaml"
+
+import { predict as predictWithHuggingFace } from "@/app/api/providers/huggingface/predictWithHuggingFace"
+import { predict as predictWithOpenAI } from "@/app/api/providers/openai/predictWithOpenAI"
+
+import { LatentScenes } from "./types"
+import { getSystemPrompt } from "./getSystemPrompt"
+import { unknownObjectToLatentScenes } from "./unknownObjectToLatentScenes"
+import { parseRawStringToYAML } from "../../utils/parseRawStringToYAML"
+
+export async function getLatentScenes({
+ prompt = "",
+ debug = false
+}: {
+ prompt?: string
+ debug?: boolean
+} = {}): Promise<LatentScenes> {
+
+ // abort early
+ if (!prompt) {
+ return []
+ }
+
+ const systemPrompt = getSystemPrompt()
+
+ const userPrompt = `generate a short story about: ${prompt}`
+
+ let scenes: LatentScenes = []
+ try {
+ // we use Hugging Face for now, as our users might try funny things,
+ // which could get us banned from OpenAI
+ let rawString = await predictWithHuggingFace({
+ systemPrompt,
+ userPrompt,
+ nbMaxNewTokens: 1200,
+ prefix: "",
+ })
+
+ if (debug) {
+ console.log("getLatentScenes: rawString = " + rawString)
+ }
+
+ const maybeLatentScenes = parseRawStringToYAML(rawString, [])
+
+ scenes = unknownObjectToLatentScenes(maybeLatentScenes)
+
+ if (debug) {
+ console.log(`getLatentScenes: scenes = ` + JSON.stringify(scenes, null, 2))
+ }
+ } catch (err) {
+ scenes = []
+ console.error(`getLatentScenes failed (${err})`)
+ }
+
+ return scenes
+}
\ No newline at end of file
diff --git a/src/app/api/generators/clap/getSystemPrompt.ts b/src/app/api/generators/clap/getSystemPrompt.ts
new file mode 100644
index 0000000000000000000000000000000000000000..49439e4117afa20701976c13f35d5ad9e605226b
--- /dev/null
+++ b/src/app/api/generators/clap/getSystemPrompt.ts
@@ -0,0 +1,53 @@
+
+export const getSystemPrompt = () => {
+ return `# Context
+You are a backend engine able to generate interactive projects in YAML.
+
+# Schema
+
+You will be given instructions to describe a story, and you need to return a YAML describing each scene as "character", "location", and "action".
+
+Here is a description of the schema in TypeScript for convenience (but you need to always reply using YAML):
+
+For the writing style of the location, please try to use the Stable Diffusion convention for prompts.
+
+\`\`\`typescript
+{
+ characters: string[] // list of characters visible in the scene
+ location: string
+ actions: string[]
+}[]
+\`\`\`
+
+# Samples
+
+Here are some basic sample outputs. In reality, you should create longer stories.
+For brevity the location is very short in the example, but in reality you should write stable diffusion prompts descriptions.
+
+## a short story about a frog turning into a princess, she becomes happy but there is a cliffhanger at the end of the episode
+
+\`\`\`yaml
+- characters: ["Fiona the Frog"]
+ location: A misty frog pond, mysterious, beautiful.
+ actions: "Fiona the Frog lived alone, spending her days hopping and swimming around the edges of Misty Pond."
+- characters: ["Fiona the Frog", "Ella the Elderly Witch"]
+ location: Pond, sunny, riverbank, herbs, morning light, beautiful.
+ actions: "One sunny morning, Fiona encountered Ella the Elderly Witch who was gathering herbs by the pond."
+- characters: ["Fiona the Frog", "Ella the Elderly Witch"]
+ location: Pond in the background, sunny, morning light, beautiful, bokeh
+ actions: "Ella, feeling pity for the lonely frog, decided to cast a magical spell. She whispered enchanted words and sprinkled Fiona with sparkling dust."
+- characters: ["Fiona the Frog"]
+ location: Glowing circle of magic, emitting light, on the grass, at night
+ actions: "Suddenly Fiona is feeling a whirl of sensations and her form starts changing under the glistening moonlight."
+- characters: ["Princess Fiona"]
+ location: Royal palace gardens, beautiful, french garden, medieval, in the morning.
+ actions: "As the magic settled, Fiona found herself transformed into a human princess, standing in the lush gardens of a grand palace."
+- characters: ["Princess Fiona", "Prince Henry"]
+ location: Royal palace, in the court, medieval, during the day.
+ actions: "Prince Henry is charming Princess Fiona, he wonders where she is coming from."
+- characters: ["Princess Fiona", "Prince Henry"]
+ location: Inside the royal palace, large medieval ball room, during a banquet.
+ actions: "Princess Fiona kisses the Prince, they are finally happy."
+\`\`\`
+`
+}
\ No newline at end of file
diff --git a/src/app/api/generators/clap/types.ts b/src/app/api/generators/clap/types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..d5ad7af6a74d9fbbce953348d25a1e74040766ae
--- /dev/null
+++ b/src/app/api/generators/clap/types.ts
@@ -0,0 +1,7 @@
+export type LatentScene = {
+ characters: string[]
+ locations: string[]
+ actions: string[]
+}
+
+export type LatentScenes = LatentScene[]
\ No newline at end of file
diff --git a/src/app/api/generators/clap/unknownObjectToLatentScenes.ts b/src/app/api/generators/clap/unknownObjectToLatentScenes.ts
new file mode 100644
index 0000000000000000000000000000000000000000..d643e81577b5b0fa428fc4a5314ccc758b30a61e
--- /dev/null
+++ b/src/app/api/generators/clap/unknownObjectToLatentScenes.ts
@@ -0,0 +1,20 @@
+import { parseStringArray } from "../../utils/parseStringArray"
+import { LatentScene, LatentScenes } from "./types"
+
+/**
+ * Process a YAML result from the LLM to make sure it is a LatentScenes
+ *
+ * @param something
+ * @returns
+ */
+export function unknownObjectToLatentScenes(something: any): LatentScenes {
+ let scenes: LatentScenes = []
+ if (Array.isArray(something)) {
+ scenes = something.map(thing => ({
+ characters: parseStringArray(thing && (thing?.characters || thing?.character)),
+ locations: parseStringArray(thing && (thing?.locations || thing?.location)),
+ actions: parseStringArray(thing && (thing?.actions || thing?.action)),
+ } as LatentScene))
+ }
+ return scenes
+}
\ No newline at end of file
diff --git a/src/app/api/generators/image/generateImageWithVideochain.ts b/src/app/api/generators/image/generateImageWithVideochain.ts
new file mode 100644
index 0000000000000000000000000000000000000000..56e5db78857998f8c8fe51b521ef1ec8295eb434
--- /dev/null
+++ b/src/app/api/generators/image/generateImageWithVideochain.ts
@@ -0,0 +1,151 @@
+"use server"
+
+import { RenderRequest, RenderedScene } from "@/types/general"
+
+// note: there is no / at the end in the variable
+// so we have to add it ourselves if needed
+const apiUrl = `${process.env.VIDEOCHAIN_API_URL || ""}`
+const apiKey = `${process.env.VIDEOCHAIN_API_KEY || ""}`
+
+export async function newRender({
+ prompt,
+ negativePrompt,
+ nbFrames,
+ nbSteps,
+ width,
+ height,
+ turbo,
+ shouldRenewCache,
+ seed,
+}: {
+ prompt: string
+ negativePrompt: string
+ nbFrames: number
+ nbSteps: number
+ width: number
+ height: number
+ turbo: boolean
+ shouldRenewCache: boolean
+ seed?: number
+}) {
+ if (!prompt) {
+ console.error(`cannot call the rendering API without a prompt, aborting..`)
+ throw new Error(`cannot call the rendering API without a prompt, aborting..`)
+ }
+
+ const cacheKey = `render/${JSON.stringify({ prompt })}`
+
+ // return await Gorgon.get(cacheKey, async () => {
+
+ let defaulResult: RenderedScene = {
+ renderId: "",
+ status: "error",
+ assetUrl: "",
+ durationInMs: 0,
+ maskUrl: "",
+ error: "failed to fetch the data",
+ alt: "",
+ segments: []
+ }
+
+ try {
+ // console.log(`calling POST ${apiUrl}/render with seed ${seed} and prompt: ${prompt}`)
+
+ const res = await fetch(`${apiUrl}/render`, {
+ method: "POST",
+ headers: {
+ Accept: "application/json",
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ },
+ body: JSON.stringify({
+ prompt,
+ negativePrompt,
+ // nbFrames: 8 and nbSteps: 15 --> ~10 sec generation
+ nbFrames, // when nbFrames is 1, we will only generate static images
+ nbSteps, // 20 = fast, 30 = better, 50 = best
+ width,
+ height,
+ seed,
+ actionnables: [],
+ segmentation: "disabled", // one day we will remove this param, to make it automatic
+ upscalingFactor: 1, // let's disable upscaling right now
+ turbo, // always use turbo mode (it's for images only anyway)
+ // also what could be done is that we could use the width and height to control this
+ cache: shouldRenewCache ? "renew" : "use"
+ } as Partial<RenderRequest>),
+ cache: 'no-store',
+ // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
+ // next: { revalidate: 1 }
+ })
+
+ // console.log("res:", res)
+ // The return value is *not* serialized
+ // You can return Date, Map, Set, etc.
+
+ // Recommendation: handle errors
+ if (res.status !== 200) {
+ // This will activate the closest `error.js` Error Boundary
+ throw new Error('Failed to fetch data')
+ }
+
+ const response = (await res.json()) as RenderedScene
+ // console.log("response:", response)
+ return response
+ } catch (err) {
+ // console.error(err)
+ // Gorgon.clear(cacheKey)
+ return defaulResult
+ }
+}
+
+export async function getRender(renderId: string) {
+ if (!renderId) {
+ console.error(`cannot call the rendering API without a renderId, aborting..`)
+ throw new Error(`cannot call the rendering API without a renderId, aborting..`)
+ }
+
+ let defaulResult: RenderedScene = {
+ renderId: "",
+ status: "error",
+ assetUrl: "",
+ durationInMs: 0,
+ maskUrl: "",
+ error: "failed to fetch the data",
+ alt: "",
+ segments: []
+ }
+
+ try {
+ // console.log(`calling GET ${apiUrl}/render with renderId: ${renderId}`)
+ const res = await fetch(`${apiUrl}/render/${renderId}`, {
+ method: "GET",
+ headers: {
+ Accept: "application/json",
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ },
+ cache: 'no-store',
+ // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
+ // next: { revalidate: 1 }
+ })
+
+ // console.log("res:", res)
+ // The return value is *not* serialized
+ // You can return Date, Map, Set, etc.
+
+ // Recommendation: handle errors
+ if (res.status !== 200) {
+ // This will activate the closest `error.js` Error Boundary
+ throw new Error('Failed to fetch data')
+ }
+
+ const response = (await res.json()) as RenderedScene
+ // console.log("response:", response)
+ return response
+ } catch (err) {
+ console.error(err)
+ // Gorgon.clear(cacheKey)
+ return defaulResult
+ }
+}
\ No newline at end of file
diff --git a/src/app/api/generators/search/getLatentSearchResults.ts b/src/app/api/generators/search/getLatentSearchResults.ts
new file mode 100644
index 0000000000000000000000000000000000000000..e7235086df74b63d00f98dcff884bfca60106265
--- /dev/null
+++ b/src/app/api/generators/search/getLatentSearchResults.ts
@@ -0,0 +1,59 @@
+"use server"
+
+import YAML from "yaml"
+
+import { predict as predictWithHuggingFace } from "@/app/api/providers/huggingface/predictWithHuggingFace"
+import { predict as predictWithOpenAI } from "@/app/api/providers/openai/predictWithOpenAI"
+import { LatentSearchResults } from "./types"
+import { getSystemPrompt } from "./getSystemPrompt"
+import { parseRawStringToYAML } from "../../utils/parseRawStringToYAML"
+import { unknownObjectToLatentSearchResults } from "./unknownObjectToLatentSearchResults"
+
+export async function getLatentSearchResults({
+ prompt = "",
+ debug = false
+}: {
+ prompt?: string
+ debug?: boolean
+} = {}): Promise<LatentSearchResults> {
+
+ // abort early
+ if (!prompt) {
+ return []
+ }
+
+ const systemPrompt = getSystemPrompt()
+
+ const nbSearchResults = 8
+
+ const userPrompt = `${nbSearchResults} search results for "${prompt}"`
+
+ let results: LatentSearchResults = []
+ try {
+ // we use Hugging Face for now, as our users might try funny things,
+ // which could get us banned from OpenAI
+ let rawString = await predictWithHuggingFace({
+ systemPrompt,
+ userPrompt,
+ nbMaxNewTokens: 1200,
+ prefix: "",
+ })
+
+ if (debug) {
+ console.log("getLatentSearchResults: rawString = " + rawString)
+ }
+
+ const maybeLatentSearchResults = parseRawStringToYAML(rawString, [])
+
+ results = unknownObjectToLatentSearchResults(maybeLatentSearchResults)
+
+ if (debug) {
+ console.log(`getLatentSearchResults: scenes = ` + JSON.stringify(results, null, 2))
+ }
+ } catch (err) {
+ results = []
+ console.error(`getLatentSearchResults failed (${err})`)
+ }
+
+ return results
+}
\ No newline at end of file
diff --git a/src/app/api/generators/search/getSystemPrompt.ts b/src/app/api/generators/search/getSystemPrompt.ts
new file mode 100644
index 0000000000000000000000000000000000000000..74a8b87817e66ee8e9160aacb7aeba99c06d8f2c
--- /dev/null
+++ b/src/app/api/generators/search/getSystemPrompt.ts
@@ -0,0 +1,43 @@
+
+export const getSystemPrompt = () => {
+ return `# Context
+You are a backend engine of a video sharing platform called AiTube, able to generate search results in YAML.
+You should generate realistic results, similar to real video platforms and social media.
+
+# Schema
+
+You will be given instructions to describe a search query, and you need to return a YAML describing each search result as "title", "thumbnail", and "tags".
+
+Here is a description of the schema in TypeScript for convenience (but you need to always reply using YAML):
+
+\`\`\`typescript
+{
+ label: string // title of the video
+ summary: string // summary of the video
+ thumbnail: string // a stable diffusion or dall-e prompt, to describe the video thumbnail
+ tags: string[] // a list of tags
+}[]
+\`\`\`
+
+# Samples
+
+Here are some basic sample outputs
+
+## 3 search results for "tiktok recipes"
+
+\`\`\`yaml
+- label: I'm Testing Viral TikTok Recipes So You Don't Have To
+ summary: Video from an influencer, reviewing weird recipes that are becoming viral in TikTok. The video has a funny tone.
+ thumbnail: young woman, an influencer opening the mouth, very surprised, eating weird pink spaghetti, portrait, dramatic pose, high quality
+ tags: ["cooking", "review"]
+- label: I went on a TikTok Food Hack Marathon And It Made Me 🤢
+ summary: Funny video about an influencer reviewing viral TikTok recipes, but becomes hillarously sick as they are very bad. As an influencer video, it is made to maximize engagement and thus it exagerates everything.
+ thumbnail: an influencer being sick, nauseous, pixelated food plate, spectacular, grandiose
+ tags: ["food"]
+- label: I've Tried 10 TikTok Food Recipes 🌮 and This Was Surprising
+ summary: Video about an influencer who tried 10 recipes from TikTok, which turned out to be complete disaster, but in an hillarous way. The video is made to maximalize the dramatic effect and views.
+ thumbnail: an influencer shrugging, very expressive, mouth open, over a plate of weird hotdogs, dramatic pose
+ tags: ["food", "cooking", "review"]
+\`\`\`
+`
+}
\ No newline at end of file
diff --git a/src/app/api/generators/search/searchResultToMediaInfo.ts b/src/app/api/generators/search/searchResultToMediaInfo.ts
new file mode 100644
index 0000000000000000000000000000000000000000..8a4eddfed9a252646046b11b009e91bad1cafd76
--- /dev/null
+++ b/src/app/api/generators/search/searchResultToMediaInfo.ts
@@ -0,0 +1,231 @@
+import { v4 as uuidv4 } from "uuid"
+
+import {
+ ChannelInfo,
+ MediaInfo,
+ VideoStatus,
+ VideoGenerationModel,
+ MediaProjection,
+ VideoOrientation
+} from "@/types/general"
+
+import { LatentSearchResult, LatentSearchResults } from "./types"
+import { newRender } from "../../providers/videochain/renderWithVideoChain"
+
+const channel: ChannelInfo = {
+ /**
+ * We actually use the dataset ID for the channel ID.
+ *
+ */
+ id: "d25efcc1-3cc2-4b41-9f41-e3a93300ae5f",
+
+ /**
+ * The name used in the URL for the channel
+ *
+ * eg: my-time-travel-journeys
+ */
+ slug: "latent",
+
+ /**
+ * username id of the Hugging Face dataset
+ *
+ * ex: f9a38286ec3436a45edd2cca
+ */
+ // DISABLED FOR NOW
+ // datasetUserId: string
+
+ /**
+ * username slug of the Hugging Face dataset
+ *
+ * eg: jbilcke-hf
+ */
+ datasetUser: "",
+
+ /**
+ * dataset slug of the Hugging Face dataset
+ *
+ * eg: ai-tube-my-time-travel-journeys
+ */
+ datasetName: "",
+
+ label: "Latent",
+
+ description: "Latent",
+
+ thumbnail: "",
+
+ model: "SDXL",
+
+ lora: "",
+
+ style: "",
+
+ voice: "",
+
+ music: "",
+
+ /**
+ * The system prompt
+ */
+ prompt: "",
+
+ likes: 0,
+
+ tags: [],
+
+ updatedAt: new Date().toISOString(),
+
+ /**
+ * Default video orientation
+ */
+ orientation: "landscape"
+}
+
+export async function searchResultToMediaInfo(searchResult: LatentSearchResult): Promise<MediaInfo> {
+
+ const renderResult = await newRender({
+ prompt: searchResult.thumbnail,
+ negativePrompt: "",
+ nbFrames: 1,
+ nbSteps: 4,
+ width: 1024,
+ height: 576,
+ turbo: true,
+ shouldRenewCache: false,
+ seed: searchResult.seed,
+ })
+
+ const thumbnailUrl: string = renderResult.assetUrl || ""
+
+ const mediaInfo: MediaInfo = {
+ /**
+ * UUID (v4)
+ */
+ id: uuidv4(),
+
+ /**
+ * Status of the media
+ */
+ status: "published",
+
+ /**
+ * Human readable title for the media
+ */
+ label: searchResult.label,
+
+ /**
+ * Human readable description for the media
+ */
+ description: searchResult.summary,
+
+ /**
+ * Content prompt
+ */
+ prompt: searchResult.summary,
+
+ /**
+ * URL to the media thumbnail
+ */
+ thumbnailUrl,
+
+ /**
+ * URL to a clap file
+ */
+ clapUrl: "",
+
+ assetUrl: "",
+
+ /**
+ * This contains the storage URL of the higher-resolution content
+ */
+ assetUrlHd: "",
+
+ /**
+ * Counter for the number of views
+ *
+ * Note: should be managed by the index to prevent cheating
+ */
+ numberOfViews: 0,
+
+ /**
+ * Counter for the number of likes
+ *
+ * Note: should be managed by the index to prevent cheating
+ */
+ numberOfLikes: 0,
+
+ /**
+ * Counter for the number of dislikes
+ *
+ * Note: should be managed by the index to prevent cheating
+ */
+ numberOfDislikes: 0,
+
+ /**
+ * When was the media updated
+ */
+ updatedAt: new Date().toISOString(),
+
+ /**
+ * Arbitrary string tags to label the content
+ */
+ tags: searchResult.tags,
+
+ /**
+ * Model name
+ */
+ model: "SDXL",
+
+ /**
+ * LoRA name
+ */
+ lora: "",
+
+ /**
+ * style name
+ */
+ style: "",
+
+ /**
+ * Music prompt
+ */
+ music: "",
+
+ /**
+ * Voice prompt
+ */
+ voice: "",
+
+ /**
+ * The channel
+ */
+ channel,
+
+ /**
+ * Media duration (in seconds)
+ */
+ duration: 60,
+
+ /**
+ * Media width (eg. 1024)
+ */
+ width: 1024,
+
+ /**
+ * Media height (eg. 576)
+ */
+ height: 576,
+
+ /**
+ * General media aspect ratio
+ */
+ orientation: "landscape",
+
+ /**
+ * Media projection (cartesian by default)
+ */
+ projection: "latent"
+ }
+
+ return mediaInfo
+}
\ No newline at end of file
diff --git a/src/app/api/generators/search/types.ts b/src/app/api/generators/search/types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..80c33f6e6c84102a679c1e1c7ab1540341c053a1
--- /dev/null
+++ b/src/app/api/generators/search/types.ts
@@ -0,0 +1,9 @@
+export type LatentSearchResult = {
+ label: string
+ summary: string
+ thumbnail: string
+ tags: string[]
+ seed: number // static seed is necessary to ensure result consistency for the thumbnail
+}
+
+export type LatentSearchResults = LatentSearchResult[]
\ No newline at end of file
diff --git a/src/app/api/generators/search/unknownObjectToLatentSearchResults.ts b/src/app/api/generators/search/unknownObjectToLatentSearchResults.ts
new file mode 100644
index 0000000000000000000000000000000000000000..3db87ac5fa4d15b4b4159b2101f81096cdc9bd9e
--- /dev/null
+++ b/src/app/api/generators/search/unknownObjectToLatentSearchResults.ts
@@ -0,0 +1,20 @@
+import { generateSeed } from "@/lib/utils/generateSeed"
+import { parseString } from "../../utils/parseString"
+import { parseStringArray } from "../../utils/parseStringArray"
+import { LatentSearchResult, LatentSearchResults } from "./types"
+
+export function unknownObjectToLatentSearchResults(something: any): LatentSearchResults {
+ let results: LatentSearchResults = []
+
+ if (Array.isArray(something)) {
+ results = something.map(thing => ({
+ label: parseString(thing && (thing?.label || thing?.title)),
+ summary: parseString(thing && (thing?.summary || thing?.description || thing?.synopsis)),
+ thumbnail: parseString(thing && (thing?.thumbnail)),
+ tags: parseStringArray(thing && (thing?.tag)),
+ seed: generateSeed(), // a seed is necessary for consistency between search results and viewer
+ } as LatentSearchResult))
+ }
+
+ return results
+}
\ No newline at end of file
diff --git a/src/app/api/media/[mediaId]/route.ts b/src/app/api/media/[mediaId]/route.ts
index 46a7b54728ad8f482d42b04945d1b01d13c1e6c3..a855731a98a2c437600b3f5da6bca36bd7c87edd 100644
--- a/src/app/api/media/[mediaId]/route.ts
+++ b/src/app/api/media/[mediaId]/route.ts
@@ -1,6 +1,6 @@
import { NextResponse, NextRequest } from "next/server"
-import { getVideo } from "@/app/server/actions/ai-tube-hf/getVideo"
+import { getVideo } from "@/app/api/actions/ai-tube-hf/getVideo"
import { parseMediaProjectionType } from "@/lib/utils/parseMediaProjectionType";
export async function GET(req: NextRequest) {
@@ -18,7 +18,7 @@ export async function GET(req: NextRequest) {
${media.label} - AiTube
-
+
diff --git a/src/app/api/providers/anthropic/predictWithAnthropic.txt b/src/app/api/providers/anthropic/predictWithAnthropic.txt
new file mode 100644
index 0000000000000000000000000000000000000000..653d5ce5d04382d3bc9e8c8ee92cf591af5c0696
--- /dev/null
+++ b/src/app/api/providers/anthropic/predictWithAnthropic.txt
@@ -0,0 +1,47 @@
+"use server"
+
+import { LLMPredictionFunctionParams } from '@/types';
+import Anthropic from '@anthropic-ai/sdk';
+import { MessageParam } from '@anthropic-ai/sdk/resources';
+
+export async function predict({
+ systemPrompt,
+ userPrompt,
+ nbMaxNewTokens,
+ llmVendorConfig
+}: LLMPredictionFunctionParams): Promise<string> {
+ const anthropicApiKey = `${
+ llmVendorConfig.apiKey ||
+ process.env.AUTH_ANTHROPIC_API_KEY ||
+ ""
+ }`
+ const anthropicApiModel = `${
+ llmVendorConfig.modelId ||
+ process.env.LLM_ANTHROPIC_API_MODEL ||
+ "claude-3-opus-20240229"
+ }`
+
+ const anthropic = new Anthropic({
+ apiKey: anthropicApiKey,
+ })
+
+ const messages: MessageParam[] = [
+ { role: "user", content: userPrompt },
+ ]
+
+ try {
+ const res = await anthropic.messages.create({
+ messages: messages,
+ // stream: false,
+ system: systemPrompt,
+ model: anthropicApiModel,
+ // temperature: 0.8,
+ max_tokens: nbMaxNewTokens,
+ })
+
+ return res.content[0]?.text || ""
+ } catch (err) {
+ console.error(`error during generation: ${err}`)
+ return ""
+ }
+}
\ No newline at end of file
diff --git a/src/app/api/providers/groq/predictWithGroq.txt b/src/app/api/providers/groq/predictWithGroq.txt
new file mode 100644
index 0000000000000000000000000000000000000000..60225319633f031a576a9983a501d45d8c60ad7e
--- /dev/null
+++ b/src/app/api/providers/groq/predictWithGroq.txt
@@ -0,0 +1,46 @@
+"use server"
+
+import { LLMPredictionFunctionParams } from "@/types"
+import Groq from "groq-sdk"
+
+export async function predict({
+ systemPrompt,
+ userPrompt,
+ nbMaxNewTokens,
+ llmVendorConfig
+}: LLMPredictionFunctionParams): Promise<string> {
+ const groqApiKey = `${
+ llmVendorConfig.apiKey ||
+ process.env.AUTH_GROQ_API_KEY ||
+ ""
+ }`
+ const groqApiModel = `${
+ llmVendorConfig.modelId ||
+ process.env.LLM_GROQ_API_MODEL ||
+ "mixtral-8x7b-32768"
+ }`
+
+ const groq = new Groq({
+ apiKey: groqApiKey,
+ })
+
+ const messages: Groq.Chat.Completions.CompletionCreateParams.Message[] = [
+ { role: "system", content: systemPrompt },
+ { role: "user", content: userPrompt },
+ ]
+
+ try {
+ const res = await groq.chat.completions.create({
+ messages: messages,
+ model: groqApiModel,
+ stream: false,
+ temperature: 0.5,
+ max_tokens: nbMaxNewTokens,
+ })
+
+ return res.choices[0].message.content || ""
+ } catch (err) {
+ console.error(`error during generation: ${err}`)
+ return ""
+ }
+}
\ No newline at end of file
diff --git a/src/app/api/providers/huggingface/predictWithHuggingFace.ts b/src/app/api/providers/huggingface/predictWithHuggingFace.ts
new file mode 100644
index 0000000000000000000000000000000000000000..59f9795f9f2db0b93e3cb05329dbaae4f9b97da8
--- /dev/null
+++ b/src/app/api/providers/huggingface/predictWithHuggingFace.ts
@@ -0,0 +1,87 @@
+
+import { HfInference } from "@huggingface/inference"
+
+import { createZephyrPrompt } from "@/lib/prompts/createZephyrPrompt"
+
+import { LLMPredictionFunctionParams } from "../types"
+
+export async function predict({
+ systemPrompt,
+ userPrompt,
+ nbMaxNewTokens,
+ prefix,
+}: LLMPredictionFunctionParams): Promise<string> {
+
+ const hf = new HfInference(process.env.ADMIN_HUGGING_FACE_API_TOKEN)
+
+ let instructions = ""
+ try {
+ for await (const output of hf.textGenerationStream({
+ // model: "mistralai/Mixtral-8x7B-v0.1",
+ model: "mistralai/Mixtral-8x7B-Instruct-v0.1",
+ inputs: createZephyrPrompt([
+ { role: "system", content: systemPrompt },
+ { role: "user", content: userPrompt }
+ ]) + '\n' + prefix,
+
+ parameters: {
+ do_sample: true,
+ max_new_tokens: nbMaxNewTokens,
+ return_full_text: false,
+ }
+ })) {
+ instructions += output.token.text
+ // process.stdout.write(output.token.text)
+ if (
+ instructions.includes("</s>") ||
+ instructions.includes("<s>") ||
+ instructions.includes("/s>") ||
+ instructions.includes("[INST]") ||
+ instructions.includes("[/INST]") ||
+ instructions.includes("<SYS>") ||
+ instructions.includes("<<SYS>>") ||
+ instructions.includes("</SYS>") ||
+ instructions.includes("<</SYS>>") ||
+ instructions.includes("<|user|>") ||
+ instructions.includes("<|end|>") ||
+ instructions.includes("<|system|>") ||
+ instructions.includes("<|assistant|>")
+ ) {
+ break
+ }
+ }
+ } catch (err) {
+ // console.error(`error during generation: ${err}`)
+
+ // a common issue with Llama-2 might be that the model receives too many requests
+ if (`${err}` === "Error: Model is overloaded") {
+ instructions = ``
+ }
+ }
+
+ // need to do some cleanup of the garbage the LLM might have given us
+ let result =
+ instructions
+ .replaceAll("<|end|>", "")
+ .replaceAll("</s>", "")
+ .replaceAll("<s>", "")
+ .replaceAll("/s>", "")
+ .replaceAll("[INST]", "")
+ .replaceAll("[/INST]", "")
+ .replaceAll("<SYS>", "")
+ .replaceAll("<<SYS>>", "")
+ .replaceAll("</SYS>", "")
+ .replaceAll("<</SYS>>", "")
+ .replaceAll("<|system|>", "")
+ .replaceAll("<|user|>", "")
+ .replaceAll("<|all|>", "")
+ .replaceAll("<|assistant|>", "")
+ .replaceAll('""', '"')
+ .trim()
+
+ if (prefix && !result.startsWith(prefix)) {
+ result = prefix + result
+ }
+
+ return result
+}
diff --git a/src/app/api/providers/openai/predictWithOpenAI.ts b/src/app/api/providers/openai/predictWithOpenAI.ts
new file mode 100644
index 0000000000000000000000000000000000000000..7ab926c5afbd4cc12a4c3ed38e853a117fc9ca49
--- /dev/null
+++ b/src/app/api/providers/openai/predictWithOpenAI.ts
@@ -0,0 +1,44 @@
+"use server"
+
+import { OpenAI } from "openai"
+import { ChatCompletionMessageParam } from "openai/resources"
+
+import { LLMPredictionFunctionParams } from "../types"
+
+export async function predict({
+ systemPrompt,
+ userPrompt,
+ nbMaxNewTokens,
+}: LLMPredictionFunctionParams): Promise<string> {
+ const openaiApiKey = `${process.env.AUTH_OPENAI_API_KEY || ""}`
+ const openaiApiModel = "gpt-4-turbo"
+ const openaiApiBaseUrl = "https://api.openai.com/v1"
+ if (!openaiApiKey) { throw new Error(`missing OpenAI API key`) }
+
+ const openai = new OpenAI({
+ apiKey: openaiApiKey,
+ baseURL: openaiApiBaseUrl,
+ })
+
+ const messages: ChatCompletionMessageParam[] = [
+ { role: "system", content: systemPrompt },
+ { role: "user", content: userPrompt },
+ ]
+
+ try {
+ const res = await openai.chat.completions.create({
+ messages: messages,
+ stream: false,
+ model: openaiApiModel,
+ temperature: 0.8,
+ max_tokens: nbMaxNewTokens,
+
+ // TODO: use the nbPanels to define a max token limit
+ })
+
+ return res.choices[0].message.content || ""
+ } catch (err) {
+ console.error(`error during generation: ${err}`)
+ return ""
+ }
+}
\ No newline at end of file
diff --git a/src/app/api/providers/types.ts b/src/app/api/providers/types.ts
new file mode 100644
index 0000000000000000000000000000000000000000..df2cc659f939619f923f901d3b02449641b42e0e
--- /dev/null
+++ b/src/app/api/providers/types.ts
@@ -0,0 +1,21 @@
+
+// LLMEngine = the actual engine to use (eg. hugging face)
+export type LLMVendor =
+ | "HUGGINGFACE"
+ | "OPENAI"
+ | "GROQ"
+ | "ANTHROPIC"
+
+export type LLMVendorConfig = {
+ vendor: LLMVendor
+ apiKey: string
+ modelId: string
+}
+
+export type LLMPredictionFunctionParams = {
+ systemPrompt: string
+ userPrompt: string
+ nbMaxNewTokens: number
+ prefix?: string
+ // llmVendorConfig: LLMVendorConfig
+}
diff --git a/src/app/api/providers/videochain/renderWithVideoChain.ts b/src/app/api/providers/videochain/renderWithVideoChain.ts
new file mode 100644
index 0000000000000000000000000000000000000000..26534679507f8b8a48bfdd1ef72fd38b9b8864ed
--- /dev/null
+++ b/src/app/api/providers/videochain/renderWithVideoChain.ts
@@ -0,0 +1,153 @@
+"use server"
+
+import { RenderRequest, RenderedScene } from "@/types/general"
+
+// note: there is no / at the end in the variable
+// so we have to add it ourselves if needed
+const apiUrl = `${process.env.VIDEOCHAIN_API_URL || ""}`
+const apiKey = `${process.env.VIDEOCHAIN_API_KEY || ""}`
+
+export async function newRender({
+ prompt,
+ negativePrompt,
+ nbFrames,
+ nbSteps,
+ width,
+ height,
+ turbo,
+ shouldRenewCache,
+ seed,
+}: {
+ prompt: string
+ negativePrompt: string
+ nbFrames: number
+ nbSteps: number
+ width: number
+ height: number
+ turbo: boolean
+ shouldRenewCache: boolean
+ seed?: number
+}) {
+ if (!prompt) {
+ console.error(`cannot call the rendering API without a prompt, aborting..`)
+ throw new Error(`cannot call the rendering API without a prompt, aborting..`)
+ }
+
+ const cacheKey = `render/${JSON.stringify({ prompt })}`
+
+ // return await Gorgon.get(cacheKey, async () => {
+
+ let defaulResult: RenderedScene = {
+ renderId: "",
+ status: "error",
+ assetUrl: "",
+ durationInMs: 0,
+ maskUrl: "",
+ error: "failed to fetch the data",
+ alt: "",
+ segments: []
+ }
+
+ // console.log("fetch api:", `${apiUrl}/render`)
+ try {
+ // console.log(`calling POST ${apiUrl}/render with seed ${seed} and prompt: ${prompt}`)
+
+ const res = await fetch(`${apiUrl}/render`, {
+ method: "POST",
+ headers: {
+ Accept: "application/json",
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ },
+ body: JSON.stringify({
+ prompt,
+ negativePrompt,
+ // nbFrames: 8 and nbSteps: 15 --> ~10 sec generation
+ nbFrames, // when nbFrames is 1, we will only generate static images
+ nbSteps, // 20 = fast, 30 = better, 50 = best
+ width,
+ height,
+ seed,
+ actionnables: [],
+ segmentation: "disabled", // one day we will remove this param, to make it automatic
+ upscalingFactor: 1, // let's disable upscaling right now
+ turbo, // always use turbo mode (it's for images only anyway)
+ // also what could be done is that we could use the width and height to control this
+ cache: "ignore", // shouldRenewCache ? "renew" : "use",
+ wait: true,
+ } as Partial<RenderRequest>),
+ cache: 'no-store',
+ // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
+ // next: { revalidate: 1 }
+ })
+
+ // console.log("res:", res)
+ // The return value is *not* serialized
+ // You can return Date, Map, Set, etc.
+
+ // Recommendation: handle errors
+ if (res.status !== 200) {
+ // This will activate the closest `error.js` Error Boundary
+ throw new Error('Failed to fetch data')
+ }
+
+ const response = (await res.json()) as RenderedScene
+ // console.log("response:", response)
+ return response
+ } catch (err) {
+ // console.error(err)
+ // Gorgon.clear(cacheKey)
+ return defaulResult
+ }
+}
+
+export async function getRender(renderId: string) {
+ if (!renderId) {
+ console.error(`cannot call the rendering API without a renderId, aborting..`)
+ throw new Error(`cannot call the rendering API without a renderId, aborting..`)
+ }
+
+ let defaulResult: RenderedScene = {
+ renderId: "",
+ status: "error",
+ assetUrl: "",
+ durationInMs: 0,
+ maskUrl: "",
+ error: "failed to fetch the data",
+ alt: "",
+ segments: []
+ }
+
+ try {
+ // console.log(`calling GET ${apiUrl}/render with renderId: ${renderId}`)
+ const res = await fetch(`${apiUrl}/render/${renderId}`, {
+ method: "GET",
+ headers: {
+ Accept: "application/json",
+ "Content-Type": "application/json",
+ Authorization: `Bearer ${apiKey}`,
+ },
+ cache: 'no-store',
+ // we can also use this (see https://vercel.com/blog/vercel-cache-api-nextjs-cache)
+ // next: { revalidate: 1 }
+ })
+
+ // console.log("res:", res)
+ // The return value is *not* serialized
+ // You can return Date, Map, Set, etc.
+
+ // Recommendation: handle errors
+ if (res.status !== 200) {
+ // This will activate the closest `error.js` Error Boundary
+ throw new Error('Failed to fetch data')
+ }
+
+ const response = (await res.json()) as RenderedScene
+ // console.log("response:", response)
+ return response
+ } catch (err) {
+ console.error(err)
+ // Gorgon.clear(cacheKey)
+ return defaulResult
+ }
+}
\ No newline at end of file
diff --git a/src/app/api/resolvers/clap/route.ts b/src/app/api/resolvers/clap/route.ts
new file mode 100644
index 0000000000000000000000000000000000000000..f518adbfadb1548c4f080fb0a1aaddcad3353c1f
--- /dev/null
+++ b/src/app/api/resolvers/clap/route.ts
@@ -0,0 +1,24 @@
+import { NextResponse, NextRequest } from "next/server"
+import queryString from "query-string"
+import { generateClap } from "../../generators/clap/generateClap"
+
+export async function GET(req: NextRequest) {
+
+const qs = queryString.parseUrl(req.url || "")
+const query = (qs || {}).query
+
+let prompt = ""
+ try {
+ prompt = decodeURIComponent(query?.p?.toString() || "").trim()
+ } catch (err) {}
+ if (!prompt) {
+ return NextResponse.json({ error: 'no prompt provided' }, { status: 400 });
+ }
+
+ const blob = await generateClap({ prompt })
+
+ return new NextResponse(blob, {
+ status: 200,
+ headers: new Headers({ "content-type": "application/x-gzip" }),
+ })
+}
diff --git a/src/app/api/resolvers/image/route.ts b/src/app/api/resolvers/image/route.ts
new file mode 100644
index 0000000000000000000000000000000000000000..a5d8923c919e44eb99125cf331cd12d100e7ea9f
--- /dev/null
+++ b/src/app/api/resolvers/image/route.ts
@@ -0,0 +1,65 @@
+import { NextResponse, NextRequest } from "next/server"
+import queryString from "query-string"
+
+import { newRender, getRender } from "../../providers/videochain/renderWithVideoChain"
+import { generateSeed } from "@/lib/utils/generateSeed"
+import { sleep } from "@/lib/utils/sleep"
+import { getContentType } from "@/lib/data/getContentType"
+
+export async function GET(req: NextRequest) {
+
+const qs = queryString.parseUrl(req.url || "")
+const query = (qs || {}).query
+
+let prompt = ""
+ try {
+ prompt = decodeURIComponent(query?.p?.toString() || "").trim()
+ } catch (err) {}
+ if (!prompt) {
+ return NextResponse.json({ error: 'no prompt provided' }, { status: 400 });
+ }
+
+ // console.log("calling await newRender")
+
+ let render = await newRender({
+ prompt,
+ negativePrompt: "blurry, cropped, bad quality",
+ nbFrames: 1,
+ nbSteps: 8,
+ width: 1024,
+ height: 576,
+ turbo: true,
+ shouldRenewCache: true,
+ seed: generateSeed()
+ })
+
+ let attempts = 10
+
+ while (attempts-- > 0) {
+ if (render.status === "completed") {
+ return NextResponse.json(render, {
+ status: 200,
+ statusText: "OK",
+ })
+
+ }
+
+ if (render.status === "error") {
+ return NextResponse.json(render, {
+ status: 200,
+ statusText: "OK",
+ })
+ }
+
+ await sleep(1000) // minimum wait time
+
+ console.log("asking getRender")
+ render = await getRender(render.renderId)
+ }
+
+ return NextResponse.json({
+ "error": "failed to call VideoChain (timeout expired)"
+ }, {
+ status: 500,
+ })
+}
diff --git a/src/app/api/resolvers/interface/route.ts b/src/app/api/resolvers/interface/route.ts
new file mode 100644
index 0000000000000000000000000000000000000000..aa907540481ccfc9e066439a14d4350abaa3c925
--- /dev/null
+++ b/src/app/api/resolvers/interface/route.ts
@@ -0,0 +1,36 @@
+import { NextResponse, NextRequest } from "next/server"
+import queryString from "query-string"
+import { predict } from "../../providers/huggingface/predictWithHuggingFace"
+import { systemPrompt } from "./systemPrompt"
+
+export async function GET(req: NextRequest) {
+
+const qs = queryString.parseUrl(req.url || "")
+const query = (qs || {}).query
+
+let prompt = ""
+ try {
+ prompt = decodeURIComponent(query?.p?.toString() || "").trim()
+ } catch (err) {}
+ if (!prompt) {
+ return NextResponse.json({ error: 'no prompt provided' }, { status: 400 });
+ }
+
+ const userPrompt = `HTML snippet to generate: ${prompt}`
+
+ const html = await predict({
+ systemPrompt,
+ userPrompt,
+ nbMaxNewTokens: 400,
+ prefix: "