jbilcke-hf (HF staff) committed
Commit 3b780fb
1 Parent(s): 8c7d08c
Dockerfile CHANGED
@@ -2,8 +2,20 @@ FROM node:20-alpine AS base
 
 # Install dependencies only when needed
 FROM base AS deps
+
 # Check https://github.com/nodejs/docker-node/tree/b4117f9333da4138b03a546ec926ef50a31506c3#nodealpine to understand why libc6-compat might be needed.
 RUN apk add --no-cache libc6-compat
+
+# for dev mode
+RUN apk add git git-lfs procps htop vim nano
+
+RUN apk add alpine-sdk pkgconfig
+
+# For FFMPEG and gl concat
+RUN apk add curl python3 python3-dev libx11-dev libsm-dev libxrender libxext-dev mesa-dev xvfb libxi-dev glew-dev
+
+RUN apk add ffmpeg
+
 WORKDIR /app
 
 # Install dependencies based on the preferred package manager
package-lock.json CHANGED
@@ -66,6 +66,7 @@
         "gsplat": "^1.2.4",
         "hash-wasm": "^4.11.0",
         "jose": "^5.2.4",
+        "js-base64": "^3.7.7",
         "lodash.debounce": "^4.0.8",
         "lucide-react": "^0.260.0",
         "markdown-yaml-metadata-parser": "^3.0.0",
@@ -6087,6 +6088,11 @@
       "url": "https://github.com/sponsors/panva"
       }
     },
+    "node_modules/js-base64": {
+      "version": "3.7.7",
+      "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-3.7.7.tgz",
+      "integrity": "sha512-7rCnleh0z2CkXhH67J8K1Ytz0b2Y+yxTPL+/KOJoa20hfnVQ/3/T6W/KflYI4bRHRagNeXeU2bkNGI3v1oS/lw=="
+    },
     "node_modules/js-sha3": {
       "version": "0.8.0",
       "resolved": "https://registry.npmjs.org/js-sha3/-/js-sha3-0.8.0.tgz",
package.json CHANGED
@@ -67,6 +67,7 @@
     "gsplat": "^1.2.4",
     "hash-wasm": "^4.11.0",
     "jose": "^5.2.4",
+    "js-base64": "^3.7.7",
    "lodash.debounce": "^4.0.8",
    "lucide-react": "^0.260.0",
    "markdown-yaml-metadata-parser": "^3.0.0",
src/app/api/generators/search/getNewMediaInfo.ts CHANGED
@@ -80,11 +80,9 @@ export function getNewMediaInfo(params: Partial<MediaInfo> = {}): MediaInfo {
     updatedAt: new Date().toISOString(),
 
     /**
-     * Arbotrary string tags to label the content
+     * Arbitrary string tags to label the content
      */
-    tags: Array.isArray(params.tags) ? [
-      ...params.tags,
-    ] : [],
+    tags: [],
 
     /**
      * Model name
@@ -139,7 +137,9 @@ export function getNewMediaInfo(params: Partial<MediaInfo> = {}): MediaInfo {
     /**
      * Media projection (cartesian by default)
      */
-    projection: "latent"
+    projection: "latent",
+
+    ...params,
   }
 
  return mediaInfo
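
Note on the `...params` change: the spread now sits last in the object literal, so any field the caller passes (including `tags`) overrides the defaults declared above it, which is what makes replacing the old `Array.isArray(params.tags)` dance with a plain `tags: []` safe. A minimal standalone sketch of that ordering (the `defaults` and `params` values here are illustrative, not from the codebase):

    // later properties win: spreading params after the defaults lets the caller override them
    const defaults = { tags: [] as string[], projection: "latent" }
    const params = { tags: ["drama"], label: "The Cows" }
    const merged = { ...defaults, ...params }
    // -> { tags: ["drama"], projection: "latent", label: "The Cows" }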
src/app/api/providers/huggingface/predictWithHuggingFace.ts CHANGED
@@ -31,7 +31,7 @@ export async function predict({
     }
   })) {
     instructions += output.token.text
-    // process.stdout.write(output.token.text)
+    process.stdout.write(output.token.text)
    if (
      instructions.includes("</s>") ||
      instructions.includes("<s>") ||
src/app/api/utils/parseRawStringToYAML.ts CHANGED
@@ -8,8 +8,9 @@ export function parseRawStringToYAML<T>(input: any, defaultValue: T) {
     .replaceAll("```yaml\n", "")
     .replaceAll("```yaml", "")
 
-  // we remove everything after the last ```
-  rawString = rawString.split('```')[0].trim()
+  // we remove everything after the last ``` (or ``)
+  rawString = rawString.split(/```?/)[0].trim()
+
 
  const something: any = YAML.parse(rawString)
 
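Note on the new regex: /```?/ matches two backticks optionally followed by a third, so the split now also triggers on a truncated `` fence. And since index [0] is kept, everything from the first fence onward is dropped (despite the comment saying "last"). A quick illustration of the behavior:

    // /```?/ matches "``" or "```"; [0] keeps the text before the *first* fence
    const raw = "- title: x\n```\ntrailing tokens `` more junk"
    const kept = raw.split(/```?/)[0].trim()
    // -> "- title: x"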
src/app/api/v1/create/route.ts CHANGED
@@ -25,7 +25,7 @@ export async function POST(req: NextRequest) {
 
   const prompt = `${request?.prompt || ""}`.trim()
 
-  console.log("[api/generate/story] request:", request)
+  console.log("[api/v1/create] request:", request)
 
   if (!prompt.length) { throw new Error(`please provide a prompt`) }
 
@@ -46,11 +46,11 @@ export async function POST(req: NextRequest) {
     prefix: "```yaml\n",
   })
 
-  console.log("[api/generate/story] rawString: ", rawString)
+  console.log("[api/v1/create] rawString: ", rawString)
 
   const shots = parseRawStringToYAML<LatentStory[]>(rawString, [])
 
-  console.log(`[api/generate/story] generated ${shots.length} shots`)
+  console.log(`[api/v1/create] generated ${shots.length} shots`)
 
   // this is approximate - TTS generation will determine the final duration of each shot
   const defaultSegmentDurationInMs = 7000
@@ -76,7 +76,7 @@ export async function POST(req: NextRequest) {
 
   for (const { title, image, voice } of shots) {
 
-    console.log(`[api/generate/story] - ${title}`)
+    console.log(`[api/v1/create] - ${title}`)
 
    // note: it would be nice if we could have a convention saying that
    // track 0 is for videos and track 1 storyboards
src/app/api/v1/create/systemPrompt.ts CHANGED
@@ -35,5 +35,5 @@ but if the user asks for large numbers, it should be ignored (our limit is 32).
   voice: "Now my dog is eating my birtday cake. Please send help."
 \`\`\`
 
-# You turn!
+# Your turn:
 `
src/app/api/v1/create/userPrompt.ts DELETED
File without changes
src/app/api/v1/edit/dialogues/processShot.ts CHANGED
@@ -40,14 +40,16 @@ export async function processShot({
 
     const { durationInMs, durationInSec, hasAudio } = await getMediaInfo(shotDialogueSegment.assetUrl)
 
-    shotDialogueSegment.assetDurationInMs = durationInMs
-    shotSegment.assetDurationInMs = durationInMs
-
-    // we update the duration of all the segments for this shot
-    // (it is possible that this makes the two previous lines redundant)
-    clap.segments.filter(s => {
-      s.assetDurationInMs = durationInMs
-    })
+    if (hasAudio && durationInMs > 1000) {
+      shotDialogueSegment.assetDurationInMs = durationInMs
+      shotSegment.assetDurationInMs = durationInMs
+
+      // we update the duration of all the segments for this shot
+      // (it is possible that this makes the two previous lines redundant)
+      clap.segments.filter(s => {
+        s.assetDurationInMs = durationInMs
+      })
+    }
 
   } catch (err) {
    console.log(`[api/generate/dialogues] processShot: failed to generate audio: ${err}`)
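
Review note on the moved block: `clap.segments.filter(s => { s.assetDurationInMs = durationInMs })` works only through its side effect; the arrow body returns `undefined`, so `filter` produces an empty, discarded array while still visiting every segment. `forEach` would state the intent directly, e.g.:

    // equivalent behavior, but explicit that the mutation is the point
    clap.segments.forEach(s => {
      s.assetDurationInMs = durationInMs
    })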
src/app/api/v1/edit/storyboards/processShot.ts CHANGED
@@ -41,7 +41,7 @@ export async function processShot({
       clap.segments.push(shotStoryboardSegment)
     }
 
-    console.log(`[api/generate/storyboards] processShot: generated storyboard segment [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}]`)
+    console.log(`[api/v1/edit/storyboards] processShot: generated storyboard segment [${shotSegment.startTimeInMs}:${shotSegment.endTimeInMs}]`)
   }
   if (!shotStoryboardSegment) { throw new Error(`failed to generate a newSegment`) }
 
@@ -49,12 +49,12 @@ export async function processShot({
   if (!shotStoryboardSegment?.prompt) {
     // storyboard is missing, let's generate it
     shotStoryboardSegment.prompt = getVideoPrompt(shotSegments, clap.entityIndex, ["high quality", "crisp", "detailed"])
-    console.log(`[api/generate/storyboards] processShot: generating storyboard prompt: ${shotStoryboardSegment.prompt}`)
+    console.log(`[api/v1/edit/storyboards] processShot: generating storyboard prompt: ${shotStoryboardSegment.prompt}`)
   }
 
   // TASK 3: GENERATE MISSING STORYBOARD BITMAP
   if (!shotStoryboardSegment.assetUrl) {
-    // console.log(`[api/generate/storyboards] generating image..`)
+    // console.log(`[api/v1/edit/storyboards] generating image..`)
 
     try {
       shotStoryboardSegment.assetUrl = await generateStoryboard({
@@ -64,13 +64,13 @@ export async function processShot({
       })
       shotStoryboardSegment.assetSourceType = getClapAssetSourceType(shotStoryboardSegment.assetUrl)
     } catch (err) {
-      console.log(`[api/generate/storyboards] processShot: failed to generate an image: ${err}`)
+      console.log(`[api/v1/edit/storyboards] processShot: failed to generate an image: ${err}`)
       throw err
     }
 
-    console.log(`[api/generate/storyboards] processShot: generated storyboard image: ${shotStoryboardSegment?.assetUrl?.slice?.(0, 50)}...`)
+    console.log(`[api/v1/edit/storyboards] processShot: generated storyboard image: ${shotStoryboardSegment?.assetUrl?.slice?.(0, 50)}...`)
   } else {
-    console.log(`[api/generate/storyboards] processShot: there is already a storyboard image: ${shotStoryboardSegment?.assetUrl?.slice?.(0, 50)}...`)
+    console.log(`[api/v1/edit/storyboards] processShot: there is already a storyboard image: ${shotStoryboardSegment?.assetUrl?.slice?.(0, 50)}...`)
   }
 
 }
src/app/api/v1/edit/storyboards/route.ts CHANGED
@@ -23,10 +23,10 @@ export async function POST(req: NextRequest) {
 
   if (!clap?.segments) { throw new Error(`no segment found in the provided clap!`) }
 
-  console.log(`[api/generate/storyboards] detected ${clap.segments.length} segments`)
+  console.log(`[api/v1/edit/storyboards] detected ${clap.segments.length} segments`)
 
   const shotsSegments: ClapSegment[] = clap.segments.filter(s => s.category === "camera")
-  console.log(`[api/generate/storyboards] detected ${shotsSegments.length} shots`)
+  console.log(`[api/v1/edit/storyboards] detected ${shotsSegments.length} shots`)
 
   if (shotsSegments.length > 32) {
     throw new Error(`Error, this endpoint being synchronous, it is designed for short stories only (max 32 shots).`)
@@ -40,7 +40,7 @@ export async function POST(req: NextRequest) {
     })
   ))
 
-  // console.log(`[api/generate/storyboards] returning the clap augmented with storyboards`)
+  // console.log(`[api/v1/edit/storyboards] returning the clap augmented with storyboards`)
 
  return new NextResponse(await serializeClap(clap), {
    status: 200,
src/app/api/v1/search/index.ts ADDED
@@ -0,0 +1,60 @@
+"use server"
+
+import YAML from "yaml"
+
+import { predict } from "@/app/api/providers/huggingface/predictWithHuggingFace"
+import { parseRawStringToYAML } from "@/app/api/utils/parseRawStringToYAML"
+
+import { systemPromptForBasicSearchResults, systemPromptForExtendedSearchResults } from "./systemPrompt"
+import type { BasicSearchResult, ExtendedSearchResult } from "./types"
+
+export async function search({
+  prompt = "",
+  nbResults = 4
+}: {
+  prompt: string
+  nbResults: number
+}): Promise<BasicSearchResult[]> {
+  const userPrompt = `${
+    Math.max(1, Math.min(8, nbResults))
+  } search results about: ${
+    prompt || "various trending genres"
+  }`
+
+  // TODO use streaming for the Hugging Face prediction
+  const rawString = await predict({
+    systemPrompt: systemPromptForBasicSearchResults,
+    userPrompt,
+    nbMaxNewTokens: nbResults * 80,
+    prefix: "```yaml\n",
+  })
+
+  console.log("rawString: ", rawString)
+
+  const results = parseRawStringToYAML<BasicSearchResult[]>(rawString, [])
+
+  return results
+}
+
+export async function extend({
+  basicResults = [],
+}: {
+  basicResults: BasicSearchResult[]
+}): Promise<ExtendedSearchResult[]> {
+  const userPrompt = YAML.stringify(basicResults)
+
+
+  // TODO use streaming for the Hugging Face prediction
+  const rawString = await predict({
+    systemPrompt: systemPromptForExtendedSearchResults,
+    userPrompt,
+    nbMaxNewTokens: basicResults.length * 200,
+    prefix: "```yaml\n",
+  })
+
+  console.log("rawString: ", rawString)
+
+  const results = parseRawStringToYAML<ExtendedSearchResult[]>(rawString, [])
+
+  return results
+}
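
Both helpers are plain async server functions, so the intended two-step flow can be sketched directly from the signatures above (illustrative values):

    // step 1: fast, shallow results (title + tags only)
    const basics = await search({ prompt: "cows", nbResults: 4 })

    // step 2: slower enrichment pass (adds description + cover prompt)
    const extended = await extend({ basicResults: basics })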
src/app/api/v1/search/route.ts ADDED
@@ -0,0 +1,72 @@
+import { NextResponse, NextRequest } from "next/server"
+import queryString from "query-string"
+import { BasicSearchResult, ExtendedSearchResult } from "./types"
+import { extend, search } from "."
+
+export type LatentSearchMode =
+  | "basic"
+  | "extended"
+
+// we hide/wrap the micro-service under a unified AiTube API
+export async function GET(req: NextRequest, res: NextResponse) {
+
+  const qs = queryString.parseUrl(req.url || "")
+  const query = (qs || {}).query
+
+  let mode: LatentSearchMode = "basic"
+  try {
+    mode = decodeURIComponent(query?.m?.toString() || "basic").trim() as LatentSearchMode
+  } catch (err) {}
+
+
+  if (mode === "basic") {
+    let prompt = ""
+    try {
+      prompt = decodeURIComponent(query?.p?.toString() || "").trim() as string
+    } catch (err) {}
+
+    const basicSearchResults: BasicSearchResult[] = await search({
+      prompt,
+      nbResults: 4
+    })
+
+    console.log(`[api/v1/search] found ${basicSearchResults.length} basic search results`)
+    console.log(`[api/v1/search]`, basicSearchResults)
+
+    return NextResponse.json(basicSearchResults, {
+      status: 200,
+      statusText: "OK",
+    })
+  } else if (mode === "extended") {
+
+    let basicResults: BasicSearchResult[] = []
+    try {
+      const rawString = decodeURIComponent(query?.e?.toString() || "").trim() as string
+      const maybeExistingResults = JSON.parse(rawString)
+      if (Array.isArray(maybeExistingResults)) {
+        basicResults = maybeExistingResults
+      }
+    } catch (err) {}
+
+    const extendedSearchResults: ExtendedSearchResult[] = await extend({
+      basicResults
+    })
+
+    console.log(`[api/v1/search] extended ${extendedSearchResults.length} search results`)
+
+    console.log(`[api/v1/search]`, extendedSearchResults)
+
+    return NextResponse.json(extendedSearchResults, {
+      status: 200,
+      statusText: "OK",
+    })
+  } else {
+    /*
+    return NextResponse.json([], {
+      status: 200,
+      statusText: "OK",
+    })
+    */
+    throw new Error(`Please specify the mode.`)
+  }
+}
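
The route reads everything from query-string parameters: `m` selects the mode, `p` carries the basic-search prompt, and `e` carries the JSON-serialized basic results to extend. A client-side usage sketch (base path inferred from the file location; error handling omitted):

    // basic mode: m=basic, p=<prompt>
    const basics = await fetch(
      `/api/v1/search?m=basic&p=${encodeURIComponent("cows")}`
    ).then(r => r.json())

    // extended mode: m=extended, e=<JSON array of basic results>
    const extended = await fetch(
      `/api/v1/search?m=extended&e=${encodeURIComponent(JSON.stringify(basics))}`
    ).then(r => r.json())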
src/app/api/v1/search/systemPrompt.ts ADDED
@@ -0,0 +1,92 @@
+export const systemPromptForBasicSearchResults: string =
+`# Context
+You are a server-side function generating search results from a single text input (a "prompt").
+You have a database of new, never-seen-before videos, series, tv shows, movies, documentaries, music videos, found footage, news videos, videos created by influencers, game reviews, trailers of all kinds, tutorials, education videos, user-made short videos etc.. anything you could find on both Netflix or YouTube, really.
+You do not contain any existing IP (intellectual property) content.
+
+# Task
+Your mission is to generate a sequence of search results that closely matches the user's demand.
+
+You will be provided a "prompt" (for the story) and a max number of results.
+
+Each result object is composed of:
+- title: title of the video
+- tags: to describe the categories and genres
+# Examples
+
+You must reply by writing/completing a YAML list of objects.
+Here is a short example; the prompt was "4 search results about: cows".
+Note how we asked for "4 search results" for brevity, but it's possible that the number will be omitted. You should return 5 results by default,
+but if the user asks for large numbers, it should be ignored (our limit is 8).
+If the user asks for a specific category, eg "<something> recipes", obviously you should only show recipes to cook that thing, etc.
+Try to be imaginative!
+
+\`\`\`
+- title: "The Cows"
+  tags: ["feature film", "drama", "coming of age"]
+- title: "Happy cows 🐮 mooing and grazing 🌱"
+  tags: ["short video", "nature", "community"]
+- title: "Ganja Dog - Got Milk?"
+  tags: ["music", "music video", "hip-hop"]
+- title: "How Cows Work"
+  tags: ["short video", "education", "influencer"]
+\`\`\`
+
+# Your turn:
+`
+
+export const systemPromptForExtendedSearchResults: string =
+`# Context
+You are a server-side function generating search results from partial results.
+You have a database of new, never-seen-before videos, series, tv shows, movies, documentaries, music videos, found footage, news videos, videos created by influencers, game reviews, trailers of all kinds, tutorials, education videos, user-made short videos etc.. anything you could find on both Netflix or YouTube, really.
+You do not contain any existing IP (intellectual property) content.
+
+# Task
+Your mission is to generate a sequence of search results that best complements and extends the partial results.
+
+You will be provided those partial results as YAML, and you need to return YAML.
+
+An extended search result should contain those fields:
+- title: title of the video
+- cover: you must describe it using a Stable Diffusion prompt - about ~300 characters - using simple descriptive words and adjectives. Describe facts about characters, location, lights, texture, camera orientation, colors, clothes, movements etc. But don't give your opinion, don't talk about the emotions it evokes etc.
+- description: in 2 or 3 sentences please describe the genre, category, visual style, synopsis (You must only describe the content, so don't add any info about the director, author, release year.)
+- tags: to describe the categories and genres
+# Examples
+
+You must reply by writing/completing a YAML list of objects.
+Here is a short example, using this data as input:
+
+\`\`\`
+- title: "The Cows"
+  tags: ["feature film", "drama", "coming of age"]
+- title: "Happy cows 🐮 mooing and grazing 🌱"
+  tags: ["short video", "nature", "community"]
+- title: "Ganja Dog - Got Milk?"
+  tags: ["music", "music video", "hip-hop"]
+- title: "How Cows Work"
+  tags: ["short video", "education", "influencer"]
+\`\`\`
+
+And here is one of the many possibilities:
+
+\`\`\`
+- title: "The Cows"
+  description: "a drama about Pete, young man growing up in a farm. His mom died, and his dad (who is sick) expects him to take over the cow farm, but Pete doesn't want to, and dreams of a different life, in the city. He often goes to the city at night, meet girls, but when he comes back late, and unable to work properly in the morning (when he makes mistake, forget to feed the cows etc) his dad grows angrier. Pete doesn't know his dad is sick, though. Near the end, the dad dies, and Pete will have to make some difficult life-changing decisions."
+  cover: "poster of a movie called "The Cows", mysterious image of a farmer, alone in a grass field, in the morning, cows in the background, sad, angry, mist, award-winning, film poster, movie poster"
+  tags: ["feature film", "drama", "coming of age"]
+- title: "Happy cows 🐮 mooing and grazing 🌱"
+  description: various 4K footage of cows grazing, walking, mooing, in other words being happy.
+  cover: "well-lit photo of cows, grazing in a field, peaceful, instagram, award winning"
+  tags: ["short video", "nature", "community"]
+- title: "Ganja Dog - Got Milk?"
+  description: "a music video by Ganja Dog, a dog who is a famous rapper and influencer. The clip is shot in a farm, with Ganja Dog rapping and dancing, showing off its cars, clothes, with various people. There are various dramatic camera effects. The lyrics should be funny eg "Witch, I'm a Cow, I go Moo..", "I'm In the Mood For Some Milk Yo" etc"
+  cover: "medium-shot of a dog rapper, showing off his dollars, in a cow farm, rapping, rich, cows, dogs, puppies, trending, influencer, dramatic, music video, rapping"
+  tags: ["music", "music video", "hip-hop"]
+- title: "How Cows Work"
+  description: "a video explaining how cows work, made using cheap stock footage and an AI voice-over. It should show the history of cows, how they evolved, why humans use them, how a farm works, how grass gets turned into milk, differences in breeds, the economics etc"
+  cover: "picture of a cow, drawn like a blueprint, cow factory, cow machine, clickbait youtube thumbnail made by an influencer, overly dramatic, unrealistic, fake, dramatic, overplayed, too much"
+  tags: ["short video", "education", "influencer"]
+\`\`\`
+
+# Your turn:
+`
src/app/api/v1/search/types.ts ADDED
@@ -0,0 +1,16 @@
+
+// for latent search we need to get rough results fast
+export type BasicSearchResult = {
+  // note: the absence of ID means this is a new, latent result
+  id?: string
+  title: string
+  tags: string[]
+}
+
+export type ExtendedSearchResult = {
+  id?: string
+  title: string
+  description: string
+  cover: string
+  tags: string[]
+}
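
Per the comment on `id`, the absence of an id is what marks a result as latent (imagined rather than stored); a tiny predicate sketch built on that convention:

    // a result with no id only exists in the latent space
    const isLatentResult = (r: BasicSearchResult): boolean => !r.id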
src/app/{dream → latent}/embed/page.tsx RENAMED
File without changes
src/app/{dream → latent}/samples.tsx RENAMED
File without changes
src/app/{dream/spoiler.tsx → latent/search/page.tsx} RENAMED
@@ -1,13 +1,15 @@
-import { LatentQueryProps } from "@/types/general"
+import { encode, decode } from 'js-base64'
 import { clapToDataUri, generateClapFromSimpleStory } from "@aitube/clap"
 
-import { Main } from "../main"
-import { getNewMediaInfo } from "../api/generators/search/getNewMediaInfo"
-import { getToken } from "../api/auth/getToken"
+import { LatentQueryProps } from "@/types/general"
+
+import { Main } from "../../main"
+import { getNewMediaInfo } from "../../api/generators/search/getNewMediaInfo"
+import { getToken } from "../../api/auth/getToken"
 
 // https://jmswrnr.com/blog/protecting-next-js-api-routes-query-parameters
 
-export default async function DreamPage({
+export default async function LatentSearchPage({
   searchParams: {
     l: latentContent,
   },
@@ -15,7 +17,8 @@ export default async function LatentSearchPage({
 }: LatentQueryProps) {
   const jwtToken = await getToken({ user: "anonymous" })
 
-  // const latentSearchResult = JSON.parse(atob(`${latentContent}`)) as LatentSearchResult
+
+  // const latentSearchResult = JSON.parse(decode(`${latentContent}`)) as LatentSearchResult
 
  // this will hallucinate the thumbnail on the fly - maybe we should cache it
  // const latentMedia = await searchResultToMediaInfo(latentSearchResult)
src/app/latent/watch/page.tsx ADDED
@@ -0,0 +1,63 @@
+import { encode, decode } from 'js-base64'
+
+import { LatentQueryProps } from "@/types/general"
+import { BasicSearchResult, ExtendedSearchResult } from "@/app/api/v1/search/types"
+
+import { Main } from "../../main"
+import { getNewMediaInfo } from "../../api/generators/search/getNewMediaInfo"
+import { getToken } from "../../api/auth/getToken"
+
+import { extend } from "@/app/api/v1/search"
+
+// https://jmswrnr.com/blog/protecting-next-js-api-routes-query-parameters
+
+export default async function DreamPage({
+  searchParams: {
+    p: prompt,
+  },
+  ...rest
+}: LatentQueryProps) {
+  const jwtToken = await getToken({ user: "anonymous" })
+  console.log(`[/latent/watch] prompt =`, prompt)
+  let basicResult = JSON.parse(decode(`${prompt || ""}`)) as BasicSearchResult
+
+  console.log("[/latent/watch] basicResult:", basicResult)
+
+  // note that we should generate a longer synopsis from the autocomplete result
+  //
+  // however that is a slow process, maybe not great for a server-side rendering task,
+  // so idk
+  const extendedResults: ExtendedSearchResult[] = await extend({
+    basicResults: [ basicResult ]
+  })
+  console.log(`[/latent/watch] extendedResults =`, extendedResults)
+
+  const extendedResult = extendedResults.at(0)
+
+  if (!extendedResult || !Array.isArray(extendedResult.tags)) {
+    console.error(`failed to generate an extended result, aborting`)
+    throw new Error(`Server error`)
+  }
+  // const latentSearchResult = JSON.parse(decode(`${latentContent}`)) as LatentSearchResult
+
+  // TODO: we should hallucinate the thumbnail at this stage, and on the fly
+  // this is useful to do it on the server-side so we can share the link on social media etc
+  //
+  // maybe we should cache the image
+  // const latentMedia = await searchResultToMediaInfo(latentSearchResult)
+
+  const latentMedia = getNewMediaInfo({
+    label: extendedResult.title,
+    description: extendedResult.description,
+    prompt: extendedResult.description,
+    tags: [...extendedResult.tags],
+  })
+
+  console.log(`[/latent/watch] generated media: `, latentMedia)
+  // now, generating the .clap is another story, it will be much more intensive
+  // so we will generate it later, asynchronously, on the client side
+
+  return (
+    <Main latentMedia={latentMedia} jwtToken={jwtToken} />
+  )
+}
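
The page expects `p` to be a base64-encoded JSON `BasicSearchResult`, which is what the new js-base64 dependency is for. The round trip, as a sketch (the link side appears further below in latent-search-input):

    import { encode, decode } from "js-base64"

    // link side: serialize the imagined result into the query parameter
    const p = encode(JSON.stringify({ title: "The Cows", tags: ["drama"] }))

    // page side: recover it
    const basicResult = JSON.parse(decode(p)) as BasicSearchResult

One caveat: plain `encode()` output can contain `+`, `/` and `=`, which are not URL-safe; js-base64 also exports a URL-safe `encodeURI` variant if that ever becomes an issue in the `?p=` links.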
src/app/main.tsx CHANGED
@@ -19,6 +19,7 @@ import { PublicMediaEmbedView } from "./views/public-media-embed-view"
 import { PublicMediaView } from "./views/public-media-view"
 import { PublicLatentMediaEmbedView } from "./views/public-latent-media-embed-view"
 import { PublicLatentMediaView } from "./views/public-latent-media-view"
+import { PublicLatentSearchView } from "./views/public-latent-search-view"
 
 // this is where we transition from the server-side space
 // and the client-side space
@@ -80,6 +81,8 @@ export function Main({
   const setPublicTracks = useStore(s => s.setPublicTracks)
   const setPublicTrack = useStore(s => s.setPublicTrack)
 
+  console.log("[main.tsx] latentMedia = ", latentMedia)
+
   useEffect(() => {
     if (typeof jwtToken !== "string" && !jwtToken) { return }
     setJwtToken(jwtToken)
@@ -153,13 +156,13 @@ export function Main({
     console.log(latentMedia)
     setPublicLatentMedia(latentMedia)
     if (!latentMedia || !latentMedia?.id) { return }
-    if (pathname === "/dream/embed") { return }
-    if (pathname !== "/dream") {
+    if (pathname === "/latent/embed") { return }
+    if (pathname !== "/latent/watch") {
       // console.log("we are on huggingface apparently!")
       // router.replace(`/watch?v=${publicMedia.id}`)
 
       // TODO: add params in the URL to represent the latent result
-      router.replace(`/dream`)
+      router.replace(`/latent/watch`)
     }
   }, [latentMedia?.id])
 
@@ -196,6 +199,7 @@ export function Main({
       {view === "public_media" && <PublicMediaView />}
 
       {/* latent content is the content that "doesn't exist" (is generated by the AI) */}
+      {view === "public_latent_search" && <PublicLatentSearchView />}
      {view === "public_latent_media_embed" && <PublicLatentMediaEmbedView />}
      {view === "public_latent_media" && <PublicLatentMediaView />}
 
src/app/state/useStore.ts CHANGED
@@ -3,6 +3,7 @@
 import { create } from "zustand"
 
 import { ChannelInfo, MediaInfo, InterfaceDisplayMode, InterfaceView, InterfaceMenuMode, InterfaceHeaderMode, CommentInfo, UserInfo } from "@/types/general"
+import { BasicSearchResult } from "../api/v1/search/types"
 
 export const useStore = create<{
   displayMode: InterfaceDisplayMode
@@ -31,8 +32,8 @@ export const useStore = create<{
   searchAutocompleteQuery: string
   setSearchAutocompleteQuery: (searchAutocompleteQuery?: string) => void
 
-  searchAutocompleteResults: MediaInfo[]
-  setSearchAutocompleteResults: (searchAutocompleteResults: MediaInfo[]) => void
+  searchAutocompleteResults: BasicSearchResult[]
+  setSearchAutocompleteResults: (searchAutocompleteResults: BasicSearchResult[]) => void
 
   searchResults: MediaInfo[]
   setSearchResults: (searchResults: MediaInfo[]) => void
@@ -118,8 +119,9 @@
   "/embed": "public_media_embed",
   "/music": "public_music_videos",
   "/channels": "public_channels",
-  "/dream": "public_latent_media",
-  "/dream/embed": "public_latent_media_embed",
+  "/latent/search": "public_latent_search",
+  "/latent/watch": "public_latent_media",
+  "/latent/embed": "public_latent_media_embed",
   "/channel": "public_channel",
 
   // those are reserved for future use
@@ -149,8 +151,8 @@
     set({ showAutocompleteBox })
   },
 
-  searchAutocompleteResults: [] as MediaInfo[],
-  setSearchAutocompleteResults: (searchAutocompleteResults: MediaInfo[]) => {
+  searchAutocompleteResults: [] as BasicSearchResult[],
+  setSearchAutocompleteResults: (searchAutocompleteResults: BasicSearchResult[]) => {
    set({ searchAutocompleteResults })
  },
 
src/app/views/public-latent-search-view/index.tsx ADDED
@@ -0,0 +1,49 @@
+"use client"
+
+import { useEffect, useTransition } from "react"
+
+import { useStore } from "@/app/state/useStore"
+import { cn } from "@/lib/utils/cn"
+import { MediaInfo } from "@/types/general"
+import { getVideos } from "@/app/api/actions/ai-tube-hf/getVideos"
+import { VideoList } from "@/components/interface/video-list"
+
+export function PublicLatentSearchView() {
+  const [_isPending, startTransition] = useTransition()
+  const setView = useStore(s => s.setView)
+  const currentTag = useStore(s => s.currentTag)
+  const setPublicMedias = useStore(s => s.setPublicMedias)
+  const setPublicMedia = useStore(s => s.setPublicMedia)
+  const publicMedias = useStore(s => s.publicMedias)
+
+  useEffect(() => {
+    startTransition(async () => {
+      const medias = await getVideos({
+        sortBy: "date",
+        mandatoryTags: currentTag ? [currentTag] : [],
+        maxNbMedias: 25
+      })
+
+      // due to some caching on the first function.. we update with fresh data!
+      // const updatedVideos = await extendVideosWithStats(medias)
+
+      setPublicMedias(medias)
+    })
+  }, [currentTag])
+
+  const handleSelect = (media: MediaInfo) => {
+    setView("public_latent_media")
+    setPublicMedia(media)
+  }
+
+  return (
+    <div className={cn(
+      `sm:pr-4`
+    )}>
+      <VideoList
+        items={publicMedias}
+        onSelect={handleSelect}
+      />
+    </div>
+  )
+}
src/components/interface/latent-engine/core/engine.tsx CHANGED
@@ -14,7 +14,7 @@ import { localStorageKeys } from "@/app/state/localStorageKeys"
 import { defaultSettings } from "@/app/state/defaultSettings"
 import { useStore } from "@/app/state/useStore"
 import { ClapProject, generateClapFromSimpleStory, serializeClap } from "@aitube/clap"
-import { theSimps } from "@/app/dream/samples"
+import { theSimps } from "@/app/latent/samples"
 
 function LatentEngine({
   media,
@@ -76,29 +76,27 @@ function LatentEngine({
   const videoLayerRef = useRef<HTMLDivElement>(null)
   const segmentationLayerRef = useRef<HTMLDivElement>(null)
 
-  const mediaUrl = media.clapUrl || media.assetUrlHd || media.assetUrl
-
   useEffect(() => {
-    if (!stateRef.current.isInitialized && mediaUrl) {
+    if (!stateRef.current.isInitialized) {
       stateRef.current.isInitialized = true
 
       const fn = async () => {
-        // TODO julian
-        // there is a bug, we can't unpack the .clap when it's from a data-uri :/
+        // if we have a clapUrl (eg. from the database) then we use that
+        // otherwise we generate it ourselves, chunk by chunk (since we're live)
+
+        // TODO Julian work on the chunk mechanism
 
-        // open(mediaUrl)
         const mockClap: ClapProject = generateClapFromSimpleStory({
-          story: theSimps
+          story: theSimps,
+          showIntroPoweredByEngine: false,
+          showIntroDisclaimerAboutAI: false
         })
-        const mockArchive: Blob = await serializeClap(mockClap)
-        // for some reason conversion to data uri doesn't work
-        // const mockDataUri = await blobToDataUri(mockArchive, "application/x-gzip")
-        // console.log("mockDataUri:", mockDataUri)
-        open(mockArchive)
+
+        open(mockClap)
       }
       fn()
     }
-  }, [mediaUrl])
+  }, [media.id])
 
  const isPlayingRef = useRef(isPlaying)
  isPlayingRef.current = isPlaying
src/components/interface/latent-search-input/index.tsx ADDED
@@ -0,0 +1,157 @@
+import { useRef, useState, useTransition } from "react"
+import Link from "next/link"
+import { encode, decode } from 'js-base64'
+// import throttle from "@jcoreio/async-throttle"
+import debounce from "lodash.debounce"
+import { GoSearch } from "react-icons/go"
+
+import { useStore } from "@/app/state/useStore"
+import { cn } from "@/lib/utils/cn"
+import { Input } from "@/components/ui/input"
+import { Button } from "@/components/ui/button"
+import { MediaInfo } from "@/types/general"
+import { search } from "@/app/api/v1/search"
+
+export function LatentSearchInput() {
+  const [_pending, startTransition] = useTransition()
+
+  const setSearchAutocompleteQuery = useStore(s => s.setSearchAutocompleteQuery)
+  const showAutocompleteBox = useStore(s => s.showAutocompleteBox)
+  const setShowAutocompleteBox = useStore(s => s.setShowAutocompleteBox)
+
+  const searchAutocompleteResults = useStore(s => s.searchAutocompleteResults)
+  const setSearchAutocompleteResults = useStore(s => s.setSearchAutocompleteResults)
+
+  const setSearchQuery = useStore(s => s.setSearchQuery)
+
+  const [searchDraft, setSearchDraft] = useState("")
+
+  const ref = useRef<HTMLInputElement>(null)
+
+
+  // called when pressing enter or clicking on search
+  const debouncedSearch = debounce((query: string) => {
+    startTransition(async () => {
+      console.log(`searching the latent space for "${query}"..`)
+
+      if (query.length < 2) { console.log("search term is too short") }
+
+      console.log("imagining medias..")
+
+      const imaginedMedias = await search({
+        prompt: query,
+        nbResults: 4
+      })
+
+      console.log(`imagined ${imaginedMedias.length} results:`, imaginedMedias)
+
+      setSearchAutocompleteResults(imaginedMedias.map(item => ({
+        title: item.title,
+        tags: item.tags,
+      })))
+
+      // TODO: only close the show autocomplete box if we found something
+      // setShowAutocompleteBox(false)
+    })
+  }, 500)
+
+  // called when pressing enter or clicking on search
+  const handleSearch = () => {
+    ref.current?.focus()
+    setSearchQuery(searchDraft)
+    setShowAutocompleteBox(true)
+    debouncedSearch(searchDraft)
+  }
+
+  return (
+    <div className="flex flex-row flex-grow w-[380px] lg:w-[600px]">
+
+      <div className="flex flex-row w-full">
+        <Input
+          ref={ref}
+          placeholder="Search the latent space"
+          className={cn(
+            `bg-neutral-900 text-neutral-200 dark:bg-neutral-900 dark:text-neutral-200`,
+            `rounded-l-full rounded-r-none`,
+
+            // we increase the line height to better catch the clicks
+            `py-0 h-10 leading-7`,
+
+            `border-neutral-700 dark:border-neutral-700 border-r-0`,
+
+          )}
+          onFocus={() => {
+            handleSearch()
+          }}
+          onChange={(e) => {
+            setSearchDraft(e.target.value)
+            setShowAutocompleteBox(true)
+            // handleSearch()
+          }}
+          onKeyDown={({ key }) => {
+            if (key === 'Enter') {
+              handleSearch()
+            }
+          }}
+          value={searchDraft}
+        />
+        <Button
+          className={cn(
+            `rounded-l-none rounded-r-full border border-neutral-700 dark:border-neutral-700`,
+            `cursor-pointer`,
+            `transition-all duration-200 ease-in-out`,
+            `text-neutral-200 dark:text-neutral-200 bg-neutral-800 dark:bg-neutral-800 hover:bg-neutral-700 disabled:bg-neutral-900`
+          )}
+          onClick={() => {
+            handleSearch()
+            // console.log("submit")
+            // setShowAutocompleteBox(false)
+            // setSearchDraft("")
+          }}
+          disabled={false}
+        >
+          <GoSearch className="w-6 h-6" />
+        </Button>
+      </div>
+      <div
+        className={cn(
+          `absolute z-50 ml-1`,
+
+          // please keep this in sync with the parent
+          `w-[320px] lg:w-[540px]`,
+
+          `text-neutral-200 dark:text-neutral-200 bg-neutral-900 dark:bg-neutral-900`,
+          `border border-neutral-800 dark:border-neutral-800`,
+          `rounded-xl shadow-2xl`,
+          `flex flex-col p-2 space-y-1`,
+
+          `transition-all duration-200 ease-in-out`,
+          showAutocompleteBox
+            ? `opacity-100 scale-100 mt-11 pointer-events-auto`
+            : `opacity-0 scale-95 mt-6 pointer-events-none`
+        )}
+      >
+        {searchAutocompleteResults.length === 0 ? <div>Nothing to show, type something and press enter</div> : null}
+        {searchAutocompleteResults.map(item => (
+          <Link key={item.id} href={
+            item.id
+              ? `${process.env.NEXT_PUBLIC_DOMAIN}/watch?v=${item.id}`
+              : `${process.env.NEXT_PUBLIC_DOMAIN}/latent/watch?p=${encode(JSON.stringify(item))}`
+          }>
+            <div
+              className={cn(
+                `dark:hover:bg-neutral-800 hover:bg-neutral-800`,
+                `text-sm`,
+                `px-3 py-2`,
+                `rounded-xl`
+              )}
+
+            >
+              {item.title}
+            </div>
+          </Link>
+        ))}
+      </div>
+    </div>
+  )
+}
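
Review note on `debouncedSearch`: `debounce(...)` is called in the component body, so every render creates a fresh debounced function with its own timer, which can defeat the 500 ms debouncing under rapid re-renders. If that matters, memoizing the instance is the usual fix; a sketch (the zustand setters and `startTransition` it closes over are stable, so empty deps are fine here):

    import { useMemo } from "react"

    // keep a single debounced instance across renders
    const debouncedSearch = useMemo(
      () => debounce((query: string) => {
        // ...same body as above...
      }, 500),
      []
    )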
src/components/interface/media-player/index.tsx CHANGED
@@ -25,7 +25,6 @@ export function MediaPlayer({
   console.log("MediaPlayer called for \"" + media?.label + "\"")
 
   if (!media) { return null }
-  if (!media?.assetUrl && !media?.clapUrl) { return null }
 
   // uncomment one of those to forcefully test the .clap player from an external .clap file
   // media.assetUrlHd = "https://huggingface.co/datasets/jbilcke/ai-tube-cinema/tree/main/404.clap"
@@ -48,6 +47,8 @@ export function MediaPlayer({
     )
   }
 
+  if (!media?.assetUrl && !media?.assetUrlHd) { return null }
+
  if (projectionType === "gaussian") {
    // note: for AutoSizer to work properly it needs to be inside a normal div with no display: "flex"
    return (
src/components/interface/search-input/index.tsx CHANGED
@@ -1,5 +1,6 @@
 import { useRef, useState, useTransition } from "react"
 import Link from "next/link"
+import { encode, decode } from 'js-base64'
 // import throttle from "@jcoreio/async-throttle"
 import debounce from "lodash.debounce"
 import { GoSearch } from "react-icons/go"
@@ -32,7 +33,7 @@ export function SearchInput() {
     startTransition(async () => {
       console.log(`searching for "${query}"..`)
 
-      const videos = await getVideos({
+      const medias = await getVideos({
         query,
         sortBy: "match",
         maxNbMedias: 8,
@@ -40,8 +41,14 @@ export function SearchInput() {
         renewCache: false, // bit of optimization
       })
 
-      console.log(`got ${videos.length} results!`)
-      setSearchAutocompleteResults(videos)
+      console.log(`got ${medias.length} results!`)
+      setSearchAutocompleteResults(medias.map(item => ({
+        id: item.id,
+        title: item.label,
+        tags: item.tags,
+      })))
+
+
 
       // TODO: only close the show autocomplete box if we found something
       // setShowAutocompleteBox(false)
@@ -125,8 +132,12 @@ export function SearchInput() {
         )}
       >
         {searchAutocompleteResults.length === 0 ? <div>Nothing to show, type something and press enter</div> : null}
-        {searchAutocompleteResults.map(media => (
-          <Link key={media.id} href={`${process.env.NEXT_PUBLIC_DOMAIN}/watch?v=${media.id}`}>
+        {searchAutocompleteResults.map(item => (
+          <Link key={item.id} href={
+            item.id
+              ? `${process.env.NEXT_PUBLIC_DOMAIN}/watch?v=${item.id}`
+              : `${process.env.NEXT_PUBLIC_DOMAIN}/latent/watch?p=${encode(JSON.stringify(item))}`
+          }>
           <div
             className={cn(
               `dark:hover:bg-neutral-800 hover:bg-neutral-800`,
@@ -136,7 +147,7 @@ export function SearchInput() {
           )}
 
         >
-          {media.label}
+          {item.title}
         </div>
       </Link>
     ))}
src/components/interface/top-header/index.tsx CHANGED
@@ -11,6 +11,7 @@ import Link from 'next/link'
 import { Input } from '@/components/ui/input'
 import { Button } from '@/components/ui/button'
 import { SearchInput } from '../search-input'
+import { LatentSearchInput } from '../latent-search-input'
 import { pathway } from '@/lib/fonts'
 
 export function TopHeader() {
@@ -122,7 +123,11 @@ export function TopHeader() {
         `px-4 py-2 w-max-64`,
         `text-neutral-400 text-2xs sm:text-xs lg:text-sm italic`
       )}>
-        <SearchInput />
+        {view === "public_latent_search" ||
+         view === "public_latent_media" ||
+         view === "public_latent_media_embed"
+          ? <LatentSearchInput />
+          : <SearchInput />}
       </div>
       <div className={cn("w-32 xl:w-42")}>
        <span>
src/types/general.ts CHANGED
@@ -636,6 +636,7 @@ export type InterfaceView =
   | "public_media" // public view of an individual media (video, gaussian splat, clap video)
   | "public_media_embed" // for integration into twitter etc
   | "public_music_videos" // public music videos - it's a special category, because music is *cool*
+  | "public_latent_search" // public view of latent search results
   | "public_latent_media" // public view of an individual dream (a latent media, so it's not a "real" file)
   | "public_latent_media_embed" // for integration into twitter etc (which would be hardcore for our server load.. so maybe not)
   | "public_gaming" // for AiTube Gaming
@@ -712,7 +713,7 @@
 export type LatentQueryProps = {
   params: { jwtToken: string }
   searchParams: {
-    l?: string | string[], // latent content (serialized to a base64 json)
+    p?: string | string[], // search result content (serialized to a base64 json)
    [key: string]: string | string[] | undefined
  }
 }