Commit db70195 by jbilcke-hf
Parent: d2f7c95

aitube client 0.0.19

package-lock.json CHANGED
@@ -9,7 +9,7 @@
     "version": "0.0.0",
     "dependencies": {
       "@aitube/clap": "0.0.14",
-      "@aitube/client": "0.0.17",
+      "@aitube/client": "0.0.19",
       "@aitube/engine": "0.0.4",
       "@huggingface/hub": "0.12.3-oauth",
       "@huggingface/inference": "^2.6.7",
@@ -130,9 +130,9 @@
       }
     },
     "node_modules/@aitube/client": {
-      "version": "0.0.17",
-      "resolved": "https://registry.npmjs.org/@aitube/client/-/client-0.0.17.tgz",
-      "integrity": "sha512-waRA1k2pqKI7uOXUnBs6y056JY2h7LO+kzKDcHBiNSAyC0ZvSvP7VqTia2fxpF99rik6HgmC3N0AslObP4T6Zw==",
+      "version": "0.0.19",
+      "resolved": "https://registry.npmjs.org/@aitube/client/-/client-0.0.19.tgz",
+      "integrity": "sha512-SKsctZEHRmS3Z63jOUnEp7pGNbmDKyGDTgnoS/GCVciLlWknSC6ht9NJasgEuje06kfgF2Uaj/hWC57JHfoe3A==",
       "dependencies": {
         "query-string": "^9.0.0"
       },
@@ -6709,9 +6709,9 @@
       }
     },
     "node_modules/openai/node_modules/@types/node": {
-      "version": "18.19.32",
-      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.32.tgz",
-      "integrity": "sha512-2bkg93YBSDKk8DLmmHnmj/Rwr18TLx7/n+I23BigFwgexUJoMHZOd8X1OFxuF/W3NN0S2W2E5sVabI5CPinNvA==",
+      "version": "18.19.33",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.33.tgz",
+      "integrity": "sha512-NR9+KrpSajr2qBVp/Yt5TU/rp+b5Mayi3+OlMlcg2cVCfRmcG5PWZ7S4+MG9PZ5gWBoc9Pd0BKSRViuBCRPu0A==",
       "dependencies": {
         "undici-types": "~5.26.4"
       }
package.json CHANGED
@@ -11,7 +11,7 @@
   },
   "dependencies": {
     "@aitube/clap": "0.0.14",
-    "@aitube/client": "0.0.17",
+    "@aitube/client": "0.0.19",
     "@aitube/engine": "0.0.4",
     "@huggingface/hub": "0.12.3-oauth",
     "@huggingface/inference": "^2.6.7",
src/app/api/parsers/parseTurbo.ts ADDED
@@ -0,0 +1,15 @@
+export function parseTurbo(
+  input?: any,
+  defaultValue: boolean = false
+): boolean {
+  let value = defaultValue
+
+  try {
+    let maybeTurbo = decodeURIComponent(`${input || value}`).trim()
+
+    value = !!maybeTurbo
+
+  } catch (err) {}
+
+  return value
+}
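For context, the routes touched below read this flag from the `t` query parameter before threading it into the generators. A minimal usage sketch (the URL is illustrative; in the routes the query string comes from the incoming NextRequest):

```ts
import queryString from "query-string"
import { parseTurbo } from "@/app/api/parsers/parseTurbo"

// illustrative URL only; the real routes parse req.url the same way
const { query } = queryString.parseUrl("https://example.com/api/v1/edit/storyboards?t=true")

// note: the try/catch above only guards decodeURIComponent; the decoded
// string is then checked for truthiness rather than compared to "true"/"false"
const turbo = parseTurbo(query?.t)
```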
src/app/api/v1/edit/dialogues/processShot.ts CHANGED
@@ -16,12 +16,14 @@ export async function processShot({
   shotSegment,
   existingClap,
   newerClap,
-  mode
+  mode,
+  turbo,
 }: {
   shotSegment: ClapSegment
   existingClap: ClapProject
   newerClap: ClapProject
   mode: ClapCompletionMode
+  turbo: boolean
 }): Promise<void> {
 
   const shotSegments: ClapSegment[] = filterSegments(
src/app/api/v1/edit/dialogues/route.ts CHANGED
@@ -8,6 +8,7 @@ import queryString from "query-string"
 import { parseCompletionMode } from "@/app/api/parsers/parseCompletionMode"
 import { throwIfInvalidToken } from "@/app/api/v1/auth/throwIfInvalidToken"
 import { ClapCompletionMode } from "@aitube/client"
+import { parseTurbo } from "@/app/api/parsers/parseTurbo"
 
 // a helper to generate speech for a Clap
 export async function POST(req: NextRequest) {
@@ -17,6 +18,7 @@ export async function POST(req: NextRequest) {
   const query = (qs || {}).query
 
   const mode = parseCompletionMode(query?.c)
+  const turbo = parseTurbo(query?.t)
 
   const blob = await req.blob()
 
@@ -41,7 +43,8 @@
       shotSegment,
       existingClap,
       newerClap,
-      mode
+      mode,
+      turbo,
     })
   ))
 
src/app/api/v1/edit/entities/generateImageID.ts CHANGED
@@ -9,10 +9,12 @@ import { getNegativePrompt, getPositivePrompt } from "@/app/api/utils/imagePromp
 export async function generateImageID({
   prompt,
   // negativePrompt,
+  turbo = false,
   seed,
 }: {
   prompt: string
   // negativePrompt?: string
+  turbo?: boolean
   seed?: number
 }) {
 
@@ -38,8 +40,8 @@ export async function generateImageID({
     // and maybe not use the "turbo" - but I'm not sure
     width,
     height,
-    nbSteps: 8,
-    turbo: true,
+    nbSteps: turbo ? 8 : 25,
+    turbo,
 
     shouldRenewCache: true,
     seed: seed || generateSeed()
src/app/api/v1/edit/entities/index.ts CHANGED
@@ -11,12 +11,14 @@ export async function editEntities({
   existingClap,
   newerClap,
   entityPrompts = [],
-  mode = ClapCompletionMode.PARTIAL
+  mode = ClapCompletionMode.PARTIAL,
+  turbo = false,
 }: {
   existingClap: ClapProject
   newerClap: ClapProject
   entityPrompts?: ClapEntityPrompt[]
   mode?: ClapCompletionMode
+  turbo?: boolean
 }) {
 
   // note that we can only handle either FULL or PARTIAL
@@ -33,7 +35,8 @@ export async function editEntities({
 
   const entityPromptsWithShots = await generateEntityPrompts({
     prompt: existingClap.meta.description,
-    latentStory: await clapToLatentStory(existingClap)
+    latentStory: await clapToLatentStory(existingClap),
+    turbo,
   })
 
   const allShots = existingClap.segments.filter(s => s.category === "camera")
@@ -137,7 +140,8 @@ export async function editEntities({
   if (!entity.imageId) {
     entity.imageId = await generateImageID({
       prompt: entity.imagePrompt,
-      seed: entity.seed
+      seed: entity.seed,
+      turbo,
     })
     entity.imageSourceType = getClapAssetSourceType(entity.imageId)
     entityHasBeenModified = true
src/app/api/v1/edit/entities/route.ts CHANGED
@@ -8,6 +8,7 @@ import { throwIfInvalidToken } from "@/app/api/v1/auth/throwIfInvalidToken"
 
 import { editEntities } from "."
 import { ClapCompletionMode } from "@aitube/client"
+import { parseTurbo } from "@/app/api/parsers/parseTurbo"
 
 export async function POST(req: NextRequest) {
   console.log("Hello!")
@@ -17,6 +18,8 @@ export async function POST(req: NextRequest) {
   const query = (qs || {}).query
 
   const mode = parseCompletionMode(query?.c)
+  const turbo = parseTurbo(query?.t)
+
   // const prompt = parsePrompt(query?.p)
 
   const entityPrompts = parseClapEntityPrompts(query?.e)
@@ -31,7 +34,8 @@
     existingClap,
     newerClap,
     entityPrompts,
-    mode
+    mode,
+    turbo,
   })
 
   console.log(`[api/edit/entities] returning the newer clap extended with the entities`)
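Callers normally go through `@aitube/client` 0.0.19 rather than hitting this route directly, but as a rough sketch of the request shape the handler now accepts (the URL, auth handling, and response type here are assumptions, not shown in this diff):

```ts
// hedged sketch: POST a serialized Clap to the entities route with turbo enabled;
// the sibling routes read the request body via `req.blob()`
async function requestEntitiesWithTurbo(clapBlob: Blob): Promise<Blob> {
  const res = await fetch("/api/v1/edit/entities?t=true", {
    method: "POST",
    body: clapBlob,
  })
  // the handler logs that it returns "the newer clap extended with the entities"
  return res.blob()
}
```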
src/app/api/v1/edit/entities/systemPrompt.ts CHANGED
@@ -5,27 +5,28 @@ The video are meant to be shared on social media platform (Instagram, TikTok, Sn
 Each video is composed of a sequence of shots (a dozen in average), with a voice over and text.
 
 # Task
-You mission is to generate a list of entities/assets (characters, locations etc) associated with each shot.
+You mission is to generate a list of entities/assets associated with each shot.
 
 # Important
 
 - You MUST reply by writing/completing a YAML list of objects.
-- Copy the structure of the examples, but not their content: come up with your own original ideal, you should be creativeç
+- Copy the structure of the examples, but not their content: come up with your own original ideas. Be creative!
 
 # Output schema:
 
 name: name of the entity
-category: can be "character" or "location"
+category: ${
+  // T IS FASTER TO JUST GENERATE CHARACTERS FOR NOW
+  `can only be "character" for now`
+  // can be either "character" or "location"
+}
 image: a description of the entity (you must describe it using a Stable Diffusion prompt - about ~300 chars - using simple descriptive words and adjectives. Describe facts about characters, location, lights, texture, camera orientation, colors, clothes, movements etc. But don't give your opinion, don't talk about the emotions it evokes etc.)
 audio: a textual description of what and how the entity sounds like
 shots: an array containing the shot IDs where the entity is present
 
 # Short example
-
 Given the following inputs:
-
 "A king goes to see a witch to ask if or how he can win an upcoming and challenging battle"
-
 \`\`\`yaml
 - shot: 1
   title: "King Arthus seeks the witch's guidance to win his imminent battle."
@@ -42,13 +43,18 @@ Given the following inputs:
 \`\`\
 
 An example YAML output from the server-side function can be:
-
 \`\`\`yaml
-- name: "Castle's Courtyard"
+${
+  // DISABLED: IT IS FASTER TO JUST GENERATE CHARACTERS FOR NOW
+  /*
+  `- name: "Castle's Courtyard"
   category: "location"
   image: "A medieval castle courtyard, ashlar walls, soldiers and horses, cloudy sky"
   audio: "Background noises of voices, horses, birds, wind, carriages"
-  shots: [1, 2, 3]
+  shots: [1, 2, 3]`
+  */
+  ''
+}
 - name: "King Arthus"
   category: "character"
   image: 1 middle-aged king, pepper-and-salt hair, beared. Dressed in golden armor and a dark purple cape. Majestic, imposing."
src/app/api/v1/edit/storyboards/generateStoryboard.ts CHANGED
@@ -10,12 +10,14 @@ export async function generateStoryboard({
   width,
   height,
   seed,
+  turbo = false,
 }: {
   prompt: string
   // negativePrompt?: string
   width?: number
   height?: number
   seed?: number
+  turbo?: boolean
 }): Promise<string> {
 
   width = getValidNumber(width, 256, 8192, 512)
@@ -30,10 +32,10 @@
     negativePrompt,
     nbFrames: 1,
     nbFPS: 1,
-    nbSteps: 8,
+    nbSteps: turbo ? 8 : 25,
     width,
     height,
-    turbo: true,
+    turbo,
     shouldRenewCache: true,
     seed: seed || generateSeed()
   })
src/app/api/v1/edit/storyboards/processShot.ts CHANGED
@@ -19,12 +19,14 @@ export async function processShot({
   shotSegment,
   existingClap,
   newerClap,
-  mode
+  mode,
+  turbo,
 }: {
   shotSegment: ClapSegment
   existingClap: ClapProject
   newerClap: ClapProject
   mode: ClapCompletionMode
+  turbo: boolean
 }): Promise<void> {
 
   const shotSegments: ClapSegment[] = filterSegments(
@@ -82,6 +84,7 @@
       prompt: getPositivePrompt(shotStoryboardSegment.prompt),
       width: existingClap.meta.width,
       height: existingClap.meta.height,
+      turbo,
     })
     shotStoryboardSegment.assetSourceType = getClapAssetSourceType(shotStoryboardSegment.assetUrl)
   } catch (err) {
src/app/api/v1/edit/storyboards/route.ts CHANGED
@@ -7,6 +7,7 @@ import { throwIfInvalidToken } from "@/app/api/v1/auth/throwIfInvalidToken"
 
 import { processShot } from "./processShot"
 import { ClapCompletionMode } from "@aitube/client"
+import { parseTurbo } from "@/app/api/parsers/parseTurbo"
 
 // a helper to generate storyboards for a Clap
 // this is mostly used by external apps such as the Stories Factory
@@ -22,6 +23,7 @@ export async function POST(req: NextRequest) {
   const query = (qs || {}).query
 
   const mode = parseCompletionMode(query?.c)
+  const turbo = parseTurbo(query?.t)
 
   const blob = await req.blob()
 
@@ -47,6 +49,7 @@
       existingClap,
       newerClap,
       mode,
+      turbo,
     })
   ))
 
src/app/api/v1/edit/videos/generateVideo.ts CHANGED
@@ -10,12 +10,14 @@ export async function generateVideo({
   width,
   height,
   seed,
+  turbo = false,
 }: {
   prompt: string
   // negativePrompt?: string
   width?: number
   height?: number
   seed?: number
+  turbo?: boolean
 }): Promise<string> {
 
   // we want to keep it vertical
@@ -31,10 +33,10 @@
     negativePrompt,
     nbFrames: 80,
     nbFPS: 24,
-    nbSteps: 4,
+    nbSteps: turbo ? 4 : 8,
     width,
     height,
-    turbo: true,
+    turbo,
     shouldRenewCache: true,
     seed: seed || generateSeed()
   })
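Taken together, the three generators now derive their diffusion step count from the flag instead of hard-coding it. A condensed sketch of that policy (the helper below is hypothetical; each generator inlines its own ternary):

```ts
// hypothetical helper summarizing the values used by generateImageID and
// generateStoryboard (8 steps in turbo, 25 otherwise) and generateVideo (4 vs 8)
function pickNbSteps(kind: "image" | "storyboard" | "video", turbo: boolean): number {
  if (kind === "video") return turbo ? 4 : 8
  return turbo ? 8 : 25
}
```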
src/app/api/v1/edit/videos/processShot.ts CHANGED
@@ -19,12 +19,14 @@ export async function processShot({
   shotSegment,
   existingClap,
   newerClap,
-  mode
+  mode,
+  turbo,
 }: {
   shotSegment: ClapSegment
   existingClap: ClapProject
   newerClap: ClapProject
   mode: ClapCompletionMode
+  turbo: boolean
 }): Promise<void> {
   const shotSegments: ClapSegment[] = filterSegments(
     ClapSegmentFilteringMode.START,
@@ -85,7 +87,8 @@
     shotVideoSegment.assetUrl = await generateVideo({
       prompt: getPositivePrompt(shotVideoSegment.prompt),
       width: existingClap.meta.width,
-      height: existingClap.meta.height,
+      height: existingClap.meta.height,
+      turbo,
     })
     shotVideoSegment.assetSourceType = getClapAssetSourceType(shotVideoSegment.assetUrl)
   } catch (err) {
src/app/api/v1/edit/videos/route.ts CHANGED
@@ -7,6 +7,7 @@ import { parseCompletionMode } from "@/app/api/parsers/parseCompletionMode"
 import { throwIfInvalidToken } from "@/app/api/v1/auth/throwIfInvalidToken"
 
 import { processShot } from "./processShot"
+import { parseTurbo } from "@/app/api/parsers/parseTurbo"
 
 // a helper to generate videos for a Clap
 // this is mostly used by external apps such as the Stories Factory
@@ -22,7 +23,8 @@ export async function POST(req: NextRequest) {
   const query = (qs || {}).query
 
   const mode = parseCompletionMode(query?.c)
-
+  const turbo = parseTurbo(query?.t)
+
   const blob = await req.blob()
 
   const existingClap: ClapProject = await parseClap(blob)
@@ -46,7 +48,8 @@
       shotSegment,
      existingClap,
       newerClap,
-      mode
+      mode,
+      turbo,
     })
   ))
 
src/app/api/v1/export/route.ts CHANGED
@@ -3,6 +3,7 @@ import queryString from "query-string"
 
 import { parseSupportedExportFormat } from "@/app/api/parsers/parseSupportedExportFormat"
 import { throwIfInvalidToken } from "@/app/api/v1/auth/throwIfInvalidToken"
+import { parseTurbo } from "../../parsers/parseTurbo"
 
 // we hide/wrap the micro-service under a unified AiTube API
 export async function POST(req: NextRequest, res: NextResponse) {
@@ -12,6 +13,11 @@ export async function POST(req: NextRequest, res: NextResponse) {
   const query = (qs || {}).query
 
   const format = parseSupportedExportFormat(query?.f)
+  const turbo = parseTurbo(query?.t)
+
+  // the AI Tube Clap Exporter doesn't support turbo mode
+  // this could be implemented by reducing the resolution, for instance
+  // or rather, the non-turbo mode could be the one where we upscale
 
   // let's call our micro-service, which is currently open bar.
   const result = await fetch(
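The comment block added above sketches two possible directions for honoring the flag in the exporter. A purely illustrative sketch of the first idea (nothing below is implemented by this commit; the function and values are hypothetical):

```ts
// hypothetical: turbo could trade export resolution for speed,
// while the regular path keeps (or upscales to) the full size
function pickExportHeight(turbo: boolean, fullHeight: number = 1024): number {
  return turbo ? Math.round(fullHeight / 2) : fullHeight
}
```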