jbilcke-hf (HF Staff) committed
Commit c4b97ab · Parent(s): 666c3ce

story + system prompts + bpm + framerate + ratio

In practice this commit renames ClapSegmentCategory.STORYBOARD to ClapSegmentCategory.IMAGE and ClapMediaOrientation to ClapImageRatio, replaces the extraPositivePrompt and screenplay project fields with imagePrompt, storyPrompt and systemPrompt, adds bpm and frameRate to the project settings, renames the timeline store's totalDurationInMs to durationInMs, and flattens meta fields such as title, description, isInteractive and isLive onto the timeline store itself.

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full list.

Files changed (50):
  1. packages/api-client/src/api/editClapVideos.ts +4 -4
  2. packages/app/public/samples/claps/empty_project.yaml +6 -3
  3. packages/app/src/app/api/assistant/samples.ts +2 -2
  4. packages/app/src/app/api/resolve/providers/aitube/index.ts +7 -7
  5. packages/app/src/app/api/resolve/providers/bigmodel/index.ts +2 -2
  6. packages/app/src/app/api/resolve/providers/comfy-comfydeploy/index.ts +1 -1
  7. packages/app/src/app/api/resolve/providers/comfy-comfyicu/index.ts +1 -1
  8. packages/app/src/app/api/resolve/providers/comfy-huggingface/index.ts +1 -1
  9. packages/app/src/app/api/resolve/providers/comfy-replicate/index.ts +1 -1
  10. packages/app/src/app/api/resolve/providers/comfyui/index.ts +2 -3
  11. packages/app/src/app/api/resolve/providers/falai/index.ts +5 -5
  12. packages/app/src/app/api/resolve/providers/falai/runFaceSwap.ts +1 -1
  13. packages/app/src/app/api/resolve/providers/gradio/index.ts +1 -1
  14. packages/app/src/app/api/resolve/providers/huggingface/index.ts +1 -1
  15. packages/app/src/app/api/resolve/providers/letzai/index.ts +1 -1
  16. packages/app/src/app/api/resolve/providers/modelslab/index.ts +1 -1
  17. packages/app/src/app/api/resolve/providers/piapi/index.ts +5 -5
  18. packages/app/src/app/api/resolve/providers/replicate/index.ts +4 -4
  19. packages/app/src/app/api/resolve/providers/replicate/runFaceSwap.ts +1 -1
  20. packages/app/src/app/api/resolve/providers/stabilityai/generateImage.ts +3 -3
  21. packages/app/src/app/api/resolve/providers/stabilityai/index.ts +1 -1
  22. packages/app/src/app/embed/EmbeddedPlayer.tsx +5 -4
  23. packages/app/src/components/editors/FilterEditor/FilterViewer/index.tsx +4 -4
  24. packages/app/src/components/editors/ProjectEditor/index.tsx +26 -12
  25. packages/app/src/components/monitor/DynamicPlayer/StoryboardBuffer.tsx +1 -1
  26. packages/app/src/components/monitor/PlayerControls/index.tsx +2 -2
  27. packages/app/src/components/toolbars/editors-menu/EditorsSideMenu.tsx +2 -10
  28. packages/app/src/components/toolbars/editors-menu/EditorsSideMenuItem.tsx +6 -7
  29. packages/app/src/components/toolbars/top-menu/IsBusy/index.tsx +1 -1
  30. packages/app/src/components/toolbars/top-menu/TopMenuLogo/index.tsx +1 -1
  31. packages/app/src/components/toolbars/top-menu/assistant/index.tsx +2 -2
  32. packages/app/src/components/ui/menubar.tsx +1 -1
  33. packages/app/src/lib/utils/decodeOutput.ts +1 -1
  34. packages/app/src/lib/utils/formatSegmentForExport.ts +1 -1
  35. packages/app/src/services/api/resolve.ts +9 -2
  36. packages/app/src/services/assistant/updateStoryAndScene.ts +2 -2
  37. packages/app/src/services/assistant/useAssistant.ts +2 -2
  38. packages/app/src/services/autocomplete/types.ts +1 -1
  39. packages/app/src/services/autocomplete/useAutocomplete.ts +24 -19
  40. packages/app/src/services/editors/project-editor/useProjectEditor.ts +3 -2
  41. packages/app/src/services/editors/script-editor/useScriptEditor.ts +3 -3
  42. packages/app/src/services/editors/workflow-editor/getSegmentWorkflowProviderAndEngine.ts +2 -2
  43. packages/app/src/services/editors/workflow-editor/workflows/comfyui/getComfyWorkflow.ts +1 -1
  44. packages/app/src/services/io/ffmpegUtils.ts +2 -2
  45. packages/app/src/services/io/formats/edl.ts +4 -20
  46. packages/app/src/services/io/formats/fcp.ts +19 -29
  47. packages/app/src/services/io/formats/mlt.ts +22 -16
  48. packages/app/src/services/io/formats/otio.ts +2 -2
  49. packages/app/src/services/io/parseFileIntoSegments.ts +8 -6
  50. packages/app/src/services/io/useIO.ts +23 -18
packages/api-client/src/api/editClapVideos.ts CHANGED
@@ -42,7 +42,7 @@ export async function editClapVideos({
     params.t = "true"
   }
   // special trick to not touch the generated
-  // storyboards that are used by pending videos
+  // storyboard images that are used by pending videos
   const idsOfStoryboardsToKeep = clap.segments.map((segment: ClapSegment) => {

     const isPendingVideo = (
@@ -53,14 +53,14 @@

     if (!isPendingVideo) { return undefined }

-    const storyboard: ClapSegment | undefined = filterSegments(
+    const storyboardImage: ClapSegment | undefined = filterSegments(
       ClapSegmentFilteringMode.BOTH,
       segment,
       clap.segments,
-      ClapSegmentCategory.STORYBOARD
+      ClapSegmentCategory.IMAGE
     ).at(0)

-    return storyboard?.id
+    return storyboardImage?.id
   }).filter((x: any) => x) as string[]

   const newClap = await fetchClap(`${aitubeApiUrl}edit/videos?${queryString.stringify(params)}`, {
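The lookup above pairs each pending video with the storyboard image that spans the same time range. In isolation, the pattern looks like this (a sketch reusing the @aitube/clap helpers exactly as they appear in this file; the wrapper function name is hypothetical):

  import {
    ClapSegment,
    ClapSegmentCategory,
    ClapSegmentFilteringMode,
    filterSegments,
  } from '@aitube/clap'

  // Find the storyboard image overlapping a given video segment.
  // BOTH is the filtering mode used above; its exact overlap semantics
  // are defined in @aitube/clap.
  function findStoryboardImageFor(
    video: ClapSegment,
    allSegments: ClapSegment[]
  ): ClapSegment | undefined {
    return filterSegments(
      ClapSegmentFilteringMode.BOTH,
      video,
      allSegments,
      ClapSegmentCategory.IMAGE
    ).at(0)
  }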
packages/app/public/samples/claps/empty_project.yaml CHANGED
@@ -7,15 +7,18 @@
 description: An exciting new project
 synopsis: ""
 licence: ""
-orientation: landscape
+imageRatio: landscape
 durationInMs: 18000
 width: 1024
 height: 575
 defaultVideoModel: AnimateDiff-Lightning
-extraPositivePrompt: []
-screenplay: ""
+imagePrompt: ""
+storyPrompt: ""
+systemPrompt: ""
 isLoop: false
 isInteractive: false
+bpm: 120
+frameRate: 24
 - id: e16478cf-3535-48e9-98e8-f702ec9ca348
   track: 0
   startTimeInMs: 0
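Taken together, the renamed and added fields give the project metadata roughly this shape (a TypeScript sketch inferred from this yaml diff; the authoritative ClapMeta type lives in @aitube/clap and may differ in optionality and extra fields):

  // Sketch of the project metadata implied by this commit; names come
  // from the diff, the interface itself is an assumption.
  type ClapImageRatioSketch = 'landscape' | 'portrait' | 'square'

  interface ProjectMetaSketch {
    title: string
    description: string
    synopsis: string
    licence: string
    imageRatio: ClapImageRatioSketch // was: orientation
    durationInMs: number
    width: number
    height: number
    imagePrompt: string  // was: extraPositivePrompt (string[])
    storyPrompt: string  // was: screenplay
    systemPrompt: string // new: global assistant prompt
    isLoop: boolean
    isInteractive: boolean
    bpm: number          // new: beats per minute
    frameRate: number    // new: frames per second
  }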
packages/app/src/app/api/assistant/samples.ts CHANGED
@@ -11,7 +11,7 @@ import {
   "prompt": "",
   "startTimeInMs": 767000,
   "endTimeInMs": 769000,
-  "category": "STORYBOARD"
+  "category": "IMAGE"
 },
 {
   "segmentId": 2,
@@ -25,7 +25,7 @@
   "prompt": "",
   "startTimeInMs": 769000,
   "endTimeInMs": 771000,
-  "category": "STORYBOARD"
+  "category": "IMAGE"
 },
 {
   "segmentId": 4,
packages/app/src/app/api/resolve/providers/aitube/index.ts CHANGED
@@ -32,7 +32,7 @@ export async function resolveSegment(
     segments: request.segments,
   })

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     const resolvedClap = await editClapStoryboards({
       clap,
       completionMode: ClapCompletionMode.PARTIAL,
@@ -40,19 +40,19 @@
       token: '<TODO>',
     })

-    const storyboards = resolvedClap.segments.filter(
-      (s) => s.category === ClapSegmentCategory.STORYBOARD
+    const storyboardImages = resolvedClap.segments.filter(
+      (s) => s.category === ClapSegmentCategory.IMAGE
     )

-    const storyboard = storyboards.at(0)
+    const storyboardImage = storyboardImages.at(0)

-    if (!storyboard) {
-      throw new Error(`failed to generate a storyboard`)
+    if (!storyboardImage) {
+      throw new Error(`failed to generate a storyboard image`)
     }

     return {
       ...request.segment,
-      ...(storyboard as TimelineSegment),
+      ...(storyboardImage as TimelineSegment),
     }
   } else if (request.segment.category === ClapSegmentCategory.VIDEO) {
     const resolvedClap = await editClapVideos({
packages/app/src/app/api/resolve/providers/bigmodel/index.ts CHANGED
@@ -22,14 +22,14 @@ export async function resolveSegment(
   note
   if (!request.prompts.image.positive) {
     console.error(
-      `resolveSegment: cannot resolve a storyboard with an empty prompt`
+      `resolveSegment: cannot resolve a storyboard image with an empty prompt`
     )
     return segment
   }
   */

   if (!request.prompts.video.image) {
-    throw new Error(`cannot generate a video without a storyboard`)
+    throw new Error(`cannot generate a video without a storyboard image`)
   }

   // https://bigmodel.cn/dev/api#cogvideox
packages/app/src/app/api/resolve/providers/comfy-comfydeploy/index.ts CHANGED
@@ -14,7 +14,7 @@ export async function resolveSegment(
     throw new Error(`Missing API key for "ComfyDeploy"`)
   }

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     const inputFields =
       request.settings.imageGenerationWorkflow.inputFields || []

packages/app/src/app/api/resolve/providers/comfy-comfyicu/index.ts CHANGED
@@ -15,7 +15,7 @@ export async function resolveSegment(
     throw new Error(`Missing API key for "Comfy.icu"`)
   }

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     const workflowId =
       request.settings.imageGenerationWorkflow.id.split('://').pop() || ''

packages/app/src/app/api/resolve/providers/comfy-huggingface/index.ts CHANGED
@@ -12,7 +12,7 @@ export async function resolveSegment(
   if (!request.settings.huggingFaceApiKey) {
     throw new Error(`Missing API key for "Hugging Face"`)
   }
-  if (request.segment.category !== ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category !== ClapSegmentCategory.IMAGE) {
     throw new Error(
       `Clapper doesn't support ${request.segment.category} generation for provider "Comfy Hugging Face". Please open a pull request with (working code) to solve this!`
     )
packages/app/src/app/api/resolve/providers/comfy-replicate/index.ts CHANGED
@@ -18,7 +18,7 @@ export async function resolveSegment(

   const segment: TimelineSegment = request.segment

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     const inputFields =
       request.settings.imageGenerationWorkflow.inputFields || []

packages/app/src/app/api/resolve/providers/comfyui/index.ts CHANGED
@@ -46,13 +46,12 @@ export async function resolveSegment(
   ).init()

   if (
-    [ClapSegmentCategory.STORYBOARD, ClapSegmentCategory.VIDEO].includes(
+    [ClapSegmentCategory.IMAGE, ClapSegmentCategory.VIDEO].includes(
       request.segment.category
     )
   ) {
     const clapWorkflow = {
-      [ClapSegmentCategory.STORYBOARD]:
-        request.settings.imageGenerationWorkflow,
+      [ClapSegmentCategory.IMAGE]: request.settings.imageGenerationWorkflow,
       [ClapSegmentCategory.VIDEO]: request.settings.videoGenerationWorkflow,
     }[request.segment.category]

packages/app/src/app/api/resolve/providers/falai/index.ts CHANGED
@@ -1,7 +1,7 @@
 import * as fal from '@fal-ai/serverless-client'
 import { TimelineSegment } from '@aitube/timeline'
 import { FalAiImageSize, ResolveRequest } from '@aitube/clapper-services'
-import { ClapMediaOrientation, ClapSegmentCategory } from '@aitube/clap'
+import { ClapImageRatio, ClapSegmentCategory } from '@aitube/clap'
 import {
   FalAiAudioResponse,
   FalAiImageResponse,
@@ -31,7 +31,7 @@ export async function resolveSegment(
   // for doc see:
   // https://fal.ai/models/fal-ai/fast-sdxl/api

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     model = request.settings.imageGenerationWorkflow.data || ''

     if (!request.prompts.image.positive) {
@@ -78,9 +78,9 @@

   // this was the previous system
   /*
-  request.meta.orientation === ClapMediaOrientation.SQUARE
+  request.meta.orientation === ClapImageRatio.SQUARE
     ? FalAiImageSize.SQUARE_HD
-    : request.meta.orientation === ClapMediaOrientation.PORTRAIT
+    : request.meta.orientation === ClapImageRatio.PORTRAIT
       ? FalAiImageSize.PORTRAIT_16_9
       : FalAiImageSize.LANDSCAPE_16_9
   */
@@ -254,7 +254,7 @@
     segment.assetUrl = result?.video?.url || ''
   } else if (model === 'fal-ai/stable-video') {
     if (!request.prompts.video.image) {
-      throw new Error(`cannot generate a video without a storyboard`)
+      throw new Error(`cannot generate a video without a storyboard image`)
     }

     const result = (await fal.run(model, {
packages/app/src/app/api/resolve/providers/falai/runFaceSwap.ts CHANGED
@@ -17,7 +17,7 @@ export async function runFaceSwap(

   const segment: TimelineSegment = request.segment

-  if (segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (segment.category === ClapSegmentCategory.IMAGE) {
     const imageFaceswapWorkflowModel =
       request.settings.imageFaceswapWorkflow.data || ''

packages/app/src/app/api/resolve/providers/gradio/index.ts CHANGED
@@ -9,7 +9,7 @@ export async function resolveSegment(
 ): Promise<TimelineSegment> {
   const segment = request.segment

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     segment.assetUrl = await callGradioApi({
       url: request.settings.gradioApiUrlForImage,
       inputs: request.prompts.image,
packages/app/src/app/api/resolve/providers/huggingface/index.ts CHANGED
@@ -21,7 +21,7 @@ export async function resolveSegment(
     request.settings.huggingFaceApiKey
   )

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     segment.assetUrl = await generateImage(request)
   } else if (request.segment.category === ClapSegmentCategory.DIALOGUE) {
     segment.assetUrl = await generateVoice(request)
packages/app/src/app/api/resolve/providers/letzai/index.ts CHANGED
@@ -16,7 +16,7 @@ export async function resolveSegment(

   let model = request.settings.imageGenerationWorkflow.data || ''

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     model = request.settings.imageGenerationWorkflow.data || ''

     if (!request.prompts.image.positive) {
packages/app/src/app/api/resolve/providers/modelslab/index.ts CHANGED
@@ -12,7 +12,7 @@ export async function resolveSegment(
   if (!request.settings.modelsLabApiKey) {
     throw new Error(`Missing API key for "ModelsLab.com"`)
   }
-  if (request.segment.category !== ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category !== ClapSegmentCategory.IMAGE) {
     throw new Error(
       `Clapper doesn't support ${request.segment.category} generation for provider "ModelsLab". Please open a pull request with (working code) to solve this!`
     )
packages/app/src/app/api/resolve/providers/piapi/index.ts CHANGED
@@ -1,6 +1,6 @@
 import { TimelineSegment } from '@aitube/timeline'
 import { ResolveRequest } from '@aitube/clapper-services'
-import { ClapMediaOrientation, ClapSegmentCategory } from '@aitube/clap'
+import { ClapImageRatio, ClapSegmentCategory } from '@aitube/clap'

 import { getWorkflowInputValues } from '../getWorkflowInputValues'
 import { createImage } from './midjourney/createImage'
@@ -18,7 +18,7 @@ export async function resolveSegment(

   let model = request.settings.imageGenerationWorkflow.data || ''

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     model = request.settings.imageGenerationWorkflow.data || ''

     if (!request.prompts.image.positive) {
@@ -65,7 +65,7 @@

     if (!request.prompts.video.image) {
       console.error(
-        `resolveSegment: cannot generate video without a storyboard`
+        `resolveSegment: cannot generate video without a storyboard image`
       )
       return segment
     }
@@ -77,9 +77,9 @@
     if (workflow.id === 'piapi://kling/v1/video') {
       // can only be 16:9,9:16,1:1
       const aspectRatio =
-        request.meta.orientation === ClapMediaOrientation.SQUARE
+        request.meta.orientation === ClapImageRatio.SQUARE
           ? '1:1'
-          : request.meta.orientation === ClapMediaOrientation.PORTRAIT
+          : request.meta.orientation === ClapImageRatio.PORTRAIT
             ? '9:16'
            : '16:9'

packages/app/src/app/api/resolve/providers/replicate/index.ts CHANGED
@@ -1,6 +1,6 @@
 import Replicate from 'replicate'

-import { ClapMediaOrientation, ClapSegmentCategory } from '@aitube/clap'
+import { ClapImageRatio, ClapSegmentCategory } from '@aitube/clap'
 import { ResolveRequest } from '@aitube/clapper-services'
 import { TimelineSegment } from '@aitube/timeline'
 import { getWorkflowInputValues } from '../getWorkflowInputValues'
@@ -17,15 +17,15 @@ export async function resolveSegment(

   const segment = request.segment

-  if (request.segment.category == ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category == ClapSegmentCategory.IMAGE) {
     const { workflowValues } = getWorkflowInputValues(
       request.settings.imageGenerationWorkflow
     )

     const aspectRatio =
-      request.meta.orientation === ClapMediaOrientation.SQUARE
+      request.meta.orientation === ClapImageRatio.SQUARE
        ? '1:1'
-        : request.meta.orientation === ClapMediaOrientation.PORTRAIT
+        : request.meta.orientation === ClapImageRatio.PORTRAIT
          ? '9:16'
          : '16:9'

packages/app/src/app/api/resolve/providers/replicate/runFaceSwap.ts CHANGED
@@ -14,7 +14,7 @@ export async function runFaceSwap(

   const segment: TimelineSegment = request.segment

-  if (segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (segment.category === ClapSegmentCategory.IMAGE) {
     const imageFaceswapWorkflowModel =
       request.settings.imageFaceswapWorkflow.data || ''

packages/app/src/app/api/resolve/providers/stabilityai/generateImage.ts CHANGED
@@ -1,4 +1,4 @@
-import { ClapMediaOrientation } from '@aitube/clap'
+import { ClapImageRatio } from '@aitube/clap'

 import { ResolveRequest, StabilityAiImageSize } from '@aitube/clapper-services'

@@ -22,9 +22,9 @@ export async function generateImage(request: ResolveRequest): Promise<string> {
   }

   const aspectRatio =
-    request.meta.orientation === ClapMediaOrientation.SQUARE
+    request.meta.orientation === ClapImageRatio.SQUARE
      ? StabilityAiImageSize.SQUARE
-     : request.meta.orientation === ClapMediaOrientation.PORTRAIT
+     : request.meta.orientation === ClapImageRatio.PORTRAIT
        ? StabilityAiImageSize.PORTRAIT_9_16
        : StabilityAiImageSize.LANDSCAPE_16_9

packages/app/src/app/api/resolve/providers/stabilityai/index.ts CHANGED
@@ -13,7 +13,7 @@ export async function resolveSegment(

   const segment = request.segment

-  if (request.segment.category === ClapSegmentCategory.STORYBOARD) {
+  if (request.segment.category === ClapSegmentCategory.IMAGE) {
     segment.assetUrl = await generateImage(request)
   } else if (request.segment.category === ClapSegmentCategory.VIDEO) {
     segment.assetUrl = await generateVideo(request)
packages/app/src/app/embed/EmbeddedPlayer.tsx CHANGED
@@ -11,7 +11,8 @@ export function EmbeddedPlayer() {
   const isPlaying = useMonitor((s) => s.isPlaying)
   const togglePlayback = useMonitor((s) => s.togglePlayback)

-  const meta = useTimeline((s) => s.meta)
+  const isInteractive = useTimeline((s) => s.isInteractive)
+  const isLive = useTimeline((s) => s.isLive)

   const [isOverlayVisible, setOverlayVisible] = useState(true)

@@ -64,8 +65,8 @@
   >
     <div
       className={cn(`flex h-full flex-row items-center`, {
-        'bg-yellow-500/100': meta.isInteractive,
-        'bg-red-500/100': meta.isLive,
+        'bg-yellow-500/100': isInteractive,
+        'bg-red-500/100': isLive,
       })}
       style={{
         width: '100%', // <-- TODO: compute the % of progression within the experience
@@ -90,7 +91,7 @@
       onClick={togglePlayback}
     />
     <StaticOrInteractiveTag
-      isInteractive={meta.isInteractive}
+      isInteractive={isInteractive}
       size="md"
       className=""
     />
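Several components in this commit stop selecting a nested meta object and instead select individual flattened fields from the timeline store. With zustand, selecting one primitive per hook means the component only re-renders when that specific value changes, not on every meta update. A minimal sketch of the pattern (store shape assumed, not the real TimelineStore):

  import React from 'react'
  import { create } from 'zustand'

  // Assumed, simplified timeline store: meta fields live at the top level.
  const useTimelineSketch = create<{ isInteractive: boolean; isLive: boolean }>(
    () => ({ isInteractive: false, isLive: false })
  )

  function StatusBadge() {
    // One primitive per selector: re-renders only when that value changes.
    const isInteractive = useTimelineSketch((s) => s.isInteractive)
    const isLive = useTimelineSketch((s) => s.isLive)
    return <span>{isLive ? 'LIVE' : isInteractive ? 'INTERACTIVE' : 'STATIC'}</span>
  }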
packages/app/src/components/editors/FilterEditor/FilterViewer/index.tsx CHANGED
@@ -14,7 +14,7 @@ import { useEffect, useState } from 'react'
 // TODO: move this to the renderer service
 // also since filters use WebGPU, I think one day we can run them in real-time
 // over the video as well (or maybe using WebGL)
-function useCurrentlyVisibleStoryboard(): string | undefined {
+function useVisibleStoryboardImages(): string | undefined {
   const { activeStoryboardSegment } = useRenderer((s) => s.bufferedSegments)

   // can't return something if there is nothing
@@ -25,7 +25,7 @@
   return activeStoryboardSegment.assetUrl
 }

-function useFilteredStoryboard(input?: string): string | undefined {
+function useFilteredStoryboardImages(input?: string): string | undefined {
   const current = useFilterEditor((s) => s.current)
   const runFilterPipeline = useFilterEditor((s) => s.runFilterPipeline)
   const [result, setResult] = useState('')
@@ -65,8 +65,8 @@ export function FilterViewer() {
   const undo = useFilterEditor((s) => s.undo)
   const redo = useFilterEditor((s) => s.redo)

-  const input = useCurrentlyVisibleStoryboard()
-  const output = useFilteredStoryboard(input)
+  const input = useVisibleStoryboardImages()
+  const output = useFilteredStoryboardImages(input)

   const hasBetaAccess = useUI((s) => s.hasBetaAccess)

packages/app/src/components/editors/ProjectEditor/index.tsx CHANGED
@@ -35,7 +35,7 @@ export function ProjectEditor() {
   return (
     <FormSection label={'Project Settings'} className="p-4">
       <FormInput<string>
-        label={'title'}
+        label={'Title'}
         value={current.title || ''}
         defaultValue=""
         onChange={(title) => {
@@ -76,18 +76,32 @@
         minValue={256}
         maxValue={1024}
       />
-      {/*
-      for this one we will need some kind of draft mode
-      */}
+      <FormInput<number>
+        label={'BPM (Beats Per Minute) (WIP)'}
+        value={current.bpm || 110}
+        defaultValue={110}
+        minValue={1}
+        maxValue={500}
+      />
+      <FormInput<number>
+        label={'Frame rate (WIP)'}
+        value={current.frameRate || 24}
+        defaultValue={24}
+        minValue={1}
+        maxValue={1000}
+      />
       <FormInput<string>
-        label={'Global prompt keywords ("3D render, comical"..)'}
-        value={
-          Array.isArray(current.extraPositivePrompt)
-            ? current.extraPositivePrompt.join(', ')
-            : ''
-        }
-        onChange={(newKeywords) => {
-          // const keywords = newKeywords.split(",").map(x => x.trim())
+        label={'Global image/video prompt ("3D render, 1970 style..")'}
+        value={current.imagePrompt || ''}
+        onChange={(imagePrompt) => {
+          setCurrent({ ...current, imagePrompt })
+        }}
+      />
+      <FormInput<string>
+        label={'Global assistant prompt ("don\'t use swear words..")'}
+        value={current.systemPrompt || ''}
+        onChange={(systemPrompt) => {
+          setCurrent({ ...current, systemPrompt })
         }}
       />
       <FormInput<string>
packages/app/src/components/monitor/DynamicPlayer/StoryboardBuffer.tsx CHANGED
@@ -30,7 +30,7 @@ export function StoryboardBuffer({
       className
     )}
     src={src}
-    alt="storyboard"
+    alt="storyboard image"
   />
 )
 }
packages/app/src/components/monitor/PlayerControls/index.tsx CHANGED
@@ -26,7 +26,7 @@ export function PlayerControls({ className }: { className?: string }) {
   const jumpAt = useMonitor((s) => s.jumpAt)

   const cursorTimestampAtInMs = useTimeline((s) => s.cursorTimestampAtInMs)
-  const totalDurationInMs = useTimeline((s) => s.totalDurationInMs)
+  const durationInMs = useTimeline((s) => s.durationInMs)

   const handleAccelerate = () => {}

@@ -77,7 +77,7 @@
     />
   </div>
   <Counter
-    valueInMs={totalDurationInMs}
+    valueInMs={durationInMs}
     color={theme.monitorSecondaryTextColor || theme.defaultTextColor || ''}
   />
 </div>
packages/app/src/components/toolbars/editors-menu/EditorsSideMenu.tsx CHANGED
@@ -28,12 +28,7 @@ export function EditorsSideMenu() {
   const theme = useTheme()
   return (
     <div
-      className="
-        w-10 md:w-11 lg:w-12 xl:w-13 flex
-        h-full
-        flex-col items-center justify-between
-        border-r
-        transition-all duration-200 ease-in-out"
+      className="flex h-full w-10 flex-col items-center justify-between border-r transition-all duration-200 ease-in-out md:w-11 lg:w-12 xl:w-13"
       style={{
         backgroundColor:
           theme.editorMenuBgColor || theme.defaultBgColor || '#eeeeee',
@@ -41,10 +36,7 @@
           theme.editorBorderColor || theme.defaultBorderColor || '#eeeeee',
       }}
     >
-      <div className="
-        flex h-full w-full flex-col items-center
-        transition-all duration-200 ease-in-out
-      ">
+      <div className="flex h-full w-full flex-col items-center transition-all duration-200 ease-in-out">
        <EditorsSideMenuItem view={EditorView.PROJECT} label="Project settings">
          <MdMovieEdit />
        </EditorsSideMenuItem>
packages/app/src/components/toolbars/editors-menu/EditorsSideMenuItem.tsx CHANGED
@@ -54,15 +54,14 @@ export function EditorsSideMenuItem({

   return (
     <Tooltip delayDuration={0}>
-      <TooltipTrigger asChild disabled={!tooltipLabel}
-        className="
-          h-8 md:h-9 xl:h-11
-          w-full
-          transition-all duration-200 ease-in-out
-        ">
+      <TooltipTrigger
+        asChild
+        disabled={!tooltipLabel}
+        className="h-8 w-full transition-all duration-200 ease-in-out md:h-9 xl:h-11"
+      >
        <div
          className={cn(
-           `flex h-8 md:h-9 xl:h-11 w-full flex-col`,
+           `flex h-8 w-full flex-col md:h-9 xl:h-11`,
            `transition-all duration-150 ease-out`,
            `items-center justify-center`,
            unmanaged || isActive ? '' : `cursor-pointer`,
packages/app/src/components/toolbars/top-menu/IsBusy/index.tsx CHANGED
@@ -6,7 +6,7 @@ export function IsBusy({ nbPendingTasks = 0 }: { nbPendingTasks?: number }) {
   return (
     <GoDotFill
       className={cn(
-        `-mt-[9px] ml-[1px] -mr-1.5 h-1.5 w-1.5 text-yellow-400`,
+        `-mr-1.5 -mt-[9px] ml-[1px] h-1.5 w-1.5 text-yellow-400`,
         nbPendingTasks > 0 ? 'animate-pulse opacity-100' : 'opacity-0'
       )}
     />
packages/app/src/components/toolbars/top-menu/TopMenuLogo/index.tsx CHANGED
@@ -83,7 +83,7 @@ export function TopMenuLogo() {
   <BiSolidMoviePlay style={{ color }} />
 )}
 <span
-  className="hidden md:inline ld:text-md scale-[88%] pr-0.5 font-bold tracking-[-0.03em] md:pr-1 lg:pr-2"
+  className="ld:text-md hidden scale-[88%] pr-0.5 font-bold tracking-[-0.03em] md:inline md:pr-1 lg:pr-2"
   style={{ color }}
 >
   Clapper
packages/app/src/components/toolbars/top-menu/assistant/index.tsx CHANGED
@@ -18,7 +18,7 @@ import { useAutocomplete } from '@/services/autocomplete/useAutocomplete'

 export function TopMenuAssistant() {
   const setShowSettings = useUI((s) => s.setShowSettings)
-  const storyboardsToStory = useAutocomplete((s) => s.storyboardsToStory)
+  const convertImagesToStory = useAutocomplete((s) => s.convertImagesToStory)

   const hasBetaAccess = useUI((s) => s.hasBetaAccess)

@@ -41,7 +41,7 @@
   <MenubarSeparator />
   <MenubarItem
     onClick={() => {
-      storyboardsToStory()
+      convertImagesToStory()
     }}
   >
     Storyboards-to-captions (beta, client-side AI)
packages/app/src/components/ui/menubar.tsx CHANGED
@@ -38,7 +38,7 @@ const MenubarTrigger = React.forwardRef<
 <MenubarPrimitive.Trigger
   ref={ref}
   className={cn(
-    'flex cursor-default select-none items-center rounded-sm px-1.5 lg:px-2 py-1.5 text-xs lg:text-sm font-normal outline-none data-[state=open]:bg-neutral-100 data-[state=open]:text-neutral-900 focus:bg-neutral-100 focus:text-neutral-900 dark:text-neutral-400 dark:data-[state=open]:bg-neutral-800 dark:data-[state=open]:text-neutral-300 dark:focus:bg-neutral-800 dark:focus:text-neutral-300',
+    'flex cursor-default select-none items-center rounded-sm px-1.5 py-1.5 text-xs font-normal outline-none data-[state=open]:bg-neutral-100 data-[state=open]:text-neutral-900 focus:bg-neutral-100 focus:text-neutral-900 dark:text-neutral-400 dark:data-[state=open]:bg-neutral-800 dark:data-[state=open]:text-neutral-300 dark:focus:bg-neutral-800 dark:focus:text-neutral-300 lg:px-2 lg:text-sm',
     className
   )}
   {...props}
packages/app/src/lib/utils/decodeOutput.ts CHANGED
@@ -25,7 +25,7 @@ export async function decodeOutput(input: any): Promise<string> {
   // which is a unreasonable since a few frames quickly add up to 10 Mb,
   // we can't afford to have a 20 Gb .clap file
   //
-  // if you really want to have a pro, Hollywood-grade storyboard storage,
+  // if you really want to have a pro, Hollywood-grade storyboard image storage,
   // this isn't impossible but then you need to use either file paths or remote URL paths
   // and if you want some lossless like this, we should add a parameter to support that
   const jpegImageAsBase64 = await convertToJpeg(base64Url)
packages/app/src/lib/utils/formatSegmentForExport.ts CHANGED
@@ -66,7 +66,7 @@ export function formatSegmentForExport(

   const isExportableToFile =
     (segment.category === ClapSegmentCategory.VIDEO ||
-      segment.category === ClapSegmentCategory.STORYBOARD ||
+      segment.category === ClapSegmentCategory.IMAGE ||
       segment.category === ClapSegmentCategory.DIALOGUE ||
       segment.category === ClapSegmentCategory.SOUND ||
       segment.category === ClapSegmentCategory.MUSIC) &&
packages/app/src/services/api/resolve.ts CHANGED
@@ -13,6 +13,7 @@ import {
 } from '@aitube/clapper-services'
 import { useSettings } from '../settings'
 import {
+  ClapMeta,
   ClapSegmentCategory,
   ClapWorkflowEngine,
   ClapWorkflowProvider,
@@ -24,12 +25,14 @@ export async function resolve(
   req: Partial<ResolveRequest>
 ): Promise<TimelineSegment> {
   const { getRequestSettings }: SettingsStore = useSettings.getState()
-  const { meta }: TimelineStore = useTimeline.getState()
+  const timeline: TimelineStore = useTimeline.getState()
+
+  const meta = timeline.getClapMeta()

   const defaultTimelineSegment: TimelineSegment =
     await clapSegmentToTimelineSegment(
       newSegment({
-        category: ClapSegmentCategory.STORYBOARD,
+        category: ClapSegmentCategory.IMAGE,
       })
     )

@@ -67,6 +70,10 @@
       : [],
     mainCharacterId: req.mainCharacterId || undefined,
     mainCharacterEntity: req.mainCharacterEntity || undefined,
+
+    // jbilcke-hf: I don't think we need all of those fields
+    // for our request, especially since some are a bit large
+    // and probably slow-down all our requests eg. the story prompt, thumbnail..
     meta,
     prompts: getDefaultResolveRequestPrompts(req.prompts),
   }
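The inline comment added above suggests the full meta object is heavier than the resolvers need. A possible follow-up, sketched here purely as an assumption (not part of this commit): pick only the small, render-relevant fields before building the request.

  import { ClapMeta } from '@aitube/clap'

  // Hypothetical helper: forward only lightweight meta fields instead of
  // the full ClapMeta, which may carry large values like the story prompt.
  function pickResolveMeta(meta: ClapMeta): Partial<ClapMeta> {
    const { width, height, imageRatio, durationInMs } = meta
    return { width, height, imageRatio, durationInMs }
  }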
packages/app/src/services/assistant/updateStoryAndScene.ts CHANGED
@@ -150,7 +150,7 @@ export async function updateStoryAndScene({
   }
 } else {
   // if the LLM tries to add a new camera, we must
-  // also create corresponding video and storyboards segments as well
+  // also create corresponding video and storyboard image segments as well
   if (segment.category === ClapSegmentCategory.CAMERA) {
     segmentsToAdd.push(
       await clapSegmentToTimelineSegment(
@@ -167,7 +167,7 @@
       newSegment({
         ...segmentProperties,
         prompt: 'movie still',
-        category: ClapSegmentCategory.STORYBOARD,
+        category: ClapSegmentCategory.IMAGE,
         outputType: ClapOutputType.IMAGE,
       })
     )
packages/app/src/services/assistant/useAssistant.ts CHANGED
@@ -187,7 +187,7 @@ export const useAssistant = create<AssistantStore>((set, get) => ({
     bufferedSegments: { activeSegments },
   } = useRenderer.getState()
   const timeline: TimelineStore = useTimeline.getState()
-  const { meta, scenes, addSegment, entityIndex } = timeline
+  const { description, scenes, entityIndex } = timeline

   // note: here `settings` is not the store's state itself (with methods etc)
   // but a snapshot of the serializable state values only
@@ -269,7 +269,7 @@
   fullScene: scene?.sequenceFullText || '',
   actionLine: scene?.line || '',
   entities: entityIndex,
-  projectInfo: meta.description,
+  projectInfo: description,
   history: get().history,
 }

packages/app/src/services/autocomplete/types.ts CHANGED
@@ -12,7 +12,7 @@ export type AutocompleteControls = {
  * @param params
  * @returns
  */
-  storyboardsToStory: (params?: {
+  convertImagesToStory: (params?: {
     startTimeInMs?: number
     endTimeInMs?: number
   }) => Promise<void>
packages/app/src/services/autocomplete/useAutocomplete.ts CHANGED
@@ -26,7 +26,7 @@ import { AutocompleteStore } from './types'
 export const useAutocomplete = create<AutocompleteStore>((set, get) => ({
   ...getDefaultAutocompleteState(),

-  storyboardsToStory: async (
+  convertImagesToStory: async (
     params: {
       startTimeInMs?: number
       endTimeInMs?: number
@@ -46,7 +46,7 @@
       : 0
     const endTimeInMs = isValidNumber(params?.endTimeInMs)
       ? params?.endTimeInMs!
-      : timeline.totalDurationInMs
+      : timeline.durationInMs

     const range = { startTimeInMs, endTimeInMs }

@@ -56,33 +56,35 @@

       // since this is very long task, we can run it in the background
       visibility: TaskVisibility.BACKGROUND,
-      initialMessage: `Analyzing storyboards..`,
-      successMessage: `Analyzing storyboards.. 100% done`,
+      initialMessage: `Analyzing images..`,
+      successMessage: `Analyzing images.. 100% done`,
       value: 0,
     })

     set({ isRunning: true })

     try {
-      const storyboards = filterSegments<TimelineSegment>(
+      const storyboardImages = filterSegments<TimelineSegment>(
         ClapSegmentFilteringMode.ANY,
         range,
         timeline.segments,
-        ClapSegmentCategory.STORYBOARD
-      ).filter((storyboard) => storyboard.assetUrl.startsWith('data:'))
+        ClapSegmentCategory.IMAGE
+      ).filter((storyboardImage) =>
+        storyboardImage.assetUrl.startsWith('data:')
+      )

       let i = 0
       let progress = 0
       // to keep things light and in the background, we use an async for loop
-      for (const storyboard of storyboards) {
+      for (const storyboardImage of storyboardImages) {
         const isStillRunning = get().isRunning
         if (!isStillRunning) {
           break
         }

         try {
-          console.log(`analyzing storyboard:`, storyboard)
-          const frames = [storyboard.assetUrl]
+          console.log(`analyzing storyboard image:`, storyboardImage)
+          const frames = [storyboardImage.assetUrl]
           const captions = await extractCaptionsFromFrames(
             frames,
             (
@@ -93,7 +95,7 @@
             // this will be counting from to 100%, for each call to extractCaptionsFromFrames()
             // so TODO @Julian: adjust this for the right calculation
             // task.setProgress({
-            //   message: `Analyzing storyboards (${progress}%)`,
+            //   message: `Analyzing storyboard images (${progress}%)`,
             //   value: progress,
             // })
           }
@@ -101,12 +103,12 @@

           i++

-          const relativeProgress = i / storyboards.length
+          const relativeProgress = i / storyboardImages.length

           progress += relativeProgress * 100

           task.setProgress({
-            message: `Analyzing storyboards (${Math.round(progress)}%)`,
+            message: `Analyzing images (${Math.round(progress)}%)`,
             value: progress,
           })

@@ -168,8 +170,8 @@
             category: ClapSegmentCategory.CAMERA,
             prompt: 'medium-shot',
             label: 'medium-shot',
-            startTimeInMs: storyboard.startTimeInMs,
-            endTimeInMs: storyboard.endTimeInMs,
+            startTimeInMs: storyboardImage.startTimeInMs,
+            endTimeInMs: storyboardImage.endTimeInMs,
             status: ClapSegmentStatus.COMPLETED,
             track: timeline.findFreeTrack({ startTimeInMs, endTimeInMs }), // track row index
           })
@@ -187,8 +189,8 @@
             category,
             prompt,
             label: prompt,
-            startTimeInMs: storyboard.startTimeInMs,
-            endTimeInMs: storyboard.endTimeInMs,
+            startTimeInMs: storyboardImage.startTimeInMs,
+            endTimeInMs: storyboardImage.endTimeInMs,
             status: ClapSegmentStatus.COMPLETED,
             track: timeline.findFreeTrack({ startTimeInMs, endTimeInMs }), // track row index
           })
@@ -199,13 +201,16 @@

         await timeline.addSegments({ segments })
       } catch (err) {
-        console.error(`failed to analyze a storyboard:`, err)
+        console.error(`failed to analyze a storyboard image:`, err)
       }

       // TODO: use a special prompt to get categorized captions
     }
   } catch (err) {
-    console.error(`storyboardsToStory(): failed to analyze storyboards:`, err)
+    console.error(
+      `convertImagesToStory(): failed to analyze storyboard images:`,
+      err
+    )
   } finally {
     task.success()
     set({ isRunning: false })
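The loop above re-reads get().isRunning on every iteration, so this long-running background task can be cancelled from anywhere in the app by setting isRunning to false. The cooperative-cancellation pattern in isolation (a minimal sketch; the store and helper names are assumptions, not the repo's code):

  import { create } from 'zustand'

  // Minimal sketch of the cancellation pattern used above.
  const useTaskSketch = create<{ isRunning: boolean }>(() => ({
    isRunning: false,
  }))

  async function processItem(item: string): Promise<void> {
    // placeholder for the real per-item work, e.g. caption extraction
  }

  async function runCancellableLoop(items: string[]) {
    useTaskSketch.setState({ isRunning: true })
    for (const item of items) {
      // re-read the store each iteration: setting isRunning to false
      // anywhere else stops the loop at the next step
      if (!useTaskSketch.getState().isRunning) break
      await processItem(item)
    }
    useTaskSketch.setState({ isRunning: false })
  }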
packages/app/src/services/editors/project-editor/useProjectEditor.ts CHANGED
@@ -12,8 +12,9 @@ export const useProjectEditor = create<ProjectEditorStore>((set, get) => ({
   setCurrent: (current?: ClapMeta) => {
     const timeline: TimelineStore = useTimeline.getState()
     set({ current })
-    if (current && timeline?.meta) {
-      Object.assign(timeline.meta, current)
+    if (current && timeline) {
+      // note: we really need to be sure that current is valid here
+      Object.assign(timeline, current)
     }
   },
   undo: () => {},
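The new code merges the edited meta straight onto the timeline store with Object.assign(timeline, current), and the added note admits this trusts current completely. One defensive alternative, sketched here as an assumption rather than the repo's code, is to copy an explicit allow-list of keys so a malformed object cannot overwrite store methods or unrelated state:

  import { ClapMeta } from '@aitube/clap'

  // Hypothetical guard: only copy known meta keys onto the store.
  const META_KEYS = [
    'title', 'description', 'synopsis', 'licence', 'imageRatio',
    'durationInMs', 'width', 'height', 'imagePrompt', 'storyPrompt',
    'systemPrompt', 'isLoop', 'isInteractive', 'bpm', 'frameRate',
  ] as const

  function assignMeta(
    timeline: Record<string, unknown>,
    current: Partial<ClapMeta>
  ) {
    for (const key of META_KEYS) {
      if (key in current) {
        timeline[key] = (current as Record<string, unknown>)[key]
      }
    }
  }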
packages/app/src/services/editors/script-editor/useScriptEditor.ts CHANGED
@@ -40,13 +40,13 @@ export const useScriptEditor = create<ScriptEditorStore>((set, get) => ({
   const { highlightElements, textModel } = get()

   set({
-    current: clap.meta.screenplay,
-    lastPublished: clap.meta.screenplay,
+    current: clap.meta.storyPrompt,
+    lastPublished: clap.meta.storyPrompt,
   })

   try {
     // we need to update the model
-    textModel?.setValue(clap.meta.screenplay)
+    textModel?.setValue(clap.meta.storyPrompt)
   } catch (err) {
     // to catch the "Error: Model is disposed!"
     // this can happen if the timing isn't right,
packages/app/src/services/editors/workflow-editor/getSegmentWorkflowProviderAndEngine.ts CHANGED
@@ -25,7 +25,7 @@ export function getSegmentWorkflowProviderAndEngine({
   lipsyncEngine?: ClapWorkflowEngine
 } {
   const generationWorkflow: ClapWorkflow | undefined =
-    segment.category === ClapSegmentCategory.STORYBOARD
+    segment.category === ClapSegmentCategory.IMAGE
       ? settings.imageGenerationWorkflow
       : segment.category === ClapSegmentCategory.VIDEO
         ? settings.videoGenerationWorkflow
@@ -44,7 +44,7 @@
   generationWorkflow?.engine || undefined

   const faceswapWorkflow: ClapWorkflow | undefined =
-    segment.category === ClapSegmentCategory.STORYBOARD
+    segment.category === ClapSegmentCategory.IMAGE
       ? settings.imageFaceswapWorkflow
       : segment.category === ClapSegmentCategory.VIDEO
        ? settings.videoFaceswapWorkflow
packages/app/src/services/editors/workflow-editor/workflows/comfyui/getComfyWorkflow.ts CHANGED
@@ -6,7 +6,7 @@ export function getComfyWorkflow(category: ClapSegmentCategory) {

   let comfyWorkflow

-  if (category === ClapSegmentCategory.STORYBOARD) {
+  if (category === ClapSegmentCategory.IMAGE) {
     comfyWorkflow = settings.comfyClapWorkflowForImage
   } else if (category === ClapSegmentCategory.VIDEO) {
     comfyWorkflow = settings.comfyClapWorkflowForVideo
packages/app/src/services/io/ffmpegUtils.ts CHANGED
@@ -380,7 +380,7 @@ export async function createFullSilentVideo(
     continue
   }

-  const inputFilename = `input_${index}_${UUID()}.${input.category === ClapSegmentCategory.STORYBOARD ? 'png' : 'mp4'}`
+  const inputFilename = `input_${index}_${UUID()}.${input.category === ClapSegmentCategory.IMAGE ? 'png' : 'mp4'}`
   await ffmpeg.writeFile(inputFilename, input.data)

   const segmentDuration = (input.endTimeInMs - input.startTimeInMs) / 1000
@@ -388,7 +388,7 @@

   let outputFilename = `output_${index}_${UUID()}.mp4`

-  if (input.category === ClapSegmentCategory.STORYBOARD) {
+  if (input.category === ClapSegmentCategory.IMAGE) {
     // Handle image input
     console.log(`${TAG}: Processing image input`)
     const ffmpegCommand = [
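For IMAGE segments the code writes a PNG and builds an ffmpeg command that turns it into a short silent clip. The actual arguments in ffmpegUtils.ts are not shown in this diff; a typical command for that conversion, sketched against the general ffmpeg.wasm pattern (the exec method name varies by @ffmpeg/ffmpeg version, hence the declared shim), looks like:

  // Assumed to exist in scope, as in createFullSilentVideo above:
  declare const ffmpeg: { exec(args: string[]): Promise<number> }
  declare const inputFilename: string    // input_<index>_<uuid>.png
  declare const outputFilename: string   // output_<index>_<uuid>.mp4
  declare const segmentDuration: number  // clip length in seconds

  // Loop a single PNG for segmentDuration seconds and encode it as H.264.
  const ffmpegCommandSketch = [
    '-loop', '1',                  // repeat the single input frame
    '-i', inputFilename,
    '-t', String(segmentDuration),
    '-c:v', 'libx264',
    '-pix_fmt', 'yuv420p',         // broad player compatibility
    outputFilename,
  ]
  await ffmpeg.exec(ffmpegCommandSketch)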
packages/app/src/services/io/formats/edl.ts CHANGED
@@ -1,16 +1,5 @@
-import {
-  ClapAssetSource,
-  ClapProject,
-  ClapSegment,
-  ClapSegmentCategory,
-  UUID,
-} from '@aitube/clap'
-import {
-  TimelineSegment,
-  timelineSegmentToClapSegment,
-  TimelineStore,
-  useTimeline,
-} from '@aitube/timeline'
+import { ClapAssetSource, ClapProject } from '@aitube/clap'
+import { TimelineStore, useTimeline } from '@aitube/timeline'
 import {
   ExportableSegment,
   formatSegmentForExport,
@@ -28,12 +17,7 @@ const secondsToTimecode = (seconds: number, fps: number): string => {

 export async function generateEDL(): Promise<string> {
   const timeline: TimelineStore = useTimeline.getState()
-  const {
-    meta,
-    getClap,
-    totalDurationInMs,
-    segments: timelineSegments,
-  } = timeline
+  const { title, getClap, segments: timelineSegments } = timeline

   const clap: ClapProject | null = await getClap()
   if (!clap) {
@@ -51,7 +35,7 @@ export async function generateEDL(): Promise<string> {
     return folderName.slice(0, 7) // Limit to 7 characters for EDL compatibility
   }

-  let edlContent = `TITLE: ${meta.title}\nFCM: NON-DROP FRAME\n\n`
+  let edlContent = `TITLE: ${title}\nFCM: NON-DROP FRAME\n\n`
   let timelineInPoint = 0

   const exportableSegments: ExportableSegment[] = timelineSegments
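Note: the hunk header above references a secondsToTimecode(seconds, fps) helper. A minimal sketch with the same signature, producing the HH:MM:SS:FF timecodes the EDL format expects (the actual rounding in edl.ts may differ):

const secondsToTimecode = (seconds: number, fps: number): string => {
  const totalFrames = Math.round(seconds * fps)
  const frames = totalFrames % fps
  const totalSeconds = Math.floor(totalFrames / fps)
  const ss = totalSeconds % 60
  const mm = Math.floor(totalSeconds / 60) % 60
  const hh = Math.floor(totalSeconds / 3600)
  const pad = (n: number) => String(n).padStart(2, '0')
  return `${pad(hh)}:${pad(mm)}:${pad(ss)}:${pad(frames)}`
}

// secondsToTimecode(61.5, 24) === "00:01:01:12"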
packages/app/src/services/io/formats/fcp.ts CHANGED
@@ -1,16 +1,5 @@
-import {
-  ClapAssetSource,
-  ClapProject,
-  ClapSegment,
-  ClapSegmentCategory,
-  UUID,
-} from '@aitube/clap'
-import {
-  TimelineSegment,
-  timelineSegmentToClapSegment,
-  TimelineStore,
-  useTimeline,
-} from '@aitube/timeline'
+import { ClapProject, UUID } from '@aitube/clap'
+import { TimelineStore, useTimeline } from '@aitube/timeline'
 import {
   ExportableSegment,
   formatSegmentForExport,
@@ -18,14 +7,9 @@ import {

 export async function generateFCP(): Promise<string> {
   const timeline: TimelineStore = useTimeline.getState()
-  const {
-    meta,
-    getClap,
-    totalDurationInMs,
-    segments: timelineSegments,
-  } = timeline
+  const { title, width, height, getClap, segments: timelineSegments } = timeline

-  const DEFAULT_FRAME_RATE = 30
+  const DEFAULT_FRAME_RATE = 24

   const formatFCPTime = (
     timeInMs: number,
@@ -41,12 +25,12 @@ export async function generateFCP(): Promise<string> {
   }

   const createAssetFormat = (id: string): string => {
-    return /* XML */ `<format id="${id}" name="FFVideoFormat${meta.height}p${DEFAULT_FRAME_RATE}" frameDuration="${formatFCPTime(1000 / DEFAULT_FRAME_RATE)}" width="${meta.width}" height="${meta.height}"/>`
+    return /* XML */ `<format id="${id}" name="FFVideoFormat${height}p${DEFAULT_FRAME_RATE}" frameDuration="${formatFCPTime(1000 / DEFAULT_FRAME_RATE)}" width="${width}" height="${height}"/>`
   }

   const resources: string[] = []
   const assetClips: string[] = []
-  const formatId = `r${meta.width}x${meta.height}`
+  const formatId = `r${width}x${height}`

   resources.push(createAssetFormat(formatId))

@@ -89,8 +73,8 @@ export async function generateFCP(): Promise<string> {
 ${resources.join('\n')}
   </resources>
   <library>
-    <event name="${meta.title}">
-      <project name="${meta.title}">
+    <event name="${title}">
+      <project name="${title}">
         <sequence format="${formatId}" tcStart="0s" tcFormat="NDF" audioLayout="stereo" audioRate="48k">
           <spine>
             ${assetClips.join('\n')}
@@ -104,14 +88,20 @@ export async function generateFCP(): Promise<string> {

 export async function generateFCP7XML(): Promise<string> {
   const timeline: TimelineStore = useTimeline.getState()
-  const { meta, segments: timelineSegments } = timeline
+  const {
+    title,
+    durationInMs,
+    width,
+    height,
+    segments: timelineSegments,
+  } = timeline

   let xmlContent = `<?xml version="1.0" encoding="UTF-8"?>
 <!DOCTYPE xmeml>
 <xmeml version="5">
   <sequence>
-    <name>${meta.title}</name>
-    <duration>${meta.durationInMs / 1000}</duration>
+    <name>${title}</name>
+    <duration>${durationInMs / 1000}</duration>
     <rate>
       <timebase>30</timebase>
       <ntsc>FALSE</ntsc>
@@ -120,8 +110,8 @@ export async function generateFCP7XML(): Promise<string> {
     <video>
       <format>
         <samplecharacteristics>
-          <width>${meta.width}</width>
-          <height>${meta.height}</height>
+          <width>${width}</width>
+          <height>${height}</height>
         </samplecharacteristics>
       </format>
       <track>
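Note: FCPXML expresses times as rational seconds (e.g. a frameDuration of "1000/24000s" at 24 fps). A hedged sketch of a formatter compatible with the formatFCPTime(timeInMs) calls above; only the first parameter is visible in the hunks, so the second one is an assumption:

// Convert milliseconds to an FCPXML rational time string such as "1000/24000s".
const formatFCPTime = (timeInMs: number, frameRate: number = 24): string => {
  const timeBase = frameRate * 1000 // e.g. 24000 for 24 fps
  const frames = Math.round((timeInMs / 1000) * frameRate)
  return `${frames * 1000}/${timeBase}s`
}

// formatFCPTime(1000 / 24) === "1000/24000s" (one frame at 24 fps)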
packages/app/src/services/io/formats/mlt.ts CHANGED
@@ -8,7 +8,13 @@ import { TimelineStore, useTimeline } from '@aitube/timeline'

 export async function generateMLT(): Promise<string> {
   const timeline: TimelineStore = useTimeline.getState()
-  const { meta, segments: timelineSegments } = timeline
+  const {
+    title,
+    width,
+    height,
+    durationInMs,
+    segments: timelineSegments,
+  } = timeline

   const segments: ExportableSegment[] = timelineSegments
     .map((segment, i) => formatSegmentForExport(segment, i))
@@ -19,7 +25,7 @@ export async function generateMLT(): Promise<string> {
   )

   const storyboards: ExportableSegment[] = segments.filter(
-    ({ segment }) => segment.category === ClapSegmentCategory.STORYBOARD
+    ({ segment }) => segment.category === ClapSegmentCategory.IMAGE
   )

   const dialogues: ExportableSegment[] = segments.filter(
@@ -36,11 +42,11 @@ export async function generateMLT(): Promise<string> {

   // want to see some colors? install es6-string-html in your VSCode
   return /* XML */ `<?xml version="1.0" standalone="no"?>
-<mlt LC_NUMERIC="C" version="7.24.0" title="${meta.title}" producer="main_bin">
+<mlt LC_NUMERIC="C" version="7.24.0" title="${title}" producer="main_bin">
  <profile
-    description="${meta.width}:${meta.height}"
-    width="${meta.width}"
-    height="${meta.height}"
+    description="${width}:${height}"
+    width="${width}"
+    height="${height}"
    progressive="0"
    sample_aspect_num="1"
    sample_aspect_den="1"
@@ -58,8 +64,8 @@ colorspace="709"
   <playlist id="main_bin">
     <property name="xml_retain">1</property>
   </playlist>
-  <producer id="black" in="00:00:00.000" out="${formatDuration(meta.durationInMs)}">
-    <property name="length">${formatDuration(meta.durationInMs)}</property>
+  <producer id="black" in="00:00:00.000" out="${formatDuration(durationInMs)}">
+    <property name="length">${formatDuration(durationInMs)}</property>
     <property name="eof">pause</property>
     <property name="resource">0</property>
     <property name="aspect_ratio">1</property>
@@ -68,7 +74,7 @@ colorspace="709"
     <property name="set.test_audio">0</property>
   </producer>
   <playlist id="background">
-    <entry producer="black" in="00:00:00.000" out="${formatDuration(meta.durationInMs)}" />
+    <entry producer="black" in="00:00:00.000" out="${formatDuration(durationInMs)}" />
   </playlist>
 ${segments
   .map(
@@ -76,8 +82,8 @@ ${segments
 <producer
   id="${shortId}"
   in="${formatDuration(0)}"
-  out="${formatDuration(meta.durationInMs)}">
-  <property name="length">${formatDuration(meta.durationInMs)}</property>
+  out="${formatDuration(durationInMs)}">
+  <property name="length">${formatDuration(durationInMs)}</property>
   <property name="eof">pause</property>
   <property name="resource">${filePath}</property>
   <property name="ttl">1</property>
@@ -85,8 +91,8 @@ out="${formatDuration(durationInMs)}">
   <property name="meta.media.progressive">1</property>
   <property name="seekable">1</property>
   <property name="format">1</property>
-  <property name="meta.media.width">${meta.width}</property>
-  <property name="meta.media.height">${meta.height}</property>
+  <property name="meta.media.width">${width}</property>
+  <property name="meta.media.height">${height}</property>
   <property name="mlt_service">qimage</property>
   <property name="creation_time">${
     segment.createdAt || new Date().toISOString()
@@ -140,8 +146,8 @@ ${storyboards
   </playlist>
 ${[...dialogues, ...sounds, ...music].map(
   ({ segment, filePath, fileName, shortId }) => /* XML */ `
-<chain id="${shortId}" out="${formatDuration(meta.durationInMs)}">
-  <property name="length">${formatDuration(meta.durationInMs)}</property>
+<chain id="${shortId}" out="${formatDuration(durationInMs)}">
+  <property name="length">${formatDuration(durationInMs)}</property>
   <property name="eof">pause</property>
   <property name="resource">${filePath}</property>
   <property name="mlt_service">avformat-novalidate</property>
@@ -220,7 +226,7 @@ ${music.map(
   id="tractor0"
   title="Shotcut version 24.04.28"
   in="00:00:00.000"
-  out="${formatDuration(meta.durationInMs)}">
+  out="${formatDuration(durationInMs)}">
   <property name="shotcut">1</property>
   <property name="shotcut:projectAudioChannels">2</property>
   <property name="shotcut:projectFolder">1</property>
packages/app/src/services/io/formats/otio.ts CHANGED
@@ -49,11 +49,11 @@ interface OTIOTimeline {

 export async function generateOTIO(): Promise<string> {
   const timeline: TimelineStore = useTimeline.getState()
-  const { meta, segments: timelineSegments } = timeline
+  const { title, segments: timelineSegments } = timeline

   const otioData: OTIOTimeline = {
     OTIO_SCHEMA: 'Timeline.1',
-    name: meta.title,
+    name: title,
     global_start_time: null,
     tracks: {
       OTIO_SCHEMA: 'Stack.1',
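Note: the envelope being filled in here is plain OpenTimelineIO JSON. A hedged sketch of the minimal shape implied by the OTIOTimeline interface referenced in the hunk header (the real interface in otio.ts declares more fields):

const minimalTimeline = {
  OTIO_SCHEMA: 'Timeline.1',
  name: 'My project',
  global_start_time: null,
  tracks: {
    OTIO_SCHEMA: 'Stack.1',
    children: [], // one Track.1 entry per timeline track
  },
}

console.log(JSON.stringify(minimalTimeline, null, 2))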
packages/app/src/services/io/parseFileIntoSegments.ts CHANGED
@@ -121,7 +121,7 @@ export async function parseFileIntoSegments({
         status: ClapSegmentStatus.COMPLETED,
         // track: findFreeTrack({ segments, startTimeInMs, endTimeInMs }), // track row index
         label: `${file.name}`, // a short label to name the segment (optional, can be human or LLM-defined)
-        category: ClapSegmentCategory.STORYBOARD,
+        category: ClapSegmentCategory.IMAGE,

         assetUrl,
         assetDurationInMs: durationInMs,
@@ -132,20 +132,20 @@ export async function parseFileIntoSegments({
         track: track ? track + 1 : undefined,
       }

-      const storyboard = await clapSegmentToTimelineSegment(
+      const storyboardImage = await clapSegmentToTimelineSegment(
         newSegment(partialStoryboard)
       )

       if (isValidNumber(track)) {
-        storyboard.track = track
+        storyboardImage.track = track
       }

-      storyboard.outputType = ClapOutputType.IMAGE
+      storyboardImage.outputType = ClapOutputType.IMAGE

       // we assume we want it to be immediately visible
-      storyboard.visibility = SegmentVisibility.VISIBLE
+      storyboardImage.visibility = SegmentVisibility.VISIBLE

-      newSegments.push(storyboard)
+      newSegments.push(storyboardImage)

       break
     }
@@ -171,6 +171,8 @@ export async function parseFileIntoSegments({
         durationInSteps,
       })

+      useTimeline.setState({ bpm })
+
       // TODO: use the correct drop time
       const startTimeInMs = isValidNumber(maybeStartTimeInMs)
         ? maybeStartTimeInMs!
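Note: the new useTimeline.setState({ bpm }) line works because useTimeline is a zustand store (the repo creates its stores with zustand's create), so setState merges a partial state object. A short sketch; the literal bpm value is illustrative, since in parseFileIntoSegments it comes from the analyzed audio file:

import { useTimeline } from '@aitube/timeline'

const bpm = 140 // hypothetical value detected from a dropped audio file
useTimeline.setState({ bpm })

console.log(useTimeline.getState().bpm) // 140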
packages/app/src/services/io/useIO.ts CHANGED
@@ -3,7 +3,8 @@
 import {
   ClapAssetSource,
   ClapEntity,
-  ClapMediaOrientation,
+  ClapImageRatio,
+  ClapMeta,
   ClapOutputType,
   ClapProject,
   ClapSegment,
@@ -189,22 +190,23 @@ export const useIO = create<IOStore>((set, get) => ({
           title: projectName,
           description: `${projectName} (${fileName})`,
           synopsis: '',
-          licence:
-            "This OpenClap file is just a conversion from the original screenplay and doesn't claim any copyright or intellectual property. All rights reserved to the original intellectual property and copyright holders. Using OpenClap isn't piracy.",
+          licence: '',

-          orientation: ClapMediaOrientation.LANDSCAPE,
+          imageRatio: ClapImageRatio.LANDSCAPE,
           durationInMs: frames.length * durationInMs,

           // TODO: those should come from the Clapper user settings

           width: 1024,
           height: 576,
-
-          defaultVideoModel: '', // <-- we should deprecate this no?
-          extraPositivePrompt: [],
-          screenplay: '',
+          imagePrompt: '',
+          storyPrompt: '',
+          systemPrompt: '',
           isLoop: false,
           isInteractive: false,
+
+          bpm: 120,
+          frameRate: 24,
         },
       })
     )
@@ -528,10 +530,11 @@ export const useIO = create<IOStore>((set, get) => ({
     const timeline: TimelineStore = useTimeline.getState()

     const {
-      meta,
-      getClap,
-      totalDurationInMs,
+      width,
+      height,
+      durationInMs,
       segments: timelineSegments,
+      getClap,
     } = timeline

     const clap = await getClap()
@@ -584,7 +587,7 @@ export const useIO = create<IOStore>((set, get) => ({
         })
       } else if (
         filePath.startsWith('image/') ||
-        segment.category === ClapSegmentCategory.STORYBOARD
+        segment.category === ClapSegmentCategory.IMAGE
       ) {
         images.push({
           data: base64DataUriToUint8Array(segment.assetUrl),
@@ -619,9 +622,9 @@ export const useIO = create<IOStore>((set, get) => ({
     const fullVideo = await createFullVideo(
       videoInputs,
       audios,
-      meta.width,
-      meta.height,
-      totalDurationInMs,
+      width,
+      height,
+      durationInMs,
       (progress, message) => {
         task.setProgress({
           message: `Rendering video (${Math.round(progress)}%)`,
@@ -650,7 +653,7 @@ export const useIO = create<IOStore>((set, get) => ({
     try {
       const timeline: TimelineStore = useTimeline.getState()

-      const { meta, segments: timelineSegments } = timeline
+      const { segments: timelineSegments } = timeline

       const segments: ExportableSegment[] = timelineSegments
         .map((segment, i) => formatSegmentForExport(segment, i))
@@ -658,7 +661,9 @@ export const useIO = create<IOStore>((set, get) => ({

       let files: fflate.AsyncZippable = {}

-      files['screenplay.txt'] = fflate.strToU8(meta.screenplay)
+      const meta = timeline.getClapMeta()
+
+      files['story_prompt.txt'] = fflate.strToU8(meta.storyPrompt)

       files['meta.json'] = fflate.strToU8(JSON.stringify(meta, null, 2))

@@ -762,7 +767,7 @@ export const useIO = create<IOStore>((set, get) => ({
       audios,
       meta.width,
       meta.height,
-      timeline.totalDurationInMs,
+      timeline.durationInMs,
       (progress, message) => {
         task.setProgress({
           message,
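Note: taken together, the project metadata written by this commit looks roughly like the sketch below. The field list is read off the hunks above; the authoritative ClapMeta type lives in @aitube/clap and may contain more fields:

import { ClapImageRatio } from '@aitube/clap'

const defaultMeta = {
  title: 'Untitled',
  description: '',
  synopsis: '',
  licence: '',
  imageRatio: ClapImageRatio.LANDSCAPE, // replaces orientation/ClapMediaOrientation
  durationInMs: 0,
  width: 1024,
  height: 576,
  imagePrompt: '',
  storyPrompt: '', // replaces the old screenplay field; exported as story_prompt.txt
  systemPrompt: '',
  isLoop: false,
  isInteractive: false,
  bpm: 120,
  frameRate: 24,
}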