jbilcke-hf (HF staff) committed on
Commit
e4e0e54
•
1 Parent(s): 3e9f9ed

add "movie director assistant" LLM step

This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50)
  1. TODO.md +6 -0
  2. package-lock.json +93 -1
  3. package.json +5 -1
  4. src/config.mts +3 -3
  5. src/data/all_words.json +1 -1
  6. src/data/good_words.json +1 -1
  7. src/index.mts +223 -74
  8. src/initFolders.mts +11 -4
  9. src/llm/enrichVideoSpecsUsingLLM.mts +79 -0
  10. src/llm/openai/createChatCompletion.mts +105 -0
  11. src/llm/openai/createChatCompletionStream.mts +66 -0
  12. src/llm/openai/generateYAML.mts +42 -0
  13. src/llm/openai/getTextPrompt.mts +4 -0
  14. src/llm/openai/getUserContent.mts +7 -0
  15. src/llm/openai/openai.mts +7 -0
  16. src/llm/openai/runModerationCheck.mts +30 -0
  17. src/llm/openai/stream.mts +35 -0
  18. src/llm/types.mts +25 -0
  19. src/main.mts +12 -8
  20. src/preproduction/mocks.mts +36 -0
  21. src/preproduction/prompts.mts +119 -0
  22. src/production/generateShot.mts +2 -2
  23. src/scheduler/deleteTask.mts +0 -33
  24. src/scheduler/deleteVideo.mts +28 -0
  25. src/scheduler/getAllTasksForOwner.mts +0 -9
  26. src/scheduler/getAllVideosForOwner.mts +9 -0
  27. src/scheduler/getCompletedTasks.mts +0 -9
  28. src/scheduler/getCompletedVideos.mts +9 -0
  29. src/scheduler/getPendingTasks.mts +0 -9
  30. src/scheduler/getPendingVideos.mts +9 -0
  31. src/scheduler/getTask.mts +0 -23
  32. src/scheduler/getVideo.mts +23 -0
  33. src/scheduler/getVideoStatus.mts +13 -0
  34. src/scheduler/markVideoAsPending.mts +25 -0
  35. src/scheduler/markVideoAsToAbort.mts +22 -0
  36. src/scheduler/markVideoAsToDelete.mts +23 -0
  37. src/scheduler/markVideoAsToPause.mts +24 -0
  38. src/scheduler/{processTask.mts → processVideo.mts} +97 -90
  39. src/scheduler/readTask.mts +0 -11
  40. src/scheduler/readTasks.mts +0 -45
  41. src/scheduler/readVideoMetadataFile.mts +11 -0
  42. src/scheduler/readVideoMetadataFiles.mts +44 -0
  43. src/scheduler/saveAndCheckIfNeedToStop.mts +68 -0
  44. src/scheduler/saveCompletedTask.mts +0 -12
  45. src/scheduler/saveCompletedVideo.mts +12 -0
  46. src/scheduler/savePendingTask.mts +0 -11
  47. src/scheduler/savePendingVideo.mts +11 -0
  48. src/scheduler/{updatePendingTask.mts → updatePendingVideo.mts} +7 -7
  49. src/tests/checkStatus.mts +2 -2
  50. src/tests/submitVideo.mts +2 -2
TODO.md ADDED
@@ -0,0 +1,6 @@
+
+
+ to allow multiple videos to be processed at the same time:
+
+ [ ] yield from the loop at each step
+ [ ] random processing of videos
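For illustration only, here is one way the two TODO items above could look once implemented in the scheduler loop; the shuffle helper and the function name are assumptions, not part of this commit:

```ts
// Hypothetical sketch of the TODO items: pick pending videos in a random
// order and yield back to the event loop between videos, so several videos
// can make progress instead of being processed strictly in sequence.
import { getPendingVideos } from "./scheduler/getPendingVideos.mts"
import { processVideo } from "./scheduler/processVideo.mts"

const shuffle = <T,>(items: T[]): T[] =>
  items
    .map((item) => ({ item, key: Math.random() }))
    .sort((a, b) => a.key - b.key)
    .map(({ item }) => item)

export const mainConcurrentSketch = async (): Promise<void> => {
  const videos = shuffle(await getPendingVideos())
  for (const video of videos) {
    await processVideo(video)
    // yield to the event loop so other pending work can interleave
    await new Promise<void>((resolve) => setImmediate(resolve))
  }
  setTimeout(mainConcurrentSketch, 500)
}
```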
package-lock.json CHANGED
@@ -14,15 +14,19 @@
14
  "@types/express": "^4.17.17",
15
  "@types/ffmpeg-concat": "^1.1.2",
16
  "@types/uuid": "^9.0.2",
 
17
  "express": "^4.18.2",
18
  "ffmpeg-concat": "^1.3.0",
19
  "fluent-ffmpeg": "^2.1.2",
20
  "fs-extra": "^11.1.1",
 
21
  "node-fetch": "^3.3.1",
 
22
  "puppeteer": "^20.8.0",
23
  "temp-dir": "^3.0.0",
24
  "ts-node": "^10.9.1",
25
- "uuid": "^9.0.0"
 
26
  }
27
  },
28
  "node_modules/@babel/code-frame": {
@@ -686,6 +690,14 @@
686
  "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz",
687
  "integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg=="
688
  },
689
  "node_modules/b4a": {
690
  "version": "1.6.4",
691
  "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.4.tgz",
@@ -1222,6 +1234,11 @@
1222
  "ms": "2.0.0"
1223
  }
1224
  },
1225
  "node_modules/decompress-response": {
1226
  "version": "6.0.0",
1227
  "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
@@ -1479,6 +1496,14 @@
1479
  "node": ">= 0.6"
1480
  }
1481
  },
1482
  "node_modules/execa": {
1483
  "version": "0.10.0",
1484
  "resolved": "https://registry.npmjs.org/execa/-/execa-0.10.0.tgz",
@@ -1772,6 +1797,25 @@
1772
  "node": ">=0.8.0"
1773
  }
1774
  },
1775
  "node_modules/foreground-child": {
1776
  "version": "3.1.1",
1777
  "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
@@ -2218,6 +2262,16 @@
2218
  "through2": "^0.6.3"
2219
  }
2220
  },
2221
  "node_modules/graceful-fs": {
2222
  "version": "4.2.11",
2223
  "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
@@ -2609,6 +2663,14 @@
2609
  "resolved": "https://registry.npmjs.org/jpeg-js/-/jpeg-js-0.4.4.tgz",
2610
  "integrity": "sha512-WZzeDOEtTOBK4Mdsar0IqEU5sMr3vSV2RqkAIzUEV2BHnUfKGyswWFPFwK5EeDo93K3FohSHbLAjj0s1Wzd+dg=="
2611
  },
2612
  "node_modules/js-tokens": {
2613
  "version": "4.0.0",
2614
  "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
@@ -3349,6 +3411,28 @@
3349
  "wrappy": "1"
3350
  }
3351
  },
3352
  "node_modules/optionator": {
3353
  "version": "0.8.3",
3354
  "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz",
@@ -5059,6 +5143,14 @@
5059
  "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
5060
  "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
5061
  },
5062
  "node_modules/yargs": {
5063
  "version": "17.7.1",
5064
  "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz",
 
14
  "@types/express": "^4.17.17",
15
  "@types/ffmpeg-concat": "^1.1.2",
16
  "@types/uuid": "^9.0.2",
17
+ "eventsource-parser": "^1.0.0",
18
  "express": "^4.18.2",
19
  "ffmpeg-concat": "^1.3.0",
20
  "fluent-ffmpeg": "^2.1.2",
21
  "fs-extra": "^11.1.1",
22
+ "gpt-tokens": "^1.1.1",
23
  "node-fetch": "^3.3.1",
24
+ "openai": "^3.3.0",
25
  "puppeteer": "^20.8.0",
26
  "temp-dir": "^3.0.0",
27
  "ts-node": "^10.9.1",
28
+ "uuid": "^9.0.0",
29
+ "yaml": "^2.3.1"
30
  }
31
  },
32
  "node_modules/@babel/code-frame": {
 
690
  "resolved": "https://registry.npmjs.org/aws4/-/aws4-1.12.0.tgz",
691
  "integrity": "sha512-NmWvPnx0F1SfrQbYwOi7OeaNGokp9XhzNioJ/CSBs8Qa4vxug81mhJEAVZwxXuBmYB5KDRfMq/F3RR0BIU7sWg=="
692
  },
693
+ "node_modules/axios": {
694
+ "version": "0.26.1",
695
+ "resolved": "https://registry.npmjs.org/axios/-/axios-0.26.1.tgz",
696
+ "integrity": "sha512-fPwcX4EvnSHuInCMItEhAGnaSEXRBjtzh9fOtsE6E1G6p7vl7edEeZe11QHf18+6+9gR5PbKV/sGKNaD8YaMeA==",
697
+ "dependencies": {
698
+ "follow-redirects": "^1.14.8"
699
+ }
700
+ },
701
  "node_modules/b4a": {
702
  "version": "1.6.4",
703
  "resolved": "https://registry.npmjs.org/b4a/-/b4a-1.6.4.tgz",
 
1234
  "ms": "2.0.0"
1235
  }
1236
  },
1237
+ "node_modules/decimal.js": {
1238
+ "version": "10.4.3",
1239
+ "resolved": "https://registry.npmjs.org/decimal.js/-/decimal.js-10.4.3.tgz",
1240
+ "integrity": "sha512-VBBaLc1MgL5XpzgIP7ny5Z6Nx3UrRkIViUkPUdtl9aya5amy3De1gsUUSB1g3+3sExYNjCAsAznmukyxCb1GRA=="
1241
+ },
1242
  "node_modules/decompress-response": {
1243
  "version": "6.0.0",
1244
  "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
 
1496
  "node": ">= 0.6"
1497
  }
1498
  },
1499
+ "node_modules/eventsource-parser": {
1500
+ "version": "1.0.0",
1501
+ "resolved": "https://registry.npmjs.org/eventsource-parser/-/eventsource-parser-1.0.0.tgz",
1502
+ "integrity": "sha512-9jgfSCa3dmEme2ES3mPByGXfgZ87VbP97tng1G2nWwWx6bV2nYxm2AWCrbQjXToSe+yYlqaZNtxffR9IeQr95g==",
1503
+ "engines": {
1504
+ "node": ">=14.18"
1505
+ }
1506
+ },
1507
  "node_modules/execa": {
1508
  "version": "0.10.0",
1509
  "resolved": "https://registry.npmjs.org/execa/-/execa-0.10.0.tgz",
 
1797
  "node": ">=0.8.0"
1798
  }
1799
  },
1800
+ "node_modules/follow-redirects": {
1801
+ "version": "1.15.2",
1802
+ "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.2.tgz",
1803
+ "integrity": "sha512-VQLG33o04KaQ8uYi2tVNbdrWp1QWxNNea+nmIB4EVM28v0hmP17z7aG1+wAkNzVq4KeXTq3221ye5qTJP91JwA==",
1804
+ "funding": [
1805
+ {
1806
+ "type": "individual",
1807
+ "url": "https://github.com/sponsors/RubenVerborgh"
1808
+ }
1809
+ ],
1810
+ "engines": {
1811
+ "node": ">=4.0"
1812
+ },
1813
+ "peerDependenciesMeta": {
1814
+ "debug": {
1815
+ "optional": true
1816
+ }
1817
+ }
1818
+ },
1819
  "node_modules/foreground-child": {
1820
  "version": "3.1.1",
1821
  "resolved": "https://registry.npmjs.org/foreground-child/-/foreground-child-3.1.1.tgz",
 
2262
  "through2": "^0.6.3"
2263
  }
2264
  },
2265
+ "node_modules/gpt-tokens": {
2266
+ "version": "1.1.1",
2267
+ "resolved": "https://registry.npmjs.org/gpt-tokens/-/gpt-tokens-1.1.1.tgz",
2268
+ "integrity": "sha512-fB1u0ZH7PywF9FByfWCqn6Hpp3so/pFUmk3AiV4QlOskr57LK8Ds3YJOjdemWKRGJQ+2pT9ikt++Eb+/et9gTQ==",
2269
+ "dependencies": {
2270
+ "decimal.js": "^10.4.3",
2271
+ "js-tiktoken": "^1.0.7",
2272
+ "openai": "^3.3.0"
2273
+ }
2274
+ },
2275
  "node_modules/graceful-fs": {
2276
  "version": "4.2.11",
2277
  "resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
 
2663
  "resolved": "https://registry.npmjs.org/jpeg-js/-/jpeg-js-0.4.4.tgz",
2664
  "integrity": "sha512-WZzeDOEtTOBK4Mdsar0IqEU5sMr3vSV2RqkAIzUEV2BHnUfKGyswWFPFwK5EeDo93K3FohSHbLAjj0s1Wzd+dg=="
2665
  },
2666
+ "node_modules/js-tiktoken": {
2667
+ "version": "1.0.7",
2668
+ "resolved": "https://registry.npmjs.org/js-tiktoken/-/js-tiktoken-1.0.7.tgz",
2669
+ "integrity": "sha512-biba8u/clw7iesNEWLOLwrNGoBP2lA+hTaBLs/D45pJdUPFXyxD6nhcDVtADChghv4GgyAiMKYMiRx7x6h7Biw==",
2670
+ "dependencies": {
2671
+ "base64-js": "^1.5.1"
2672
+ }
2673
+ },
2674
  "node_modules/js-tokens": {
2675
  "version": "4.0.0",
2676
  "resolved": "https://registry.npmjs.org/js-tokens/-/js-tokens-4.0.0.tgz",
 
3411
  "wrappy": "1"
3412
  }
3413
  },
3414
+ "node_modules/openai": {
3415
+ "version": "3.3.0",
3416
+ "resolved": "https://registry.npmjs.org/openai/-/openai-3.3.0.tgz",
3417
+ "integrity": "sha512-uqxI/Au+aPRnsaQRe8CojU0eCR7I0mBiKjD3sNMzY6DaC1ZVrc85u98mtJW6voDug8fgGN+DIZmTDxTthxb7dQ==",
3418
+ "dependencies": {
3419
+ "axios": "^0.26.0",
3420
+ "form-data": "^4.0.0"
3421
+ }
3422
+ },
3423
+ "node_modules/openai/node_modules/form-data": {
3424
+ "version": "4.0.0",
3425
+ "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz",
3426
+ "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==",
3427
+ "dependencies": {
3428
+ "asynckit": "^0.4.0",
3429
+ "combined-stream": "^1.0.8",
3430
+ "mime-types": "^2.1.12"
3431
+ },
3432
+ "engines": {
3433
+ "node": ">= 6"
3434
+ }
3435
+ },
3436
  "node_modules/optionator": {
3437
  "version": "0.8.3",
3438
  "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.8.3.tgz",
 
5143
  "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz",
5144
  "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A=="
5145
  },
5146
+ "node_modules/yaml": {
5147
+ "version": "2.3.1",
5148
+ "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.3.1.tgz",
5149
+ "integrity": "sha512-2eHWfjaoXgTBC2jNM1LRef62VQa0umtvRiDSk6HSzW7RvS5YtkabJrwYLLEKWBc8a5U2PTSCs+dJjUTJdlHsWQ==",
5150
+ "engines": {
5151
+ "node": ">= 14"
5152
+ }
5153
+ },
5154
  "node_modules/yargs": {
5155
  "version": "17.7.1",
5156
  "resolved": "https://registry.npmjs.org/yargs/-/yargs-17.7.1.tgz",
package.json CHANGED
@@ -21,14 +21,18 @@
21
  "@types/express": "^4.17.17",
22
  "@types/ffmpeg-concat": "^1.1.2",
23
  "@types/uuid": "^9.0.2",
 
24
  "express": "^4.18.2",
25
  "ffmpeg-concat": "^1.3.0",
26
  "fluent-ffmpeg": "^2.1.2",
27
  "fs-extra": "^11.1.1",
 
28
  "node-fetch": "^3.3.1",
 
29
  "puppeteer": "^20.8.0",
30
  "temp-dir": "^3.0.0",
31
  "ts-node": "^10.9.1",
32
- "uuid": "^9.0.0"
 
33
  }
34
  }
 
21
  "@types/express": "^4.17.17",
22
  "@types/ffmpeg-concat": "^1.1.2",
23
  "@types/uuid": "^9.0.2",
24
+ "eventsource-parser": "^1.0.0",
25
  "express": "^4.18.2",
26
  "ffmpeg-concat": "^1.3.0",
27
  "fluent-ffmpeg": "^2.1.2",
28
  "fs-extra": "^11.1.1",
29
+ "gpt-tokens": "^1.1.1",
30
  "node-fetch": "^3.3.1",
31
+ "openai": "^3.3.0",
32
  "puppeteer": "^20.8.0",
33
  "temp-dir": "^3.0.0",
34
  "ts-node": "^10.9.1",
35
+ "uuid": "^9.0.0",
36
+ "yaml": "^2.3.1"
37
  }
38
  }
src/config.mts CHANGED
@@ -2,9 +2,9 @@ import path from "node:path"
2
 
3
  export const storagePath = `${process.env.VC_STORAGE_PATH || './sandbox'}`
4
 
5
- export const tasksDirPath = path.join(storagePath, "tasks")
6
- export const pendingTasksDirFilePath = path.join(tasksDirPath, "pending")
7
- export const completedTasksDirFilePath = path.join(tasksDirPath, "completed")
8
 
9
  export const filesDirPath = path.join(storagePath, "files")
10
  export const pendingFilesDirFilePath = path.join(filesDirPath, "pending")
 
2
 
3
  export const storagePath = `${process.env.VC_STORAGE_PATH || './sandbox'}`
4
 
5
+ export const metadataDirPath = path.join(storagePath, "metadata")
6
+ export const pendingMetadataDirFilePath = path.join(metadataDirPath, "pending")
7
+ export const completedMetadataDirFilePath = path.join(metadataDirPath, "completed")
8
 
9
  export const filesDirPath = path.join(storagePath, "files")
10
  export const pendingFilesDirFilePath = path.join(filesDirPath, "pending")
src/data/all_words.json CHANGED
@@ -4269,7 +4269,7 @@
4269
  "tank",
4270
  "tap",
4271
  "target",
4272
- "task",
4273
  "taste",
4274
  "tax",
4275
  "tea",
 
4269
  "tank",
4270
  "tap",
4271
  "target",
4272
+ "video",
4273
  "taste",
4274
  "tax",
4275
  "tea",
src/data/good_words.json CHANGED
@@ -4259,7 +4259,7 @@
4259
  "tank",
4260
  "tap",
4261
  "target",
4262
- "task",
4263
  "taste",
4264
  "tax",
4265
  "tea",
 
4259
  "tank",
4260
  "tap",
4261
  "target",
4262
+ "video",
4263
  "taste",
4264
  "tax",
4265
  "tea",
src/index.mts CHANGED
@@ -4,17 +4,21 @@ import path from "node:path"
4
  import { validate as uuidValidate } from "uuid"
5
  import express from "express"
6
 
7
- import { VideoTask, VideoTaskRequest } from "./types.mts"
8
  import { parseVideoRequest } from "./utils/parseVideoRequest.mts"
9
- import { savePendingTask } from "./scheduler/savePendingTask.mts"
10
- import { getTask } from "./scheduler/getTask.mts"
11
  import { main } from "./main.mts"
12
  import { completedFilesDirFilePath } from "./config.mts"
13
- import { deleteTask } from "./scheduler/deleteTask.mts"
14
- import { getPendingTasks } from "./scheduler/getPendingTasks.mts"
 
 
 
15
  import { hasValidAuthorization } from "./utils/hasValidAuthorization.mts"
16
- import { getAllTasksForOwner } from "./scheduler/getAllTasksForOwner.mts"
17
  import { initFolders } from "./initFolders.mts"
 
18
 
19
  initFolders()
20
  // to disable all processing (eg. to debug)
@@ -26,8 +30,8 @@ const port = 7860
26
 
27
  app.use(express.json())
28
 
29
- app.post("/", async (req, res) => {
30
- const request = req.body as VideoTaskRequest
31
 
32
  if (!hasValidAuthorization(req.headers)) {
33
  console.log("Invalid authorization")
@@ -37,68 +41,45 @@ app.post("/", async (req, res) => {
37
  return
38
  }
39
 
40
- let task: VideoTask = null
41
 
42
- console.log(`creating task from request..`)
43
- console.log(`request: `, JSON.stringify(request))
44
- try {
45
- task = await parseVideoRequest(request)
46
- } catch (err) {
47
- console.error(`failed to create task: ${task} (${err})`)
48
  res.status(400)
49
- res.write(JSON.stringify({ error: "query seems to be malformed" }))
50
  res.end()
51
  return
52
  }
53
 
54
- console.log(`saving task ${task.id}`)
 
 
 
55
  try {
56
- await savePendingTask(task)
57
- res.status(200)
58
- res.write(JSON.stringify(task))
59
- res.end()
60
  } catch (err) {
61
- console.error(err)
62
- res.status(500)
63
- res.write(JSON.stringify({ error: "couldn't save the task" }))
64
- res.end()
65
- }
66
- })
67
-
68
- // only get the tasks for a specific owner
69
- app.get("/owner/:ownerId", async (req, res) => {
70
- if (!hasValidAuthorization(req.headers)) {
71
- console.log("Invalid authorization")
72
- res.status(401)
73
- res.write(JSON.stringify({ error: "invalid token" }))
74
- res.end()
75
- return
76
- }
77
-
78
- const ownerId = req.params.ownerId
79
-
80
- if (!uuidValidate(ownerId)) {
81
- console.error("invalid owner id")
82
  res.status(400)
83
- res.write(JSON.stringify({ error: `invalid owner id` }))
84
  res.end()
85
  return
86
  }
87
 
 
88
  try {
89
- const tasks = await getAllTasksForOwner(ownerId)
90
  res.status(200)
91
- res.write(JSON.stringify(tasks, null, 2))
92
  res.end()
93
  } catch (err) {
94
  console.error(err)
95
  res.status(500)
96
- res.write(JSON.stringify({ error: `couldn't get the tasks for owner ${ownerId}` }))
97
  res.end()
98
  }
99
  })
100
 
101
- app.get("/download/:id\.mp4", async (req, res) => {
102
 
103
  /*
104
  for simplicity, let's skip auth when fetching videos
@@ -113,7 +94,7 @@ app.get("/download/:id\.mp4", async (req, res) => {
113
  }
114
  */
115
 
116
- const [ownerId, videoId] = `${req.params.id}`.split("_")
117
 
118
  if (!uuidValidate(ownerId)) {
119
  console.error("invalid owner id")
@@ -123,6 +104,7 @@ app.get("/download/:id\.mp4", async (req, res) => {
123
  return
124
  }
125
 
 
126
 
127
  if (!uuidValidate(videoId)) {
128
  console.error("invalid video id")
@@ -132,9 +114,9 @@ app.get("/download/:id\.mp4", async (req, res) => {
132
  return
133
  }
134
 
135
- let task: VideoTask = null
136
  try {
137
- task = await getTask(ownerId, videoId)
138
  console.log(`returning video ${videoId} to owner ${ownerId}`)
139
  } catch (err) {
140
  res.status(404)
@@ -143,7 +125,7 @@ app.get("/download/:id\.mp4", async (req, res) => {
143
  return
144
  }
145
 
146
- const completedFilePath = path.join(completedFilesDirFilePath, task.fileName)
147
 
148
  // note: we DON'T want to use the pending file path, as there may be operations on it
149
  // (ie. a process might be busy writing stuff to it)
@@ -177,8 +159,9 @@ app.get("/download/:id\.mp4", async (req, res) => {
177
  }
178
  })
179
 
180
- // get all pending tasks
181
- app.get("/", async (req, res) => {
 
182
  if (!hasValidAuthorization(req.headers)) {
183
  console.log("Invalid authorization")
184
  res.status(401)
@@ -187,20 +170,106 @@ app.get("/", async (req, res) => {
187
  return
188
  }
189
 
190
  try {
191
- const tasks = await getPendingTasks()
192
  res.status(200)
193
- res.write(JSON.stringify(tasks, null, 2))
194
  res.end()
195
  } catch (err) {
196
  console.error(err)
197
  res.status(500)
198
- res.write(JSON.stringify({ error: "couldn't get the tasks" }))
199
  res.end()
200
  }
201
  })
202
 
203
- app.get("/:id", async (req, res) => {
 
 
204
 
205
  if (!hasValidAuthorization(req.headers)) {
206
  console.log("Invalid authorization")
@@ -210,38 +279,113 @@ app.get("/:id", async (req, res) => {
210
  return
211
  }
212
 
213
- const [ownerId, videoId] = `${req.params.id}`.split("_")
214
 
215
  if (!uuidValidate(ownerId)) {
216
- console.error("invalid owner id")
217
  res.status(400)
218
- res.write(JSON.stringify({ error: `invalid owner id` }))
219
  res.end()
220
  return
221
  }
222
 
 
223
 
224
  if (!uuidValidate(videoId)) {
225
- console.error("invalid video id")
226
  res.status(400)
227
- res.write(JSON.stringify({ error: `invalid video id` }))
228
  res.end()
229
  return
230
  }
231
 
 
232
  try {
233
- const task = await getTask(ownerId, videoId)
234
- res.status(200)
235
- res.write(JSON.stringify(task))
236
- res.end()
 
 
237
  } catch (err) {
238
- console.error(err)
239
- res.status(404)
240
- res.write(JSON.stringify({ error: "couldn't find this task" }))
241
  res.end()
242
  }
243
  })
244
 
 
 
245
  app.delete("/:id", async (req, res) => {
246
 
247
  if (!hasValidAuthorization(req.headers)) {
@@ -270,27 +414,32 @@ app.delete("/:id", async (req, res) => {
270
  return
271
  }
272
 
273
- let task: VideoTask = null
 
 
 
274
  try {
275
- task = await getTask(ownerId, videoId)
276
  } catch (err) {
277
  console.error(err)
278
  res.status(404)
279
- res.write(JSON.stringify({ error: "couldn't find this task" }))
280
  res.end()
 
281
  }
282
 
283
  try {
284
- await deleteTask(task)
285
  res.status(200)
286
  res.write(JSON.stringify({ success: true }))
287
  res.end()
288
  } catch (err) {
289
  console.error(err)
290
  res.status(500)
291
- res.write(JSON.stringify({ success: false, error: "failed to delete the task" }))
292
  res.end()
293
  }
294
  })
 
295
 
296
  app.listen(port, () => { console.log(`Open http://localhost:${port}`) })
 
4
  import { validate as uuidValidate } from "uuid"
5
  import express from "express"
6
 
7
+ import { Video, VideoStatus, VideoAPIRequest } from "./types.mts"
8
  import { parseVideoRequest } from "./utils/parseVideoRequest.mts"
9
+ import { savePendingVideo } from "./scheduler/savePendingVideo.mts"
10
+ import { getVideo } from "./scheduler/getVideo.mts"
11
  import { main } from "./main.mts"
12
  import { completedFilesDirFilePath } from "./config.mts"
13
+ import { markVideoAsToDelete } from "./scheduler/markVideoAsToDelete.mts"
14
+ import { markVideoAsToAbort } from "./scheduler/markVideoAsToAbort.mts"
15
+ import { markVideoAsToPause } from "./scheduler/markVideoAsToPause.mts"
16
+ import { markVideoAsPending } from "./scheduler/markVideoAsPending.mts"
17
+ import { getPendingVideos } from "./scheduler/getPendingVideos.mts"
18
  import { hasValidAuthorization } from "./utils/hasValidAuthorization.mts"
19
+ import { getAllVideosForOwner } from "./scheduler/getAllVideosForOwner.mts"
20
  import { initFolders } from "./initFolders.mts"
21
+ import { sortVideosByYoungestFirst } from "./utils/sortVideosByYoungestFirst.mts"
22
 
23
  initFolders()
24
  // to disable all processing (eg. to debug)
 
30
 
31
  app.use(express.json())
32
 
33
+ app.post("/:ownerId", async (req, res) => {
34
+ const request = req.body as VideoAPIRequest
35
 
36
  if (!hasValidAuthorization(req.headers)) {
37
  console.log("Invalid authorization")
 
41
  return
42
  }
43
 
44
+ const ownerId = req.params.ownerId
45
 
46
+ if (!uuidValidate(ownerId)) {
47
+ console.error("invalid owner id")
 
 
 
 
48
  res.status(400)
49
+ res.write(JSON.stringify({ error: `invalid owner id` }))
50
  res.end()
51
  return
52
  }
53
 
54
+ let video: Video = null
55
+
56
+ console.log(`creating video from request..`)
57
+ console.log(`request: `, JSON.stringify(request))
58
  try {
59
+ video = await parseVideoRequest(ownerId, request)
 
 
 
60
  } catch (err) {
61
+ console.error(`failed to create video: ${video} (${err})`)
62
  res.status(400)
63
+ res.write(JSON.stringify({ error: "query seems to be malformed" }))
64
  res.end()
65
  return
66
  }
67
 
68
+ console.log(`saving video ${video.id}`)
69
  try {
70
+ await savePendingVideo(video)
71
  res.status(200)
72
+ res.write(JSON.stringify(video))
73
  res.end()
74
  } catch (err) {
75
  console.error(err)
76
  res.status(500)
77
+ res.write(JSON.stringify({ error: "couldn't save the video" }))
78
  res.end()
79
  }
80
  })
81
 
82
+ app.get("/:ownerId/:videoId\.mp4", async (req, res) => {
83
 
84
  /*
85
  for simplicity, let's skip auth when fetching videos
 
94
  }
95
  */
96
 
97
+ const ownerId = req.params.ownerId
98
 
99
  if (!uuidValidate(ownerId)) {
100
  console.error("invalid owner id")
 
104
  return
105
  }
106
 
107
+ const videoId = req.params.videoId
108
 
109
  if (!uuidValidate(videoId)) {
110
  console.error("invalid video id")
 
114
  return
115
  }
116
 
117
+ let video: Video = null
118
  try {
119
+ video = await getVideo(ownerId, videoId)
120
  console.log(`returning video ${videoId} to owner ${ownerId}`)
121
  } catch (err) {
122
  res.status(404)
 
125
  return
126
  }
127
 
128
+ const completedFilePath = path.join(completedFilesDirFilePath, video.fileName)
129
 
130
  // note: we DON'T want to use the pending file path, as there may be operations on it
131
  // (ie. a process might be busy writing stuff to it)
 
159
  }
160
  })
161
 
162
+ // get metadata (json)
163
+ app.get("/:ownerId/:videoId", async (req, res) => {
164
+
165
  if (!hasValidAuthorization(req.headers)) {
166
  console.log("Invalid authorization")
167
  res.status(401)
 
170
  return
171
  }
172
 
173
+ const ownerId = req.params.ownerId
174
+
175
+ if (!uuidValidate(ownerId)) {
176
+ console.error("invalid owner id")
177
+ res.status(400)
178
+ res.write(JSON.stringify({ error: `invalid owner id` }))
179
+ res.end()
180
+ return
181
+ }
182
+
183
+ const videoId = req.params.videoId
184
+
185
+ if (!uuidValidate(videoId)) {
186
+ console.error("invalid video id")
187
+ res.status(400)
188
+ res.write(JSON.stringify({ error: `invalid video id` }))
189
+ res.end()
190
+ return
191
+ }
192
+
193
  try {
194
+ const video = await getVideo(ownerId, videoId)
195
  res.status(200)
196
+ res.write(JSON.stringify(video))
197
+ res.end()
198
+ } catch (err) {
199
+ console.error(err)
200
+ res.status(404)
201
+ res.write(JSON.stringify({ error: "couldn't find this video" }))
202
+ res.end()
203
+ }
204
+ })
205
+
206
+ // only get the videos for a specific owner
207
+ app.get("/:ownerId", async (req, res) => {
208
+ if (!hasValidAuthorization(req.headers)) {
209
+ console.log("Invalid authorization")
210
+ res.status(401)
211
+ res.write(JSON.stringify({ error: "invalid token" }))
212
+ res.end()
213
+ return
214
+ }
215
+
216
+ const ownerId = req.params.ownerId
217
+
218
+ if (!uuidValidate(ownerId)) {
219
+ console.error(`invalid owner id ${ownerId}`)
220
+ res.status(400)
221
+ res.write(JSON.stringify({ error: `invalid owner id ${ownerId}` }))
222
+ res.end()
223
+ return
224
+ }
225
+
226
+ try {
227
+ const videos = await getAllVideosForOwner(ownerId)
228
+ sortVideosByYoungestFirst(videos)
229
+ res.status(200)
230
+ res.write(JSON.stringify(videos, null, 2))
231
+ res.end()
232
+ } catch (err) {
233
+ console.error(err)
234
+ res.status(500)
235
+ res.write(JSON.stringify({ error: `couldn't get the videos for owner ${ownerId}` }))
236
+ res.end()
237
+ }
238
+ })
239
+
240
+ // get all pending videos - this is for admin usage only
241
+ app.get("/", async (req, res) => {
242
+ if (!hasValidAuthorization(req.headers)) {
243
+ // this is what users will see in the space - but no need to show something scary
244
+ console.log("Invalid authorization")
245
+ res.status(200)
246
+ res.write(`<html><head></head><body>
247
+ This space is the REST API used by VideoChain UI:<br/>
248
+ <a href="https://jbilcke-hf-videochain-ui.hf.space" target="_blank">https://jbilcke-hf-videochain-ui.hf.space</a>
249
+ </body></html>`)
250
+ res.end()
251
+ // res.status(401)
252
+ // res.write(JSON.stringify({ error: "invalid token" }))
253
+ // res.end()
254
+ return
255
+ }
256
+
257
+ try {
258
+ const videos = await getPendingVideos()
259
+ res.status(200)
260
+ res.write(JSON.stringify(videos, null, 2))
261
  res.end()
262
  } catch (err) {
263
  console.error(err)
264
  res.status(500)
265
+ res.write(JSON.stringify({ error: "couldn't get the videos" }))
266
  res.end()
267
  }
268
  })
269
 
270
+
271
+ // edit a video
272
+ app.patch("/:ownerId/:videoId", async (req, res) => {
273
 
274
  if (!hasValidAuthorization(req.headers)) {
275
  console.log("Invalid authorization")
 
279
  return
280
  }
281
 
282
+ const ownerId = req.params.ownerId
283
 
284
  if (!uuidValidate(ownerId)) {
285
+ console.error(`invalid owner id ${ownerId}`)
286
  res.status(400)
287
+ res.write(JSON.stringify({ error: `invalid owner id ${ownerId}` }))
288
  res.end()
289
  return
290
  }
291
 
292
+ const videoId = req.params.videoId
293
 
294
  if (!uuidValidate(videoId)) {
295
+ console.error(`invalid video id ${videoId}`)
296
  res.status(400)
297
+ res.write(JSON.stringify({ error: `invalid video id ${videoId}` }))
298
  res.end()
299
  return
300
  }
301
 
302
+ let status: VideoStatus = "unknown"
303
  try {
304
+ const request = req.body as { status: VideoStatus }
305
+ if (['pending', 'abort', 'delete', 'pause'].includes(request.status)) {
306
+ status = request.status
307
+ } else {
308
+ throw new Error(`invalid video status "${request.status}"`)
309
+ }
310
  } catch (err) {
311
+ console.error(`invalid parameter (${err})`)
312
+ res.status(401)
313
+ res.write(JSON.stringify({ error: `invalid parameter (${err})` }))
314
  res.end()
315
+ return
316
+ }
317
+
318
+ switch (status) {
319
+ case 'delete':
320
+ try {
321
+ await markVideoAsToDelete(ownerId, videoId)
322
+ console.log(`deleting video ${videoId}`)
323
+ res.status(200)
324
+ res.write(JSON.stringify({ success: true }))
325
+ res.end()
326
+ } catch (err) {
327
+ console.error(`failed to delete video ${videoId} (${err})`)
328
+ res.status(500)
329
+ res.write(JSON.stringify({ error: `failed to delete video ${videoId}` }))
330
+ res.end()
331
+ }
332
+ break
333
+
334
+ case 'abort':
335
+ try {
336
+ await markVideoAsToAbort(ownerId, videoId)
337
+ console.log(`aborted video ${videoId}`)
338
+ res.status(200)
339
+ res.write(JSON.stringify({ success: true }))
340
+ res.end()
341
+ } catch (err) {
342
+ console.error(`failed to abort video ${videoId} (${err})`)
343
+ res.status(500)
344
+ res.write(JSON.stringify({ error: `failed to abort video ${videoId}` }))
345
+ res.end()
346
+ }
347
+ break
348
+
349
+ case 'pause':
350
+ try {
351
+ await markVideoAsToPause(ownerId, videoId)
352
+ console.log(`paused video ${videoId}`)
353
+ res.status(200)
354
+ res.write(JSON.stringify({ success: true }))
355
+ res.end()
356
+ } catch (err) {
357
+ console.error(`failed to pause video ${videoId} (${err})`)
358
+ res.status(500)
359
+ res.write(JSON.stringify({ error: `failed to pause video ${videoId}` }))
360
+ res.end()
361
+ }
362
+ break
363
+
364
+ case 'pending':
365
+ try {
366
+ await markVideoAsPending(ownerId, videoId)
367
+ console.log(`unpausing video ${videoId}`)
368
+ res.status(200)
369
+ res.write(JSON.stringify({ success: true }))
370
+ res.end()
371
+ } catch (err) {
372
+ console.error(`failed to unpause video ${videoId} (${err})`)
373
+ res.status(500)
374
+ res.write(JSON.stringify({ error: `failed to unpause video ${videoId}` }))
375
+ res.end()
376
+ }
377
+ break
378
+
379
+ default:
380
+ console.log(`unsupported status ${status}`)
381
+ res.status(401)
382
+ res.write(JSON.stringify({ error: `unsupported status ${status}` }))
383
+ res.end()
384
  }
385
  })
386
 
387
+ // delete a video - this is legacy, we should use other functions instead
388
+ /*
389
  app.delete("/:id", async (req, res) => {
390
 
391
  if (!hasValidAuthorization(req.headers)) {
 
414
  return
415
  }
416
 
417
+ // security note: we always check the existence of the video first
418
+ // that's because we are going to delete all the associated files with a glob,
419
+ // so we must be sure the id is not a system path or something ^^
420
+ let video: Video = null
421
  try {
422
+ video = await getVideo(ownerId, videoId)
423
  } catch (err) {
424
  console.error(err)
425
  res.status(404)
426
+ res.write(JSON.stringify({ error: "couldn't find this video" }))
427
  res.end()
428
+ return
429
  }
430
 
431
  try {
432
+ await markVideoAsToDelete(ownerId, videoId)
433
  res.status(200)
434
  res.write(JSON.stringify({ success: true }))
435
  res.end()
436
  } catch (err) {
437
  console.error(err)
438
  res.status(500)
439
+ res.write(JSON.stringify({ success: false, error: "failed to delete the video" }))
440
  res.end()
441
  }
442
  })
443
+ */
444
 
445
  app.listen(port, () => { console.log(`Open http://localhost:${port}`) })
src/initFolders.mts CHANGED
@@ -1,11 +1,18 @@
1
- import { tasksDirPath, pendingTasksDirFilePath, completedTasksDirFilePath, filesDirPath, pendingFilesDirFilePath, completedFilesDirFilePath } from "./config.mts"
2
  import { createDirIfNeeded } from "./utils/createDirIfNeeded.mts"
3
 
4
  export const initFolders = () => {
5
  console.log(`initializing folders..`)
6
- createDirIfNeeded(tasksDirPath)
7
- createDirIfNeeded(pendingTasksDirFilePath)
8
- createDirIfNeeded(completedTasksDirFilePath)
9
  createDirIfNeeded(filesDirPath)
10
  createDirIfNeeded(pendingFilesDirFilePath)
11
  createDirIfNeeded(completedFilesDirFilePath)
 
1
+ import {
2
+ metadataDirPath,
3
+ pendingMetadataDirFilePath,
4
+ completedMetadataDirFilePath,
5
+ filesDirPath,
6
+ pendingFilesDirFilePath,
7
+ completedFilesDirFilePath
8
+ } from "./config.mts"
9
  import { createDirIfNeeded } from "./utils/createDirIfNeeded.mts"
10
 
11
  export const initFolders = () => {
12
  console.log(`initializing folders..`)
13
+ createDirIfNeeded(metadataDirPath)
14
+ createDirIfNeeded(pendingMetadataDirFilePath)
15
+ createDirIfNeeded(completedMetadataDirFilePath)
16
  createDirIfNeeded(filesDirPath)
17
  createDirIfNeeded(pendingFilesDirFilePath)
18
  createDirIfNeeded(completedFilesDirFilePath)
src/llm/enrichVideoSpecsUsingLLM.mts ADDED
@@ -0,0 +1,79 @@
1
+ import { ChatCompletionRequestMessage } from "openai"
2
+
3
+ import { Video, VideoAPIRequest } from "../types.mts"
4
+ import { generateYAML } from "./openai/generateYAML.mts"
5
+ import { HallucinatedVideoRequest, OpenAIErrorResponse } from "./types.mts"
6
+ import { getQueryChatMessages } from "../preproduction/prompts.mts"
7
+ import { getValidNumber } from "../utils/getValidNumber.mts"
8
+
9
+
10
+ export const enrichVideoSpecsUsingLLM = async (video: Video): Promise<Video> => {
11
+
12
+ const messages: ChatCompletionRequestMessage[] = getQueryChatMessages(video.videoPrompt)
13
+
14
+ const defaultValue = {} as unknown as HallucinatedVideoRequest
15
+
16
+ // console.log("enrichVideoSpecsUsingLLM: messages = ", messages)
17
+
18
+ let hallucinatedVideo: HallucinatedVideoRequest
19
+
20
+
21
+ const referenceShot = video.shots[0]
22
+ video.shots = []
23
+ // console.log("referenceShot:", referenceShot)
24
+
25
+ try {
26
+ hallucinatedVideo = await generateYAML<HallucinatedVideoRequest>(
27
+ messages,
28
+ defaultValue
29
+ )
30
+ console.log("enrichVideoSpecsUsingLLM: hallucinatedVideo = ", hallucinatedVideo)
31
+ } catch (err) {
32
+
33
+ let error: OpenAIErrorResponse = err?.response?.data?.error as unknown as OpenAIErrorResponse
34
+ if (!error) {
35
+ error = { message: `${err || ""}` } as unknown as OpenAIErrorResponse
36
+ }
37
+
38
+ console.error(JSON.stringify(error, null, 2))
39
+ throw new Error(`failed to call the LLM: ${error.message}`)
40
+ }
41
+
42
+ // const video = JSON.parse(JSON.stringify(referenceVideo)) as Video
43
+
44
+ // TODO here we should make some verifications and perhaps even some conversions
45
+ // betwen the LLM response and the actual format used in a videoRequest
46
+ video.backgroundAudioPrompt = hallucinatedVideo.backgroundAudioPrompt || video.backgroundAudioPrompt
47
+ video.foregroundAudioPrompt = hallucinatedVideo.foregroundAudioPrompt || video.foregroundAudioPrompt
48
+ video.actorPrompt = hallucinatedVideo.actorPrompt || video.actorPrompt
49
+ video.actorVoicePrompt = hallucinatedVideo.actorVoicePrompt || video.actorVoicePrompt
50
+
51
+ video.noise = typeof hallucinatedVideo.noise !== "undefined"
52
+ ? (`${hallucinatedVideo.noise || ""}`.toLowerCase() === "true")
53
+ : video.noise
54
+
55
+ video.noiseAmount = typeof hallucinatedVideo.noiseAmount !== "undefined"
56
+ ? getValidNumber(hallucinatedVideo.noiseAmount, 0, 10, 2)
57
+ : video.noiseAmount
58
+
59
+ video.outroDurationMs = typeof hallucinatedVideo.outroDurationMs !== "undefined"
60
+ ? getValidNumber(hallucinatedVideo.outroDurationMs, 0, 3000, 500)
61
+ : video.outroDurationMs
62
+
63
+ const hallucinatedShots = Array.isArray(hallucinatedVideo.shots) ? hallucinatedVideo.shots : []
64
+
65
+
66
+ for (const hallucinatedShot of hallucinatedShots) {
67
+ const shot = JSON.parse(JSON.stringify(referenceShot))
68
+ shot.shotPrompt = hallucinatedShot.shotPrompt || shot.shotPrompt
69
+ shot.environmentPrompt = hallucinatedShot.environmentPrompt || shot.environmentPrompt
70
+ shot.photographyPrompt = hallucinatedShot.photographyPrompt || shot.photographyPrompt
71
+ shot.actionPrompt = hallucinatedShot.actionPrompt || shot.actionPrompt
72
+ shot.foregroundAudioPrompt = hallucinatedShot.foregroundAudioPrompt || shot.foregroundAudioPrompt
73
+ video.shots.push(shot)
74
+ }
75
+
76
+ console.log("enrichVideoSpecsUsingLLM: video = ", video)
77
+
78
+ return video
79
+ }
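The call site for this enrichment step is not expanded in this view (the processVideo.mts diff is collapsed), so the wiring below is an assumed, illustrative sketch; the wrapper name and import paths are made up for the example:

```ts
// Hypothetical wiring (not part of the visible diff): run the LLM "movie
// director assistant" step on a freshly parsed video, and keep the original
// specs if the OpenAI call fails so the pipeline can still proceed.
import { enrichVideoSpecsUsingLLM } from "./llm/enrichVideoSpecsUsingLLM.mts"
import { Video } from "./types.mts"

export const preproduceVideo = async (video: Video): Promise<Video> => {
  try {
    // mutates and returns the same object, with prompts and shots filled in
    return await enrichVideoSpecsUsingLLM(video)
  } catch (err) {
    console.error(`LLM enrichment failed, keeping the original specs (${err})`)
    return video
  }
}
```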
src/llm/openai/createChatCompletion.mts ADDED
@@ -0,0 +1,105 @@
1
+ import { ChatCompletionRequestMessage } from "openai"
2
+ import { GPTTokens } from "gpt-tokens"
3
+
4
+ import { openai } from "./openai.mts"
5
+ import { runModerationCheck } from "./runModerationCheck.mts"
6
+ import { getUserContent } from "./getUserContent.mts"
7
+ import { getTextPrompt } from "./getTextPrompt.mts"
8
+
9
+ export const createChatCompletion = async (
10
+ messages: ChatCompletionRequestMessage[],
11
+ model = "gpt-4"
12
+ ): Promise<string> => {
13
+ // this is the part added by the user, and the one we need to check against the moderation API
14
+ const userContent = getUserContent(messages)
15
+
16
+ const check = await runModerationCheck(userContent)
17
+
18
+ if (check.flagged) {
19
+ console.error("Thoughtcrime: content flagged by the AI police", {
20
+ userContent,
21
+ moderationResult: check,
22
+ })
23
+ return "Thoughtcrime: content flagged by the AI police"
24
+ }
25
+
26
+ const rawPrompt = getTextPrompt(messages)
27
+
28
+
29
+ // for doc: https://www.npmjs.com/package/gpt-tokens
30
+ const usageInfo = new GPTTokens({
31
+ // Plus enjoy a 25% cost reduction for input tokens on GPT-3.5 Turbo (0.0015 per 1K input tokens)
32
+ plus : false,
33
+ model : "gpt-4",
34
+ messages: messages as any,
35
+ })
36
+
37
+ console.table({
38
+ "Tokens prompt": usageInfo.promptUsedTokens,
39
+ "Tokens completion": usageInfo.completionUsedTokens,
40
+ "Tokens total": usageInfo.usedTokens,
41
+ })
42
+
43
+ // Price USD: 0.000298
44
+ console.log("Price USD: ", usageInfo.usedUSD)
45
+
46
+ // const tokenLimit = 4000
47
+
48
+ const maxTokens = 4000 - usageInfo.promptUsedTokens
49
+
50
+ console.log("maxTokens:", maxTokens)
51
+ /*
52
+ console.log("settings:", {
53
+ tokenLimit,
54
+ promptLength: rawPrompt.length,
55
+ promptTokenLengh: rawPrompt.length / 1.9,
56
+ maxTokens
57
+ })
58
+
59
+ console.log("createChatCompletion(): raw prompt length:", rawPrompt.length)
60
+ console.log(
61
+ `createChatCompletion(): requesting ${maxTokens} of the ${tokenLimit} tokens availables`
62
+ )
63
+ */
64
+
65
+ console.log("query:", {
66
+ model,
67
+ // messages,
68
+ user: "Anonymous User",
69
+ temperature: 0.7,
70
+ max_tokens: maxTokens,
71
+ // stop: preset.stop?.length ? preset.stop : undefined,
72
+ })
73
+
74
+ const response = await openai.createChatCompletion({
75
+ model,
76
+ messages,
77
+ // TODO use the Hugging Face Login username here
78
+ user: "Anonymous User",
79
+ temperature: 0.7,
80
+
81
+ // 30 tokens is about 120 characters
82
+ // we don't want more, as it will take longer to respond
83
+ max_tokens: maxTokens,
84
+ // stop: preset.stop?.length ? preset.stop : undefined,
85
+ })
86
+
87
+ const { choices } = response.data
88
+
89
+ if (!choices.length) {
90
+ console.log("createChatCompletion(): no choice found in the LLM response..")
91
+ return ""
92
+ }
93
+ const firstChoice = choices[0]
94
+
95
+ if (firstChoice?.message?.role !== "assistant") {
96
+ console.log(
97
+ "createChatCompletion(): something went wrong, the model imagined the user response?!"
98
+ )
99
+ return ""
100
+ }
101
+
102
+ console.log("createChatCompletion(): response", firstChoice.message.content)
103
+
104
+ return firstChoice.message.content || ""
105
+ }
src/llm/openai/createChatCompletionStream.mts ADDED
@@ -0,0 +1,66 @@
1
+ import { ChatCompletionRequestMessage } from "openai"
2
+
3
+ import { openai } from "./openai.mts"
4
+ import { streamCompletion } from "./stream.mts"
5
+ import { getTextPrompt } from "./getTextPrompt.mts"
6
+
7
+ export const createChatCompletionStream = async (
8
+ messages: ChatCompletionRequestMessage[],
9
+ model: string,
10
+ onMessage: (message: string) => Promise<void>,
11
+ onEnd = () => Promise<void>
12
+ ) => {
13
+ try {
14
+ const rawPrompt = getTextPrompt(messages)
15
+
16
+ const tokenLimit = 4096 // 8000
17
+
18
+ const maxTokens = Math.round(tokenLimit - rawPrompt.length / 1.9)
19
+
20
+ const completion = await openai.createCompletion({
21
+ model,
22
+ prompt: messages,
23
+ temperature: 0.7,
24
+ max_tokens: Math.min(30, maxTokens),
25
+ stream: true,
26
+ })
27
+
28
+ for await (const message of streamCompletion(completion as any)) {
29
+ try {
30
+ const parsed = JSON.parse(message)
31
+ const { text } = parsed.choices[0]
32
+
33
+ try {
34
+ await onMessage(text)
35
+ } catch (err) {
36
+ console.error(
37
+ 'Could not process stream message (callback failed)',
38
+ message,
39
+ err
40
+ )
41
+ }
42
+ } catch (error) {
43
+ console.error('Could not JSON parse stream message', message, error)
44
+ }
45
+ }
46
+ try {
47
+ await onEnd()
48
+ } catch (err) {
49
+ console.error('onEnd callback failed', err)
50
+ }
51
+ } catch (error: any) {
52
+ if (error.code) {
53
+ try {
54
+ const parsed = JSON.parse(error.body)
55
+ console.error('An error occurred during OpenAI request: ', parsed)
56
+ } catch (error) {
57
+ console.error(
58
+ 'An error occurred during OpenAI request (invalid json): ',
59
+ error
60
+ )
61
+ }
62
+ } else {
63
+ console.error('An error occurred during OpenAI request', error)
64
+ }
65
+ }
66
+ }
src/llm/openai/generateYAML.mts ADDED
@@ -0,0 +1,42 @@
1
+ import { ChatCompletionRequestMessage } from "openai"
2
+ import { parse } from "yaml"
3
+
4
+ import { createChatCompletion } from "./createChatCompletion.mts"
5
+
6
+ export const generateYAML = async <T,>(messages: ChatCompletionRequestMessage[] = [], defaultValue?: T): Promise<T> => {
7
+
8
+ const defaultResult = defaultValue || ({} as T)
9
+
10
+ if (!messages.length) {
11
+ return defaultResult
12
+ }
13
+
14
+ const output = await createChatCompletion(messages)
15
+
16
+ let raw = ""
17
+
18
+ // cleanup any remains of the markdown response
19
+ raw = output.split("```")[0]
20
+
21
+ // remove any remaining `
22
+ const input = raw.replaceAll("`", "")
23
+
24
+ try {
25
+ const obj = parse(input) as T
26
+
27
+ if (obj === null || typeof obj === "undefined") {
28
+ throw new Error("couldn't parse YAML")
29
+ }
30
+
31
+ return obj
32
+ } catch (err) {
33
+ // just in case, we also try JSON!
34
+ const obj = JSON.parse(input) as T
35
+
36
+ if (obj === null || typeof obj === "undefined") {
37
+ throw new Error("couldn't parse JSON")
38
+ }
39
+
40
+ return obj
41
+ }
42
+ }
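As an illustration of how this helper is meant to be consumed, a minimal sketch follows; the message contents and the ShotIdea type are invented for the example and are not part of the commit:

```ts
// Hypothetical usage of generateYAML: request a typed object from the model
// and fall back to a default value when the messages array is empty or the
// reply cannot be parsed as YAML.
import { ChatCompletionRequestMessage } from "openai"
import { generateYAML } from "./generateYAML.mts"

interface ShotIdea {
  shotPrompt: string
}

const messages: ChatCompletionRequestMessage[] = [
  { role: "system", content: "Reply with a YAML object containing a single `shotPrompt` string." },
  { role: "user", content: "A flock of ducks landing on a pond at sunrise" },
]

const idea = await generateYAML<ShotIdea>(messages, { shotPrompt: "" })
console.log(idea.shotPrompt)
```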
src/llm/openai/getTextPrompt.mts ADDED
@@ -0,0 +1,4 @@
1
+ import { ChatCompletionRequestMessage } from "openai"
2
+
3
+ export const getTextPrompt = (prompt: ChatCompletionRequestMessage[]) =>
4
+ prompt.reduce((acc, item) => acc.concat(item.content), "") || ""
src/llm/openai/getUserContent.mts ADDED
@@ -0,0 +1,7 @@
1
+ import { ChatCompletionRequestMessage } from "openai"
2
+
3
+ export const getUserContent = (prompt: ChatCompletionRequestMessage[]) =>
4
+ prompt
5
+ .filter((message) => message.role === "user")
6
+ .map((message) => message.content)
7
+ .join("\n")
src/llm/openai/openai.mts ADDED
@@ -0,0 +1,7 @@
1
+ import { Configuration, OpenAIApi } from "openai"
2
+
3
+ export const openai = new OpenAIApi(
4
+ new Configuration({
5
+ apiKey: process.env.VC_OPENAI_API_KEY
6
+ })
7
+ )
src/llm/openai/runModerationCheck.mts ADDED
@@ -0,0 +1,30 @@
1
+ import {
2
+ CreateModerationResponseResultsInnerCategories,
3
+ CreateModerationResponseResultsInnerCategoryScores,
4
+ } from "openai"
5
+
6
+ import { openai } from "./openai.mts"
7
+
8
+ export const runModerationCheck = async (
9
+ input = ''
10
+ ): Promise<{
11
+ categories?: CreateModerationResponseResultsInnerCategories
12
+ category_scores?: CreateModerationResponseResultsInnerCategoryScores
13
+ flagged: boolean
14
+ }> => {
15
+ if (!input || !input.length) {
16
+ console.log(`skipping moderation check as input length is too short`)
17
+ return {
18
+ flagged: false,
19
+ }
20
+ }
21
+
22
+ const response = await openai.createModeration({ input })
23
+ const { results } = response.data
24
+
25
+ if (!results.length) {
26
+ throw new Error(`failed to call the moderation endpoint`)
27
+ }
28
+
29
+ return results[0]
30
+ }
src/llm/openai/stream.mts ADDED
@@ -0,0 +1,35 @@
1
+ import { Readable } from "node:stream"
2
+
3
+ async function* chunksToLines(
4
+ chunksAsync: AsyncIterable<Buffer>
5
+ ): AsyncIterable<string> {
6
+ let previous = ""
7
+ for await (const chunk of chunksAsync) {
8
+ const bufferChunk = Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk)
9
+ previous += bufferChunk
10
+ let eolIndex
11
+ while ((eolIndex = previous.indexOf("\n")) >= 0) {
12
+ // line includes the EOL
13
+ const line = previous.slice(0, eolIndex + 1).trimEnd()
14
+ if (line === "data: [DONE]") break
15
+ if (line.startsWith("data: ")) yield line
16
+ previous = previous.slice(eolIndex + 1)
17
+ }
18
+ }
19
+ }
20
+
21
+ async function* linesToMessages(
22
+ linesAsync: AsyncIterable<string>
23
+ ): AsyncIterable<string> {
24
+ for await (const line of linesAsync) {
25
+ const message = line.substring("data: ".length)
26
+
27
+ yield message
28
+ }
29
+ }
30
+
31
+ export async function* streamCompletion(
32
+ stream: Readable
33
+ ): AsyncGenerator<string, void, undefined> {
34
+ yield* linesToMessages(chunksToLines(stream))
35
+ }
src/llm/types.mts ADDED
@@ -0,0 +1,25 @@
1
+ // note: this has to exactly match what is in the prompt, in ../preproduction/prompts.mts
2
+ export interface HallucinatedVideoRequest {
3
+ backgroundAudioPrompt: string; // describe the background audio (crowd, birds, wind, sea etc..)
4
+ foregroundAudioPrompt: string; // describe the foreground audio (cars revving, footsteps, objects breaking, explosion etc)
5
+ actorPrompt: string; // describe the physical look of the main actor visible in the shot (man, woman, old, young, hair, glasses, clothes etc)
6
+ actorVoicePrompt: string; // describe the main actor voice (man, woman, old, young, amused, annoyed.. etc)
7
+ noise: boolean; // whether to apply movie noise or not
8
+ noiseAmount: number; // (integer) the amount of ffmpeg noise (film grain) to apply. 0 is none, 10 is a lot
9
+ outroDurationMs: number; // in milliseconds. An outro generally only lasts between 0 and 3000 (3s)
10
+
11
+ shots: Array<{
12
+ shotPrompt: string; // describe the main elements of a shot, in excruciating details. You must include ALL those parameters: characters, shot story, what is happening. How they look, the textures, the expressions, their clothes. The color, materials and style of clothes.
13
+ environmentPrompt: string; // describe the environment, in excruciating details. You must include ALL those parameters: Lights, atmosphere and weather (misty, dust, clear, rain, snow..). Time of the day and hour of the day. Furnitures, their shape, style, era. The materials used for each object. The global time period, time of the day, era. Explain if anything is moving in the background.
14
+ photographyPrompt: string; // describe the photography, in excruciating details. You must include ALL those parameters: Camera angle, position and movement. Type of shot and angle. Lighting. Mood. Settings. Tint of the lights. Position of the sun or moon. Shadows and their direction. Camera shutter speed, blur, bokeh, aperture.
15
+ actionPrompt: string; // describe the dynamics of a shot, in excruciating details. You must include ALL those parameters: What is happening, who and what is moving. Which entity are in movements. What are the directions, starting and ending position. At which speed entities or objects are moving. Is there motion blur, slow motion, timelapse etc.
16
+ foregroundAudioPrompt: string; // describe the sounds in a concise way (eg. ringing bells, underwater sound and whistling dolphin, cat meowing etc),
17
+ }>
18
+ }
19
+
20
+ export interface OpenAIErrorResponse {
21
+ message: string
22
+ type: string
23
+ param: any
24
+ code: any
25
+ }
src/main.mts CHANGED
@@ -1,22 +1,26 @@
1
  import { initFolders } from "./initFolders.mts"
2
- import { getPendingTasks } from "./scheduler/getPendingTasks.mts"
3
- import { processTask } from "./scheduler/processTask.mts"
 
4
 
5
  export const main = async () => {
6
 
7
- const tasks = await getPendingTasks()
8
- if (!tasks.length) {
9
  setTimeout(() => {
10
  main()
11
  }, 500)
12
  return
13
  }
14
 
15
- console.log(`there are ${tasks.length} pending tasks`)
16
- for (const task of tasks) {
17
- await processTask(task)
 
 
 
18
  }
19
- console.log(`processed ${tasks.length} tasks`)
20
 
21
  setTimeout(() => {
22
  main()
 
1
  import { initFolders } from "./initFolders.mts"
2
+ import { getPendingVideos } from "./scheduler/getPendingVideos.mts"
3
+ import { processVideo } from "./scheduler/processVideo.mts"
4
+ import { sortPendingVideosByLeastCompletedFirst } from "./utils/sortPendingVideosByLeastCompletedFirst.mts"
5
 
6
  export const main = async () => {
7
 
8
+ const videos = await getPendingVideos()
9
+ if (!videos.length) {
10
  setTimeout(() => {
11
  main()
12
  }, 500)
13
  return
14
  }
15
 
16
+ console.log(`there are ${videos.length} pending videos`)
17
+
18
+ sortPendingVideosByLeastCompletedFirst(videos)
19
+
20
+ for (const video of videos) {
21
+ await processVideo(video)
22
  }
23
+ console.log(`processed ${videos.length} videos`)
24
 
25
  setTimeout(() => {
26
  main()
src/preproduction/mocks.mts ADDED
@@ -0,0 +1,36 @@
1
+ import { Video, VideoShot } from "../types.mts"
2
+
3
+ export const mockShots: VideoShot[] = [
4
+ {
5
+ "shotPrompt": "In the extreme wide shot, a flock of ducks is converging on the Central Park, coming from multiple directions. Their feathers are glossy and clean, casting off varying degrees of green, brown and white",
6
+ "environmentPrompt": "Central Park at sunrise, the park looks slightly misty, the sky is tinged with shades of pink and orange as the day breaks. There's dew on the grass, and the leaves on trees are rustling in the light breeze",
7
+ "photographyPrompt": "Eye-level shot with a slight tilt in the camera, capturing the panorama of the park. There's natural lighting, sun just rising. The camera zooms out to capture the ducks entering the park. Shutter speed is slow to capture the movement of ducks",
8
+ "actionPrompt": "Large groups of ducks waddle into the park from various directions, some fly in groups, landing on the pond with small splashes. Movement is slow, slightly sped up to depict the invasion",
9
+ "foregroundAudioPrompt": "A symphony of soft quacking and rustling feathers",
10
+ },
11
+ {
12
+ "shotPrompt": "In the medium shot, a group of ducks are by the pond, pecking at the ground and frolicking in the water. One male mallard is particularly captivating with its emerald green head and healthy body",
13
+ "environmentPrompt": "It's a sunny spring day in Central Park. The pond is surrounded by lush, green vegetation and dappled with sunlight filtering through the leaves",
14
+ "photographyPrompt": "Low angle shot near the water level, the camera moves in a crane shot to capture ducks in action, and the camera's aperture is partially open. Natural sunlight creates playful shadows",
15
+ "actionPrompt": "Ducks are pecking at the ground, dabbling at the water's edge and frolicking in the pond. The camera tracks a particularly majestic mallard navigating through the pond",
16
+ "foregroundAudioPrompt": "Sounds of ducks quacking and splashing in the water"
17
+ },
18
+ {
19
+ "shotPrompt": "Close-up shot of a mother duck with ducklings following her in a line on the grass and into the water",
20
+ "environmentPrompt": "Central Park, by one of the smaller ponds, surrounded by green trees. Sun is high up giving off warm, radiant light",
21
+ "photographyPrompt": "High angle shot, focusing on the line of ducklings following their mother. The camera follows the ducklings. The setting is bright and clear with sun illuminating the ducklings",
22
+ "actionPrompt": "Mother duck is leading her ducklings from the grass into the water, the ducklings obediently follow, creating a neat line. The whole scene feels peaceful",
23
+ "foregroundAudioPrompt": "Ducklings' high pitched chirping, soft lapping of water at the edge of the pond"
24
+ }
25
+ ] as any
26
+
27
+ export const mock: Video = {
28
+ "backgroundAudioPrompt": "City ambience mixed with the rustling leaves and the chirping birds in the park",
29
+ "foregroundAudioPrompt": "Rustling feathers, soft quacking, flapping wings, occasional splash in the pond",
30
+ "actorPrompt": "Main actors are ducks - a variety of breeds, mostly mallards: males with glossy green heads and females in mottled brown; all plump, medium-sized waterfowl",
31
+ "actorVoicePrompt": "Soft, low pitched quacking of adult ducks and higher pitched chirping of ducklings",
32
+ "noise": true,
33
+ "noiseAmount": 2,
34
+ "outroDurationMs": 1500,
35
+ "shots": mockShots
36
+ } as any
src/preproduction/prompts.mts ADDED
@@ -0,0 +1,119 @@
1
+
2
+
3
+ // types of movie shots: https://www.nfi.edu/types-of-film-shots/
4
+
5
+ import { ChatCompletionRequestMessage } from "openai"
6
+
7
+ export const getQueryChatMessages = (sceneDescription: string): ChatCompletionRequestMessage[] => {
8
+ return [
9
+ {
10
+ role: "system",
11
+ name: "moviemaking_rules",
12
+ content: `# Context
13
+ You are an AI Movie Director Assistant, and you need to help generating input requests (movie "specs") for an automated movie generation API.
14
+ The format expected by the API must be in YAML. The TypeScript schema for this YAML file is:
15
+ \`\`\`typescript
16
+ {
17
+ backgroundAudioPrompt: string; // describe the background audio (crowd, birds, wind, sea etc..)
18
+ foregroundAudioPrompt: string; // describe the foreground audio (cars revving, footsteps, objects breaking, explosion etc)
19
+ actorPrompt: string; // describe the physical look of the main actor visible in the shot (man, woman, old, young, hair, glasses, clothes etc)
20
+ actorVoicePrompt: string; // describe the main actor voice (man, woman, old, young, amused, annoyed.. etc)
21
+ noise: boolean; // whether to apply movie noise or not
22
+ noiseAmount: number; // (integer) the amount of noise (film grain) to apply. This is mapped from the FFmpeg filter (0 is none, 10 is already a lot)
23
+ outroDurationMs: number; // in milliseconds. An outro generally only lasts between 0 and 3000 (3s)
24
+ shots: Array<{
25
+ shotPrompt: string; // describe the main elements of a shot, in excruciating details. You must include ALL those parameters: characters, shot story, what is happening. How they look, the textures, the expressions, their clothes. The color, materials and style of clothes.
26
+ environmentPrompt: string; // describe the environment, in excruciating details. You must include ALL those parameters: Lights, atmosphere and weather (misty, dust, clear, rain, snow..). Time of the day and hour of the day. Furnitures, their shape, style, era. The materials used for each object. The global time period, time of the day, era. Explain if anything is moving in the background.
27
+ photographyPrompt: string; // describe the photography, in excruciating details. You must include ALL those parameters: Camera angle, position and movement. Type of shot and angle. Lighting. Mood. Settings. Tint of the lights. Position of the sun or moon. Shadows and their direction. Camera shutter speed, blur, bokeh, aperture.
28
+ actionPrompt: string; // describe the dynamics of a shot, in excruciating detail. You must include ALL those parameters: What is happening, who and what is moving. Which entities are in movement. What are the directions, starting and ending positions. At which speed entities or objects are moving. Is there motion blur, slow motion, timelapse etc.
29
+ foregroundAudioPrompt: string; // describe the sounds in a concise way (e.g. ringing bells, underwater sound and a whistling dolphin, a cat meowing, etc.),
30
+ }>
31
+ }
32
+ \`\`\`
33
+ # Guidelines
34
+ You will generate 3 shots by default, unless more or fewer are specified.
35
+ It is crucial to repeat the elements constituting a sequence of multiple shots verbatim from one shot to another.
36
+ For instance, you will have to repeat exactly what a character or background looks like, how they are dressed, etc.
37
+ This will ensure consistency from one scene to another.
38
+ ## Creating a movie
39
+ Here are some guidelines regarding film-making:
40
+ - The distance your subject is from the camera impacts how the audience feels about them.
41
+ - Subject will appear largest in a close-up or choker shot and smallest in a wide or long shot.
42
+ - Camera movement is a technique for changing the relationship between the subject and the camera frame, controlling the delivery of the narrative. It helps to give additional meaning to what’s happening on the screen.
43
+ - Do not hesitate to combine types of shots with camera movement shots and camera position (angle) shots.
44
+ ## Shots
45
+ Single shot: where the shot only captures one subject.
46
+ Two shot: which has only two characters.
47
+ Three shot: when three characters are in the frame.
48
+ Point-of-view shot (POV): shows the scene from the point of view of one of the characters, makes the audience feel that they are there seeing what the character is seeing.
49
+ Over-the-shoulder shot (OTS): shows the subject from behind the shoulder of another character.
50
+ Over-the-hip (OTH) shot, in which the camera is placed on the hip of one character and the focus is on the subject.
51
+ Reverse angle shot: which is approximately 180 degrees opposite the previous shot.
52
+ Reaction shot: which shows the character’s reaction to the previous shot.
53
+ Weather shot: where the subject of the filming is the weather.
54
+ Extreme wide shot/extreme long shot: used to show the subject and the entire area of the environment they are in.
55
+ Wide shot/long shot: used to focus on the subject while still showing the scene the subject is in.
56
+ Medium shot: shows the subject from the knees up, and is often referred to as the 3/4 shot.
57
+ Medium close-up shot: The subject fills the frame. It is somewhere between a medium shot and a close-up.
58
+ Close-up shot: shows emotions and detailed reactions, with the subject filling the entire frame.
59
+ Choker shot: shows the subject’s face from just above the eyebrows to just below the mouth and is between a close-up and an extreme close-up.
60
+ Extreme close-up shot: shows the detail of an object, such as one a character is handling, or a person, such as just their eyes or moving lips.
61
+ Full shot: similar to a wide shot except that it focuses on the character in the frame, showing them from head to toe.
62
+ Cowboy shot: similar to the medium shot except that the character is shown from the hips or waist up.
63
+ Establishing shot: a long shot at the beginning of a scene that shows objects, buildings, and other elements of a setting from a distance to establish where the next sequence of events takes place.
64
+ ## Camera angles
65
+ Eye-level shot: This is when the camera is placed at the same height as the eyes of the characters.
66
+ Low angle shot: This shot frames the subject from a low height, often used to emphasize differences in power between characters.
67
+ Aerial shot/helicopter shot: Taken from way up high, this shot is usually from a drone or helicopter to establish the expanse of the surrounding landscape.
68
+ High angle shot: This is when the subject is framed with the camera looking down at them.
69
+ Birds-eye-view shot/overhead shot: This is a shot taken from way above the subject, usually including a significant amount of the surrounding environment to create a sense of scale or movement.
70
+ Shoulder-level shot: This is where the camera is approximately the same height as the character’s shoulders.
71
+ Hip-level shot: The camera is approximately at the height of the character’s hips.
72
+ Knee-level shot: The camera is approximately at the same level as the character’s knees.
73
+ Ground-level shot: When the height of the camera is at ground level with the character, this shot captures what’s happening on the ground the character is standing on.
74
+ Dutch-angle/tilt shot: This is where the camera is tilted to the side.
75
+ Cut-in shot: This type of shot cuts into the action on the screen to offer a different view of something happening in this main scene.
76
+ Cutaway shot: As a shot that cuts away from the main action on the screen, it’s used to focus on secondary action and add more information for greater understanding for the audience.
77
+ Master shot: A long shot that captures most or all of the action happening in a scene.
78
+ Deep focus: A shot that keeps everything on the screen in sharp focus, including the foreground, background, and middle ground.
79
+ Locked-down shot: With this shot, the camera is fixed in one position and the action continues off-screen.
80
+ ## Camera movements
81
+ Zoom Shot: involves changing the focal length of the lens to zoom in or out during filming.
82
+ Pan shot: involves moving the camera from side to side to show something to the audience or help them better follow the sequence of events.
83
+ Tilt shot: similar to a pan shot, except moving the camera up and down.
84
+ Dolly shot: the camera is attached to a dolly that moves on tracks and can possibly move up and down.
85
+ Truck shot: you move the entire camera on a fixed point and the motion goes from side to side.
86
+ Pedestal shot: the entire camera is moved vertically, not just the angle of view, and is often combined with panning and/or tilting.
87
+ Static/fixed shot: where there is no camera movement, and the shot emphasizes the movement of the subject in the environment.
88
+ Arc shot: where the camera moves in an arc pattern around the subject to give the audience a better perspective of their surroundings.
89
+ Crab shot: a less-common version of tracking a subject where the dolly the camera is on goes sideways.
90
+ Dolly zoom shot: the position of the camera and focal length are changed simultaneously.
91
+ Whip pan shot/swish pan shot: used to create a blur as you pan from one shot to the next.
92
+ Tracking shot: the camera follows the subject, either from behind or at their side, moving with them.
93
+ Whip tilt shot: used to create a blur panning from one shot to the next vertically.
94
+ Bridging shot: denotes a shift in place or time.
95
+ ## Focus
96
+ Focus pull: focus the lens to keep the subject within an acceptable focus range.
97
+ Rack focus: focus is more aggressively shifted from subject A to subject B.
98
+ Tilt-shift: parts of the image are in focus while other parts are out of focus.
99
+ Deep focus: both the subject and the environment are in focus.
100
+ Shallow focus: subject is crisp and in focus while the background is out of focus.
101
+ ## Camera angles
102
+ High-angle
103
+ Low-angle
104
+ Over-the-shoulder
105
+ Bird’s eye
106
+ Dutch angle/tilt`
107
+ },
108
+ {
109
+ role: "user",
110
+ name: "movie_director",
111
+ content: `# Task
112
+ Please generate the movie spec YAML based on the following description:
113
+ ${sceneDescription}.
114
+ # YAML
115
+ \`\`\`
116
+ `
117
+ },
118
+ ]
119
+ }
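A note on how these messages get used: `getQueryChatMessages` only builds the prompt; the new `src/llm/openai/*` files (whose contents are not visible in this truncated view) are responsible for sending it. The sketch below shows the general shape of that call, assuming the `openai` v3 Node SDK (implied by the `ChatCompletionRequestMessage` import above) and an `OPENAI_API_KEY` environment variable; the helper name and model choice are illustrative, not taken from the commit.

```typescript
// Illustrative sketch only: the commit's real createChatCompletion.mts / generateYAML.mts
// are not shown in this view, so names, model and options here are assumptions.
import { Configuration, OpenAIApi } from "openai"
import { getQueryChatMessages } from "../../preproduction/prompts.mts"

const openai = new OpenAIApi(new Configuration({ apiKey: process.env.OPENAI_API_KEY }))

export const draftVideoSpec = async (sceneDescription: string): Promise<string> => {
  const response = await openai.createChatCompletion({
    model: "gpt-4", // hypothetical; the commit does not reveal which model it uses
    messages: getQueryChatMessages(sceneDescription),
    temperature: 0.7,
  })
  // the user message ends with an opened code fence, so the model is expected to reply with raw YAML
  return response.data.choices[0]?.message?.content ?? ""
}
```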
src/production/generateShot.mts CHANGED
@@ -135,7 +135,7 @@ export const generateShot = async ({
135
  const interpolationSteps = 3
136
  const interpolatedFramesPerSecond = 24
137
  await interpolateVideo(
138
- task,
139
  interpolationSteps,
140
  interpolatedFramesPerSecond
141
  )
@@ -194,7 +194,7 @@ export const generateShot = async ({
194
  audioFileName = foregroundAudioFileName
195
  }
196
 
197
- await addAudioToVideo(task, audioFileName)
198
  }
199
 
200
  console.log("returning result to user..")
 
135
  const interpolationSteps = 3
136
  const interpolatedFramesPerSecond = 24
137
  await interpolateVideo(
138
+ video,
139
  interpolationSteps,
140
  interpolatedFramesPerSecond
141
  )
 
194
  audioFileName = foregroundAudioFileName
195
  }
196
 
197
+ await addAudioToVideo(video, audioFileName)
198
  }
199
 
200
  console.log("returning result to user..")
src/scheduler/deleteTask.mts DELETED
@@ -1,33 +0,0 @@
1
-
2
- import { existsSync, promises as fs } from "node:fs"
3
- import path from "node:path"
4
-
5
- import tmpDir from "temp-dir"
6
-
7
- import { VideoTask } from "../types.mts"
8
- import { completedTasksDirFilePath, completedFilesDirFilePath, pendingTasksDirFilePath, pendingFilesDirFilePath } from "../config.mts"
9
- import { deleteFileIfExists } from "../utils/deleteFileIfExists.mts"
10
-
11
-
12
- export const deleteTask = async (task: VideoTask) => {
13
- const taskFileName = `${task.ownerId}_${task.id}.json`
14
- const videoFileName = task.fileName
15
-
16
- // .mp4 files
17
- const tmpFilePath = path.join(tmpDir, videoFileName)
18
- const pendingVideoPath = path.join(pendingFilesDirFilePath, videoFileName)
19
- const completedVideoPath = path.join(completedFilesDirFilePath, videoFileName)
20
-
21
- // .json files
22
- const pendingTaskPath = path.join(pendingTasksDirFilePath, taskFileName)
23
- const completedTaskPath = path.join(completedTasksDirFilePath, taskFileName)
24
-
25
- await deleteFileIfExists(tmpFilePath)
26
- await deleteFileIfExists(pendingVideoPath)
27
- await deleteFileIfExists(completedVideoPath)
28
- await deleteFileIfExists(pendingTaskPath)
29
- await deleteFileIfExists(completedTaskPath)
30
-
31
- // TODO: we didn't delete any audio file!
32
- console.log(`note: we didn't delete any audio file!`)
33
- }
src/scheduler/deleteVideo.mts ADDED
@@ -0,0 +1,28 @@
1
+ import tmpDir from "temp-dir"
2
+ import { validate as uuidValidate } from "uuid"
3
+
4
+ import { completedMetadataDirFilePath, completedFilesDirFilePath, pendingMetadataDirFilePath, pendingFilesDirFilePath } from "../config.mts"
5
+ import { deleteFilesWithName } from "../utils/deleteAllFilesWith.mts"
6
+
7
+
8
+ // note: we make sure ownerId and videoId are *VALID*
9
+ // otherwise an attacker could try to delete important files!
10
+ export const deleteVideo = async (ownerId: string, videoId?: string) => {
11
+ if (!uuidValidate(ownerId)) {
12
+ throw new Error(`fatal error: ownerId ${ownerId} is invalid!`)
13
+ }
14
+
15
+ if (videoId && !uuidValidate(videoId)) {
16
+ throw new Error(`fatal error: videoId ${videoId} is invalid!`)
17
+ }
18
+ const id = videoId ? `${ownerId}_${videoId}` : ownerId
19
+
20
+ // this should delete everything, including audio files
21
+ // however we still have some temporary files with a name that is unique:
22
+ // we should probably rename those
23
+ await deleteFilesWithName(tmpDir, id)
24
+ await deleteFilesWithName(completedMetadataDirFilePath, id)
25
+ await deleteFilesWithName(completedFilesDirFilePath, id)
26
+ await deleteFilesWithName(pendingMetadataDirFilePath, id)
27
+ await deleteFilesWithName(pendingFilesDirFilePath, id)
28
+ }
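`deleteVideo` delegates the actual removal to `deleteFilesWithName` from `src/utils/deleteAllFilesWith.mts`, which is not part of this view. Inferred purely from how it is called above, that helper presumably looks something like this (a sketch, not the committed implementation):

```typescript
// Assumed behaviour of deleteFilesWithName: remove every file in `dir`
// whose name contains the given id (ownerId or ownerId_videoId).
import { promises as fs } from "node:fs"
import path from "node:path"

export const deleteFilesWithName = async (dir: string, name: string): Promise<void> => {
  const files = await fs.readdir(dir)
  for (const file of files) {
    if (file.includes(name)) {
      await fs.unlink(path.join(dir, file))
    }
  }
}
```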
src/scheduler/getAllTasksForOwner.mts DELETED
@@ -1,9 +0,0 @@
1
- import { VideoTask } from "../types.mts"
2
- import { getCompletedTasks } from "./getCompletedTasks.mts"
3
- import { getPendingTasks } from "./getPendingTasks.mts"
4
-
5
- export const getAllTasksForOwner = async (ownerId: string): Promise<VideoTask[]> => {
6
- const pendingTasks = await getPendingTasks(ownerId)
7
- const completedTasks = await getCompletedTasks(ownerId)
8
- return [...pendingTasks, ...completedTasks]
9
- }
src/scheduler/getAllVideosForOwner.mts ADDED
@@ -0,0 +1,9 @@
1
+ import { Video } from "../types.mts"
2
+ import { getCompletedVideos } from "./getCompletedVideos.mts"
3
+ import { getPendingVideos } from "./getPendingVideos.mts"
4
+
5
+ export const getAllVideosForOwner = async (ownerId: string): Promise<Video[]> => {
6
+ const pendingVideos = await getPendingVideos(ownerId)
7
+ const completedVideos = await getCompletedVideos(ownerId)
8
+ return [...pendingVideos, ...completedVideos]
9
+ }
src/scheduler/getCompletedTasks.mts DELETED
@@ -1,9 +0,0 @@
1
- import { VideoTask } from "../types.mts"
2
- import { completedTasksDirFilePath } from "../config.mts"
3
- import { readTasks } from "./readTasks.mts"
4
-
5
- export const getCompletedTasks = async (ownerId?: string): Promise<VideoTask[]> => {
6
- const completedTasks = await readTasks(completedTasksDirFilePath, ownerId)
7
-
8
- return completedTasks
9
- }
src/scheduler/getCompletedVideos.mts ADDED
@@ -0,0 +1,9 @@
1
+ import { Video } from "../types.mts"
2
+ import { completedMetadataDirFilePath } from "../config.mts"
3
+ import { readVideoMetadataFiles } from "./readVideoMetadataFiles.mts"
4
+
5
+ export const getCompletedVideos = async (ownerId?: string): Promise<Video[]> => {
6
+ const completedVideos = await readVideoMetadataFiles(completedMetadataDirFilePath, ownerId)
7
+
8
+ return completedVideos
9
+ }
src/scheduler/getPendingTasks.mts DELETED
@@ -1,9 +0,0 @@
1
- import { VideoTask } from "../types.mts"
2
- import { pendingTasksDirFilePath } from "../config.mts"
3
- import { readTasks } from "./readTasks.mts"
4
-
5
- export const getPendingTasks = async (ownerId?: string): Promise<VideoTask[]> => {
6
- const pendingTasks = await readTasks(pendingTasksDirFilePath, ownerId)
7
-
8
- return pendingTasks
9
- }
src/scheduler/getPendingVideos.mts ADDED
@@ -0,0 +1,9 @@
1
+ import { Video } from "../types.mts"
2
+ import { pendingMetadataDirFilePath } from "../config.mts"
3
+ import { readVideoMetadataFiles } from "./readVideoMetadataFiles.mts"
4
+
5
+ export const getPendingVideos = async (ownerId?: string): Promise<Video[]> => {
6
+ const pendingVideos = await readVideoMetadataFiles(pendingMetadataDirFilePath, ownerId)
7
+
8
+ return pendingVideos
9
+ }
src/scheduler/getTask.mts DELETED
@@ -1,23 +0,0 @@
1
- import path from "node:path"
2
-
3
- import { completedTasksDirFilePath, pendingTasksDirFilePath } from "../config.mts"
4
- import { readTask } from "./readTask.mts"
5
-
6
- export const getTask = async (ownerId: string, videoId: string) => {
7
- const taskFileName = `${ownerId}_${videoId}.json`
8
-
9
- const completedTaskFilePath = path.join(completedTasksDirFilePath, taskFileName)
10
- const pendingTaskFilePath = path.join(pendingTasksDirFilePath, taskFileName)
11
-
12
- try {
13
- const completedTask = await readTask(completedTaskFilePath)
14
- return completedTask
15
- } catch (err) {
16
- try {
17
- const pendingTask = await readTask(pendingTaskFilePath)
18
- return pendingTask
19
- } catch (err) {
20
- throw new Error(`couldn't find video task ${videoId} for owner ${ownerId}`)
21
- }
22
- }
23
- }
src/scheduler/getVideo.mts ADDED
@@ -0,0 +1,23 @@
1
+ import path from "node:path"
2
+
3
+ import { completedMetadataDirFilePath, pendingMetadataDirFilePath } from "../config.mts"
4
+ import { readVideoMetadataFile } from "./readVideoMetadataFile.mts"
5
+
6
+ export const getVideo = async (ownerId: string, videoId: string) => {
7
+ const videoFileName = `${ownerId}_${videoId}.json`
8
+
9
+ const completedVideoMetadataFilePath = path.join(completedMetadataDirFilePath, videoFileName)
10
+ const pendingVideoMetadataFilePath = path.join(pendingMetadataDirFilePath, videoFileName)
11
+
12
+ try {
13
+ const completedVideo = await readVideoMetadataFile(completedVideoMetadataFilePath)
14
+ return completedVideo
15
+ } catch (err) {
16
+ try {
17
+ const pendingVideo = await readVideoMetadataFile(pendingVideoMetadataFilePath)
18
+ return pendingVideo
19
+ } catch (err) {
20
+ throw new Error(`couldn't find video ${videoId} for owner ${ownerId}`)
21
+ }
22
+ }
23
+ }
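Usage is straightforward: `getVideo` looks in the completed metadata folder first, then falls back to the pending one, and throws if neither file exists. A minimal caller, using only fields that appear elsewhere in this commit (`id`, `status`, `progressPercent`):

```typescript
// Minimal usage sketch for getVideo()
import { getVideo } from "./getVideo.mts"

export const printVideoProgress = async (ownerId: string, videoId: string) => {
  const video = await getVideo(ownerId, videoId) // throws if the video is unknown
  console.log(`video ${video.id}: ${video.status} (${video.progressPercent}%)`)
}
```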
src/scheduler/getVideoStatus.mts ADDED
@@ -0,0 +1,13 @@
1
+ import { Video, VideoStatus } from "../types.mts"
2
+
3
+ import { getVideo } from "./getVideo.mts"
4
+
5
+ export const getVideoStatus = async (video: Video): Promise<VideoStatus> => {
6
+ try {
7
+ const { status } = await getVideo(video.ownerId, video.id)
8
+ return status
9
+ } catch (err) {
10
+ console.log(`failed to get the video status.. weird`)
11
+ }
12
+ return "unknown"
13
+ }
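The status strings juggled by these scheduler files ("pending", "pause", "abort", "delete", "completed", plus the "unknown" fallback above) imply a union type along these lines in `src/types.mts`; the actual definition is not visible in this view, so treat this as an inferred sketch:

```typescript
// Inferred from how video.status is read and written in this commit (not copied from src/types.mts)
export type VideoStatus =
  | "pending"   // queued or currently being processed
  | "pause"     // the owner asked to pause processing
  | "abort"     // the owner asked to stop; the video is then archived as completed
  | "delete"    // the owner asked to remove the video and all of its files
  | "completed" // every shot was generated and the final video was assembled
  | "unknown"   // fallback when the metadata file cannot be read
```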
src/scheduler/markVideoAsPending.mts ADDED
@@ -0,0 +1,25 @@
1
+ import { updatePendingVideo } from "./updatePendingVideo.mts"
2
+ import { getVideo } from "./getVideo.mts"
3
+
4
+ export const markVideoAsPending = async (ownerId: string, videoId: string) => {
5
+ try {
6
+ const video = await getVideo(ownerId, videoId)
7
+ if (video.status === "abort" ) {
8
+ // actually, if we wanted to, we could resurrect it..
9
+ console.log(`cannot mark video as pending: video ${videoId} is aborted`)
10
+ } else if (video.status === "completed") {
11
+ console.log(`video ${videoId} is already completed`)
12
+ } else if (video.status === "delete") {
13
+ console.log(`cannot mark video as pending: video ${videoId} is marked for deletion`)
14
+ } else if (video.status === "pending") {
15
+ console.log(`video ${videoId} is already pending`)
16
+ } else {
17
+ video.status = "pending"
18
+ await updatePendingVideo(video)
19
+ return true
20
+ }
21
+ } catch (err) {
22
+ console.error(`failed to mark video as pending ${videoId}`)
23
+ }
24
+ return false
25
+ }
src/scheduler/markVideoAsToAbort.mts ADDED
@@ -0,0 +1,22 @@
1
+ import { updatePendingVideo } from "./updatePendingVideo.mts"
2
+ import { getVideo } from "./getVideo.mts"
3
+
4
+ export const markVideoAsToAbort = async (ownerId: string, videoId: string) => {
5
+ try {
6
+ const video = await getVideo(ownerId, videoId)
7
+ if (video.status === "abort" ) {
8
+ console.log(`video ${videoId} is already aborted`)
9
+ } else if (video.status === "delete") {
10
+ console.log(`cannot abort: video ${videoId} is marked for deletion`)
11
+ } else if (video.status === "completed") {
12
+ console.log(`cannot abort: video ${videoId} is completed`)
13
+ } {
14
+ video.status = "abort"
15
+ await updatePendingVideo(video)
16
+ return true
17
+ }
18
+ } catch (err) {
19
+ console.error(`failed to abort video ${videoId}`)
20
+ }
21
+ return false
22
+ }
src/scheduler/markVideoAsToDelete.mts ADDED
@@ -0,0 +1,23 @@
1
+ import { updatePendingVideo } from "./updatePendingVideo.mts"
2
+ import { getVideo } from "./getVideo.mts"
3
+ import { deleteVideo } from "./deleteVideo.mts"
4
+
5
+ export const markVideoAsToDelete = async (ownerId: string, videoId: string) => {
6
+ try {
7
+ const video = await getVideo(ownerId, videoId)
8
+ if (video.status === "delete" ) {
9
+ console.log(`video ${videoId} is already marked for deletion`)
10
+ } else if (video.status === "completed" ) {
11
+ console.log(`video ${videoId} is completed: we can delete immediately`)
12
+ await deleteVideo(ownerId, videoId)
13
+ return true
14
+ } else {
15
+ video.status = "delete"
16
+ await updatePendingVideo(video)
17
+ return true
18
+ }
19
+ } catch (err) {
20
+ console.error(`failed to delete video ${videoId}`)
21
+ }
22
+ return false
23
+ }
src/scheduler/markVideoAsToPause.mts ADDED
@@ -0,0 +1,24 @@
1
+ import { updatePendingVideo } from "./updatePendingVideo.mts"
2
+ import { getVideo } from "./getVideo.mts"
3
+
4
+ export const markVideoAsToPause = async (ownerId: string, videoId: string) => {
5
+ try {
6
+ const video = await getVideo(ownerId, videoId)
7
+ if (video.status === "abort" ) {
8
+ console.log(`cannot pause: video ${videoId} is being aborted`)
9
+ } else if (video.status === "completed") {
10
+ console.log(`cannot pause: video ${videoId} is completed`)
11
+ } else if (video.status === "delete") {
12
+ console.log(`cannot pause: video ${videoId} is marked for deletion`)
13
+ } else if (video.status === "pause") {
14
+ console.log(`video ${videoId} is already paused`)
15
+ } else {
16
+ video.status = "pause"
17
+ await updatePendingVideo(video)
18
+ return true
19
+ }
20
+ } catch (err) {
21
+ console.error(`failed to mark video as paused ${videoId}`)
22
+ }
23
+ return false
24
+ }
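Together, `markVideoAsPending` and the three `markVideoAsTo*` helpers are the write side of the video lifecycle; the HTTP layer that exposes them lives in `src/index.mts`, which this truncated view does not show. A hypothetical Express wiring (routes and response shapes are assumptions, not the commit's actual API):

```typescript
// Hypothetical route wiring for the status helpers above (illustration only)
import express from "express"
import { markVideoAsPending } from "./scheduler/markVideoAsPending.mts"
import { markVideoAsToPause } from "./scheduler/markVideoAsToPause.mts"
import { markVideoAsToAbort } from "./scheduler/markVideoAsToAbort.mts"
import { markVideoAsToDelete } from "./scheduler/markVideoAsToDelete.mts"

const app = express()

app.post("/:ownerId/:videoId/pause", async (req, res) => {
  res.json({ success: await markVideoAsToPause(req.params.ownerId, req.params.videoId) })
})
app.post("/:ownerId/:videoId/resume", async (req, res) => {
  res.json({ success: await markVideoAsPending(req.params.ownerId, req.params.videoId) })
})
app.post("/:ownerId/:videoId/abort", async (req, res) => {
  res.json({ success: await markVideoAsToAbort(req.params.ownerId, req.params.videoId) })
})
app.delete("/:ownerId/:videoId", async (req, res) => {
  res.json({ success: await markVideoAsToDelete(req.params.ownerId, req.params.videoId) })
})
```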
src/scheduler/{processTask.mts β†’ processVideo.mts} RENAMED
@@ -1,44 +1,49 @@
1
  import { v4 as uuidv4 } from "uuid"
2
 
3
- import { saveCompletedTask } from "./saveCompletedTask.mts"
4
- import { savePendingTask } from "./savePendingTask.mts"
5
- import { updatePendingTask } from "./updatePendingTask.mts"
6
- import { VideoShot, VideoTask } from "../types.mts"
7
- import { downloadFileToTmp } from "../utils/downloadFileToTmp.mts"
8
  import { generateVideo } from "../production/generateVideo.mts"
9
- import { copyVideoFromTmpToPending } from "../utils/copyVideoFromTmpToPending.mts"
10
- import { copyVideoFromTmpToCompleted } from "../utils/copyVideoFromTmpToCompleted.mts"
11
  import { upscaleVideo } from "../production/upscaleVideo.mts"
12
  import { interpolateVideo } from "../production/interpolateVideo.mts"
13
  import { postInterpolation } from "../production/postInterpolation.mts"
14
- import { moveVideoFromPendingToCompleted } from "../utils/moveVideoFromPendingToCompleted.mts"
15
  import { assembleShots } from "../production/assembleShots.mts"
16
- import { copyVideoFromPendingToCompleted } from "../utils/copyVideoFromPendingToCompleted.mts"
17
  import { generateAudio } from "../production/generateAudio.mts"
18
- import { mergeAudio } from "../production/mergeAudio.mts"
19
  import { addAudioToVideo } from "../production/addAudioToVideo.mts"
20
 
21
- export const processTask = async (task: VideoTask) => {
22
- console.log(`processing video task ${task.id}`)
 
23
 
24
- // something isn't right, the task is already completed
25
- if (task.completed) {
26
- console.log(`video task ${task.id} is already completed`)
27
- await saveCompletedTask(task)
28
- return
29
- }
30
 
31
- // always count 1 more step, for the final assembly
 
32
 
33
- let nbTotalSteps = 1
34
 
35
- for (const shot of task.shots) {
 
 
 
 
36
  nbTotalSteps += shot.nbTotalSteps
37
  }
38
 
39
  let nbCompletedSteps = 0
40
 
41
- for (const shot of task.shots) {
 
 
 
 
 
 
 
 
 
42
  nbCompletedSteps += shot.nbCompletedSteps
43
 
44
  // skip shots completed previously
@@ -56,6 +61,8 @@ export const processTask = async (task: VideoTask) => {
56
  // const nbFramesForBaseModel = Math.min(3, Math.max(1, Math.round(duration))) * 8
57
  const nbFramesForBaseModel = 24
58
 
 
 
59
  if (!shot.hasGeneratedPreview) {
60
  console.log("generating a preview of the final result..")
61
  let generatedPreviewVideoUrl = ""
@@ -73,21 +80,20 @@ export const processTask = async (task: VideoTask) => {
73
 
74
  await copyVideoFromTmpToPending(shot.fileName)
75
 
76
- await copyVideoFromPendingToCompleted(shot.fileName, task.fileName)
77
 
78
  shot.hasGeneratedPreview = true
79
  shot.nbCompletedSteps++
80
  nbCompletedSteps++
81
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
82
- task.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
83
-
84
- await updatePendingTask(task)
85
 
 
86
  } catch (err) {
87
  console.error(`failed to generate preview for shot ${shot.id} (${err})`)
88
  // something is wrong, let's put the whole thing back into the queue
89
- task.error = `failed to generate preview for shot ${shot.id} (will try again later)`
90
- await updatePendingTask(task)
91
  break
92
  }
93
 
@@ -117,16 +123,17 @@ export const processTask = async (task: VideoTask) => {
117
  shot.nbCompletedSteps++
118
  nbCompletedSteps++
119
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
120
- task.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
121
 
122
- await copyVideoFromPendingToCompleted(shot.fileName, task.fileName)
123
 
124
- await updatePendingTask(task)
125
  } catch (err) {
126
  console.error(`failed to generate shot ${shot.id} (${err})`)
127
  // something is wrong, let's put the whole thing back into the queue
128
- task.error = `failed to generate shot ${shot.id} (will try again later)`
129
- await updatePendingTask(task)
 
130
  break
131
  }
132
 
@@ -141,16 +148,18 @@ export const processTask = async (task: VideoTask) => {
141
  shot.nbCompletedSteps++
142
  nbCompletedSteps++
143
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
144
- task.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
145
 
146
- await copyVideoFromPendingToCompleted(shot.fileName, task.fileName)
 
 
147
 
148
- await updatePendingTask(task)
149
  } catch (err) {
150
  console.error(`failed to upscale shot ${shot.id} (${err})`)
151
  // something is wrong, let's put the whole thing back into the queue
152
- task.error = `failed to upscale shot ${shot.id} (will try again later)`
153
- await updatePendingTask(task)
 
154
  break
155
  }
156
  }
@@ -182,17 +191,17 @@ export const processTask = async (task: VideoTask) => {
182
  shot.nbCompletedSteps++
183
  nbCompletedSteps++
184
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
185
- task.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
186
 
187
- await copyVideoFromPendingToCompleted(shot.fileName, task.fileName)
188
 
189
- await updatePendingTask(task)
190
 
191
  } catch (err) {
192
  console.error(`failed to interpolate shot ${shot.id} (${err})`)
193
  // something is wrong, let's put the whole thing back into the queue
194
- task.error = `failed to interpolate shot ${shot.id} (will try again later)`
195
- await updatePendingTask(task)
196
  break
197
  }
198
  }
@@ -215,22 +224,22 @@ export const processTask = async (task: VideoTask) => {
215
  shot.nbCompletedSteps++
216
  nbCompletedSteps++
217
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
218
- task.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
219
 
220
- await copyVideoFromPendingToCompleted(shot.fileName, task.fileName)
221
 
222
- await updatePendingTask(task)
223
  } catch (err) {
224
  console.error(`failed to post-process shot ${shot.id} (${err})`)
225
  // something is wrong, let's put the whole thing back into the queue
226
- task.error = `failed to post-process shot ${shot.id} (will try again later)`
227
- await updatePendingTask(task)
228
  break
229
  }
230
  }
231
 
232
 
233
- let foregroundAudioFileName = `${task.ownerId}_${task.id}_${shot.id}_${uuidv4()}.m4a`
234
 
235
  if (!shot.hasGeneratedForegroundAudio) {
236
  if (shot.foregroundAudioPrompt) {
@@ -243,19 +252,19 @@ export const processTask = async (task: VideoTask) => {
243
  shot.nbCompletedSteps++
244
  nbCompletedSteps++
245
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
246
- task.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
247
 
248
  await addAudioToVideo(shot.fileName, foregroundAudioFileName)
249
 
250
- await copyVideoFromPendingToCompleted(shot.fileName, task.fileName)
251
 
252
- await updatePendingTask(task)
253
 
254
  } catch (err) {
255
  console.error(`failed to generate foreground audio for ${shot.id} (${err})`)
256
  // something is wrong, let's put the whole thing back into the queue
257
- task.error = `failed to generate foreground audio ${shot.id} (will try again later)`
258
- await updatePendingTask(task)
259
  break
260
  }
261
  } else {
@@ -263,8 +272,8 @@ export const processTask = async (task: VideoTask) => {
263
  shot.nbCompletedSteps++
264
  nbCompletedSteps++
265
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
266
- task.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
267
- await updatePendingTask(task)
268
  }
269
  }
270
 
@@ -272,56 +281,54 @@ export const processTask = async (task: VideoTask) => {
272
  shot.completedAt = new Date().toISOString()
273
  shot.progressPercent = 100
274
 
275
- task.nbCompletedShots++
276
 
277
- await updatePendingTask(task)
278
  }
279
 
280
  console.log(`end of the loop:`)
281
- console.log(`nb completed shots: ${task.nbCompletedShots}`)
282
- console.log(`len of the shot array: ${task.shots.length}`)
283
 
284
- if (task.nbCompletedShots === task.shots.length) {
285
- console.log(`we have completed the whole video sequence!`)
286
- console.log(`assembling the video..`)
287
 
288
- if (task.shots.length === 1) {
 
 
 
 
 
 
289
  console.log(`we only have one shot, so this gonna be easy`)
290
- task.hasAssembledVideo = true
291
 
292
  // the single shot (so, the first) becomes the final movie
293
- await moveVideoFromPendingToCompleted(task.shots[0].fileName, task.fileName)
294
-
295
- await updatePendingTask(task)
296
- }
297
 
298
- if (!task.hasAssembledVideo) {
299
- console.log(`assembling the ${task.shots.length} shots together (might take a while)`)
300
- try {
301
- await assembleShots(task.shots, task.fileName)
302
- console.log(`finished assembling the ${task.shots.length} shots together!`)
 
303
 
304
- await moveVideoFromPendingToCompleted(task.fileName)
305
 
306
- task.hasAssembledVideo = true
307
 
308
- await updatePendingTask(task)
309
- } catch (err) {
310
- console.error(`failed to assemble the shots together (${err})`)
311
- // something is wrong, let's put the whole thing back into the queue
312
- task.error = `failed to assemble the shots together (will try again later)`
313
- await updatePendingTask(task)
314
- return
 
315
  }
316
  }
317
 
318
  nbCompletedSteps++
319
- task.progressPercent = 100
320
- task.completed = true
321
- task.completedAt = new Date().toISOString()
322
- await updatePendingTask(task)
323
-
324
- console.log(`moving task to completed tasks..`)
325
- await saveCompletedTask(task)
326
  }
327
  }
 
1
  import { v4 as uuidv4 } from "uuid"
2
 
3
+ import { Video } from "../types.mts"
4
+
 
 
 
5
  import { generateVideo } from "../production/generateVideo.mts"
 
 
6
  import { upscaleVideo } from "../production/upscaleVideo.mts"
7
  import { interpolateVideo } from "../production/interpolateVideo.mts"
8
  import { postInterpolation } from "../production/postInterpolation.mts"
 
9
  import { assembleShots } from "../production/assembleShots.mts"
 
10
  import { generateAudio } from "../production/generateAudio.mts"
 
11
  import { addAudioToVideo } from "../production/addAudioToVideo.mts"
12
 
13
+ import { downloadFileToTmp } from "../utils/downloadFileToTmp.mts"
14
+ import { copyVideoFromTmpToPending } from "../utils/copyVideoFromTmpToPending.mts"
15
+ import { copyVideoFromPendingToCompleted } from "../utils/copyVideoFromPendingToCompleted.mts"
16
 
17
+ import { saveAndCheckIfNeedToStop } from "./saveAndCheckIfNeedToStop.mts"
18
+ import { enrichVideoSpecsUsingLLM } from "../llm/enrichVideoSpecsUsingLLM.mts"
19
+
20
+ export const processVideo = async (video: Video) => {
 
 
21
 
22
+ // just an additional precaution, for consistency and robustness
23
+ if (["pause", "completed", "abort", "delete"].includes(video.status)) { return }
24
 
25
+ console.log(`processing video ${video.id}`)
26
 
27
+ // always count 2 more steps: 1 for the LLM, 1 for the final assembly
28
+
29
+ let nbTotalSteps = 2
30
+
31
+ for (const shot of video.shots) {
32
  nbTotalSteps += shot.nbTotalSteps
33
  }
34
 
35
  let nbCompletedSteps = 0
36
 
37
+ if (!video.hasGeneratedSpecs) {
38
+ await enrichVideoSpecsUsingLLM(video)
39
+
40
+ nbCompletedSteps++
41
+ video.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
42
+
43
+ if (await saveAndCheckIfNeedToStop(video)) { return }
44
+ }
45
+
46
+ for (const shot of video.shots) {
47
  nbCompletedSteps += shot.nbCompletedSteps
48
 
49
  // skip shots completed previously
 
61
  // const nbFramesForBaseModel = Math.min(3, Math.max(1, Math.round(duration))) * 8
62
  const nbFramesForBaseModel = 24
63
 
64
+ if (await saveAndCheckIfNeedToStop(video)) { return }
65
+
66
  if (!shot.hasGeneratedPreview) {
67
  console.log("generating a preview of the final result..")
68
  let generatedPreviewVideoUrl = ""
 
80
 
81
  await copyVideoFromTmpToPending(shot.fileName)
82
 
83
+ await copyVideoFromPendingToCompleted(shot.fileName, video.fileName)
84
 
85
  shot.hasGeneratedPreview = true
86
  shot.nbCompletedSteps++
87
  nbCompletedSteps++
88
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
89
+ video.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
 
 
90
 
91
+ if (await saveAndCheckIfNeedToStop(video)) { return }
92
  } catch (err) {
93
  console.error(`failed to generate preview for shot ${shot.id} (${err})`)
94
  // something is wrong, let's put the whole thing back into the queue
95
+ video.error = `failed to generate preview for shot ${shot.id} (will try again later)`
96
+ if (await saveAndCheckIfNeedToStop(video)) { return }
97
  break
98
  }
99
 
 
123
  shot.nbCompletedSteps++
124
  nbCompletedSteps++
125
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
126
+ video.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
127
 
128
+ await copyVideoFromPendingToCompleted(shot.fileName, video.fileName)
129
 
130
+ if (await saveAndCheckIfNeedToStop(video)) { return }
131
  } catch (err) {
132
  console.error(`failed to generate shot ${shot.id} (${err})`)
133
  // something is wrong, let's put the whole thing back into the queue
134
+ video.error = `failed to generate shot ${shot.id} (will try again later)`
135
+ if (await saveAndCheckIfNeedToStop(video)) { return }
136
+
137
  break
138
  }
139
 
 
148
  shot.nbCompletedSteps++
149
  nbCompletedSteps++
150
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
151
+ video.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
152
 
153
+ await copyVideoFromPendingToCompleted(shot.fileName, video.fileName)
154
+
155
+ if (await saveAndCheckIfNeedToStop(video)) { return }
156
 
 
157
  } catch (err) {
158
  console.error(`failed to upscale shot ${shot.id} (${err})`)
159
  // something is wrong, let's put the whole thing back into the queue
160
+ video.error = `failed to upscale shot ${shot.id} (will try again later)`
161
+ if (await saveAndCheckIfNeedToStop(video)) { return }
162
+
163
  break
164
  }
165
  }
 
191
  shot.nbCompletedSteps++
192
  nbCompletedSteps++
193
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
194
+ video.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
195
 
196
+ await copyVideoFromPendingToCompleted(shot.fileName, video.fileName)
197
 
198
+ if (await saveAndCheckIfNeedToStop(video)) { return }
199
 
200
  } catch (err) {
201
  console.error(`failed to interpolate shot ${shot.id} (${err})`)
202
  // something is wrong, let's put the whole thing back into the queue
203
+ video.error = `failed to interpolate shot ${shot.id} (will try again later)`
204
+ if (await saveAndCheckIfNeedToStop(video)) { return }
205
  break
206
  }
207
  }
 
224
  shot.nbCompletedSteps++
225
  nbCompletedSteps++
226
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
227
+ video.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
228
 
229
+ await copyVideoFromPendingToCompleted(shot.fileName, video.fileName)
230
 
231
+ if (await saveAndCheckIfNeedToStop(video)) { return }
232
  } catch (err) {
233
  console.error(`failed to post-process shot ${shot.id} (${err})`)
234
  // something is wrong, let's put the whole thing back into the queue
235
+ video.error = `failed to post-process shot ${shot.id} (will try again later)`
236
+ if (await saveAndCheckIfNeedToStop(video)) { return }
237
  break
238
  }
239
  }
240
 
241
 
242
+ let foregroundAudioFileName = `${video.ownerId}_${video.id}_${shot.id}_${uuidv4()}.m4a`
243
 
244
  if (!shot.hasGeneratedForegroundAudio) {
245
  if (shot.foregroundAudioPrompt) {
 
252
  shot.nbCompletedSteps++
253
  nbCompletedSteps++
254
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
255
+ video.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
256
 
257
  await addAudioToVideo(shot.fileName, foregroundAudioFileName)
258
 
259
+ await copyVideoFromPendingToCompleted(shot.fileName, video.fileName)
260
 
261
+ if (await saveAndCheckIfNeedToStop(video)) { return }
262
 
263
  } catch (err) {
264
  console.error(`failed to generate foreground audio for ${shot.id} (${err})`)
265
  // something is wrong, let's put the whole thing back into the queue
266
+ video.error = `failed to generate foreground audio ${shot.id} (will try again later)`
267
+ if (await saveAndCheckIfNeedToStop(video)) { return }
268
  break
269
  }
270
  } else {
 
272
  shot.nbCompletedSteps++
273
  nbCompletedSteps++
274
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)
275
+ video.progressPercent = Math.round((nbCompletedSteps / nbTotalSteps) * 100)
276
+ if (await saveAndCheckIfNeedToStop(video)) { return }
277
  }
278
  }
279
 
 
281
  shot.completedAt = new Date().toISOString()
282
  shot.progressPercent = 100
283
 
284
+ video.nbCompletedShots++
285
 
286
+ if (await saveAndCheckIfNeedToStop(video)) { return }
287
  }
288
 
289
  console.log(`end of the loop:`)
290
+ console.log(`nb completed shots: ${video.nbCompletedShots}`)
291
+ console.log(`len of the shot array: ${video.shots.length}`)
292
 
293
+ // now time to check the end game
 
 
294
 
295
+ if (video.nbCompletedShots === video.shots.length) {
296
+ console.log(`we have generated each individual shot!`)
297
+ console.log(`assembling the final video..`)
298
+
299
+ if (!video.hasAssembledVideo) {
300
+
301
+ if (video.shots.length === 1) {
302
  console.log(`we only have one shot, so this gonna be easy`)
303
+ video.hasAssembledVideo = true
304
 
305
  // the single shot (so, the first) becomes the final movie
306
+ await copyVideoFromPendingToCompleted(video.shots[0].fileName, video.fileName)
 
 
 
307
 
308
+ if (await saveAndCheckIfNeedToStop(video)) { return }
309
+ } else {
310
+ console.log(`assembling ${video.shots.length} shots together, might take a while`)
311
+ try {
312
+ await assembleShots(video.shots, video.fileName)
313
+ console.log(`finished assembling the ${video.shots.length} shots together!`)
314
 
315
+ await copyVideoFromPendingToCompleted(video.fileName)
316
 
317
+ video.hasAssembledVideo = true
318
 
319
+ if (await saveAndCheckIfNeedToStop(video)) { return }
320
+ } catch (err) {
321
+ console.error(`failed to assemble the shots together (${err})`)
322
+ // something is wrong, let's put the whole thing back into the queue
323
+ video.error = `failed to assemble the shots together (will try again later)`
324
+ if (await saveAndCheckIfNeedToStop(video)) { return }
325
+ return
326
+ }
327
  }
328
  }
329
 
330
  nbCompletedSteps++
331
+ video.completed = true
332
+ if (await saveAndCheckIfNeedToStop(video)) { return }
 
 
 
 
 
333
  }
334
  }
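The progress bookkeeping above is worth spelling out, since it is easy to lose in the diff: the video-level total now counts two extra steps on top of the per-shot steps (one for the LLM enrichment, one for the final assembly), and both the shot and the video percentages use the same rounding formula:

```typescript
// Progress formula used throughout processVideo.mts.
// Example: with 3 shots of 5 steps each, nbTotalSteps = 2 + 3 * 5 = 17,
// so finishing only the LLM enrichment step reports Math.round((1 / 17) * 100) = 6%.
const computeProgressPercent = (nbCompletedSteps: number, nbTotalSteps: number): number =>
  Math.round((nbCompletedSteps / nbTotalSteps) * 100)
```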
src/scheduler/readTask.mts DELETED
@@ -1,11 +0,0 @@
1
- import { promises as fs } from "node:fs"
2
-
3
- import { VideoTask } from "../types.mts"
4
-
5
- export const readTask = async (taskFilePath: string): Promise<VideoTask> => {
6
- const task = JSON.parse(
7
- await fs.readFile(taskFilePath, 'utf8')
8
- ) as VideoTask
9
-
10
- return task
11
- }
src/scheduler/readTasks.mts DELETED
@@ -1,45 +0,0 @@
1
- import path from "node:path"
2
- import { promises as fs } from "node:fs"
3
-
4
- import { VideoTask } from "../types.mts"
5
- import { readTask } from "./readTask.mts"
6
-
7
-
8
- export const readTasks = async (taskDirFilePath: string, ownerId?: string): Promise<VideoTask[]> => {
9
-
10
- let tasksFiles: string[] = []
11
- try {
12
- const filesInDir = await fs.readdir(taskDirFilePath)
13
- // console.log("filesInDir:", filesInDir)
14
-
15
- // we only keep valid files (in UUID.json format)
16
- tasksFiles = filesInDir.filter(fileName =>
17
- fileName.match(/[a-z0-9\-_]\.json/i) && (ownerId ? fileName.includes(ownerId): true)
18
- )
19
-
20
- // console.log("tasksfiles:", tasksFiles)
21
- } catch (err) {
22
- console.log(`failed to read tasks: ${err}`)
23
- }
24
-
25
- const tasks: VideoTask[] = []
26
-
27
- for (const taskFileName of tasksFiles) {
28
- // console.log("taskFileName:", taskFileName)
29
- const taskFilePath = path.join(taskDirFilePath, taskFileName)
30
- try {
31
- const task = await readTask(taskFilePath)
32
- tasks.push(task)
33
- } catch (parsingErr) {
34
- console.log(`failed to read ${taskFileName}: ${parsingErr}`)
35
- console.log(`deleting corrupted file ${taskFileName}`)
36
- try {
37
- await fs.unlink(taskFilePath)
38
- } catch (unlinkErr) {
39
- console.log(`failed to unlink ${taskFileName}: ${unlinkErr}`)
40
- }
41
- }
42
- }
43
-
44
- return tasks
45
- }
src/scheduler/readVideoMetadataFile.mts ADDED
@@ -0,0 +1,11 @@
1
+ import { promises as fs } from "node:fs"
2
+
3
+ import { Video } from "../types.mts"
4
+
5
+ export const readVideoMetadataFile = async (videoMetadataFilePath: string): Promise<Video> => {
6
+ const video = JSON.parse(
7
+ await fs.readFile(videoMetadataFilePath, 'utf8')
8
+ ) as Video
9
+
10
+ return video
11
+ }
src/scheduler/readVideoMetadataFiles.mts ADDED
@@ -0,0 +1,44 @@
1
+ import path from "node:path"
2
+ import { promises as fs } from "node:fs"
3
+
4
+ import { Video } from "../types.mts"
5
+ import { readVideoMetadataFile } from "./readVideoMetadataFile.mts"
6
+
7
+ export const readVideoMetadataFiles = async (videoMetadataDirFilePath: string, ownerId?: string): Promise<Video[]> => {
8
+
9
+ let videosMetadataFiles: string[] = []
10
+ try {
11
+ const filesInDir = await fs.readdir(videoMetadataDirFilePath)
12
+ // console.log("filesInDir:", filesInDir)
13
+
14
+ // we only keep valid files (in UUID.json format)
15
+ videosMetadataFiles = filesInDir.filter(fileName =>
16
+ fileName.match(/[a-z0-9\-_]\.json/i) && (ownerId ? fileName.includes(ownerId): true)
17
+ )
18
+
19
+ // console.log("videosfiles:", videosFiles)
20
+ } catch (err) {
21
+ console.log(`failed to read videos: ${err}`)
22
+ }
23
+
24
+ const videos: Video[] = []
25
+
26
+ for (const videoMetadataFileName of videosMetadataFiles) {
27
+ // console.log("videoFileName:", videoFileName)
28
+ const videoMetadataFilePath = path.join(videoMetadataDirFilePath, videoMetadataFileName)
29
+ try {
30
+ const videoMetadata = await readVideoMetadataFile(videoMetadataFilePath)
31
+ videos.push(videoMetadata)
32
+ } catch (parsingErr) {
33
+ console.log(`failed to read ${videoMetadataFileName}: ${parsingErr}`)
34
+ console.log(`deleting corrupted file ${videoMetadataFileName}`)
35
+ try {
36
+ await fs.unlink(videoMetadataFilePath)
37
+ } catch (unlinkErr) {
38
+ console.log(`failed to unlink ${videoMetadataFileName}: ${unlinkErr}`)
39
+ }
40
+ }
41
+ }
42
+
43
+ return videos
44
+ }
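Note that the filter above is looser than its comment claims: metadata files are written as `${ownerId}_${videoId}.json` (see `savePendingVideo` below), yet the regex only requires one `[a-z0-9-_]` character right before `.json`, and the optional `ownerId` match is a plain substring test. If stricter validation were wanted, it could look like the following sketch (an alternative, not what this commit does):

```typescript
// Alternative, stricter filename check (sketch only; the commit keeps the looser regex above)
const uuid = "[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}"
const metadataFileNamePattern = new RegExp(`^${uuid}_${uuid}\\.json$`, "i")

export const isVideoMetadataFileName = (fileName: string, ownerId?: string): boolean =>
  metadataFileNamePattern.test(fileName) &&
  (ownerId ? fileName.startsWith(`${ownerId}_`) : true)
```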
src/scheduler/saveAndCheckIfNeedToStop.mts ADDED
@@ -0,0 +1,68 @@
1
+ import { Video } from "../types.mts"
2
+ import { deleteVideo } from "./deleteVideo.mts"
3
+ import { getVideoStatus } from "./getVideoStatus.mts"
4
+ import { saveCompletedVideo } from "./saveCompletedVideo.mts"
5
+ import { updatePendingVideo } from "./updatePendingVideo.mts"
6
+
7
+ export const saveAndCheckIfNeedToStop = async (video: Video): Promise<boolean> => {
8
+
9
+ const status = await getVideoStatus(video)
10
+ const isToDelete = status === "delete"
11
+ const isToAbort = status === "abort"
12
+ const isToPause = status === "pause"
13
+
14
+ // well, normally no other process is supposed to mark a video as "completed"
15
+ // while we are busy processing it
16
+ // but maybe in the future, we can afford to waste processing power to do the "who goes faster"..?
17
+ // const isCompleted = status === "completed"
18
+
19
+ const mustStop = isToAbort || isToPause || isToDelete
20
+
21
+ // deletion is the highest-priority check, as we can then just ignore all the rest
22
+ if (isToDelete) {
23
+ await deleteVideo(video.ownerId, video.id)
24
+ return mustStop
25
+ }
26
+
27
+ // then we give priority to the pending video: maybe it is done?
28
+ if (video.completed) {
29
+ console.log(`video ${video.id} is completed!`)
30
+ video.progressPercent = 100
31
+ video.completedAt = new Date().toISOString()
32
+ video.status = "completed"
33
+ await updatePendingVideo(video)
34
+ await saveCompletedVideo(video)
35
+ return mustStop
36
+ }
37
+
38
+
39
+ if (isToPause) {
40
+ console.log(`we've been requested to pause the video`)
41
+ video.status = "pause"
42
+ await updatePendingVideo(video)
43
+ return mustStop
44
+ }
45
+
46
+ if (isToAbort) {
47
+ console.log(`we've been requested to cancel the video`)
48
+
49
+ // we are not going to update the percentage, because we want to keep the
50
+ // info that we aborted mid-course
51
+ video.completed = true
52
+
53
+ // watch what we do here: we mark the video as completed
54
+ // that's because "abort" is a temporary status
55
+ video.status = "completed"
56
+
57
+ video.completedAt = new Date().toISOString()
58
+ await updatePendingVideo(video)
59
+ await saveCompletedVideo(video)
60
+
61
+ return mustStop
62
+ }
63
+
64
+ await updatePendingVideo(video)
65
+
66
+ // tell the loop whether it should stop or not
67
+ return mustStop
68
+ }
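This function is the cooperative cancellation point for the whole pipeline: `processVideo.mts` calls it after every expensive step and bails out of its loop as soon as it returns `true`. A simplified illustration of the calling convention (the real per-step work is elided):

```typescript
// Simplified illustration of the pattern used in processVideo.mts
import { Video, VideoShot } from "../types.mts"
import { saveAndCheckIfNeedToStop } from "./saveAndCheckIfNeedToStop.mts"

export const processOneStep = async (video: Video, shot: VideoShot): Promise<boolean> => {
  // ...generate preview / upscale / interpolate / add audio here...
  shot.nbCompletedSteps++
  shot.progressPercent = Math.round((shot.nbCompletedSteps / shot.nbTotalSteps) * 100)

  // persist progress, and report whether the owner asked to pause, abort or delete
  return await saveAndCheckIfNeedToStop(video)
}
```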
src/scheduler/saveCompletedTask.mts DELETED
@@ -1,12 +0,0 @@
1
- import path from "path"
2
-
3
- import { VideoTask } from "../types.mts"
4
- import { completedTasksDirFilePath, pendingTasksDirFilePath } from "../config.mts"
5
- import { moveFile } from "../utils/moveFile.mts"
6
-
7
- export const saveCompletedTask = async (task: VideoTask) => {
8
- const fileName = `${task.ownerId}_${task.id}.json`
9
- const pendingFilePath = path.join(pendingTasksDirFilePath, fileName)
10
- const completedFilePath = path.join(completedTasksDirFilePath, fileName)
11
- await moveFile(pendingFilePath, completedFilePath)
12
- }
src/scheduler/saveCompletedVideo.mts ADDED
@@ -0,0 +1,12 @@
1
+ import path from "path"
2
+
3
+ import { Video } from "../types.mts"
4
+ import { completedMetadataDirFilePath, pendingMetadataDirFilePath } from "../config.mts"
5
+ import { moveFile } from "../utils/moveFile.mts"
6
+
7
+ export const saveCompletedVideo = async (video: Video) => {
8
+ const metadataFileName = `${video.ownerId}_${video.id}.json`
9
+ const pendingMetadataFilePath = path.join(pendingMetadataDirFilePath, metadataFileName)
10
+ const completedMetadataFilePath = path.join(completedMetadataDirFilePath, metadataFileName)
11
+ await moveFile(pendingMetadataFilePath, completedMetadataFilePath)
12
+ }
src/scheduler/savePendingTask.mts DELETED
@@ -1,11 +0,0 @@
1
- import { promises as fs } from "node:fs"
2
- import path from "path"
3
-
4
- import { VideoTask } from "../types.mts"
5
- import { pendingTasksDirFilePath } from "../config.mts"
6
-
7
- export const savePendingTask = async (task: VideoTask) => {
8
- const fileName = `${task.ownerId}_${task.id}.json`
9
- const filePath = path.join(pendingTasksDirFilePath, fileName)
10
- await fs.writeFile(filePath, JSON.stringify(task, null, 2), "utf8")
11
- }
src/scheduler/savePendingVideo.mts ADDED
@@ -0,0 +1,11 @@
1
+ import { promises as fs } from "node:fs"
2
+ import path from "path"
3
+
4
+ import { Video } from "../types.mts"
5
+ import { pendingMetadataDirFilePath } from "../config.mts"
6
+
7
+ export const savePendingVideo = async (video: Video) => {
8
+ const fileName = `${video.ownerId}_${video.id}.json`
9
+ const filePath = path.join(pendingMetadataDirFilePath, fileName)
10
+ await fs.writeFile(filePath, JSON.stringify(video, null, 2), "utf8")
11
+ }
src/scheduler/{updatePendingTask.mts β†’ updatePendingVideo.mts} RENAMED
@@ -1,16 +1,16 @@
1
  import { promises as fs } from "node:fs"
2
  import path from "path"
3
 
4
- import { VideoTask } from "../types.mts"
5
- import { pendingTasksDirFilePath } from "../config.mts"
6
 
7
- export const updatePendingTask = async (task: VideoTask) => {
8
  try {
9
- const fileName = `${task.ownerId}_${task.id}.json`
10
- const filePath = path.join(pendingTasksDirFilePath, fileName)
11
- await fs.writeFile(filePath, JSON.stringify(task, null, 2), "utf8")
12
  } catch (err) {
13
- console.error(`Failed to update the task. Probably an issue with the serialized object or the file system: ${err}`)
14
  // we do not forward the exception, there is no need
15
  // we will just try again the job later (even if it means losing a bit of data)
16
  }
 
1
  import { promises as fs } from "node:fs"
2
  import path from "path"
3
 
4
+ import { Video } from "../types.mts"
5
+ import { pendingMetadataDirFilePath } from "../config.mts"
6
 
7
+ export const updatePendingVideo = async (video: Video) => {
8
  try {
9
+ const fileName = `${video.ownerId}_${video.id}.json`
10
+ const filePath = path.join(pendingMetadataDirFilePath, fileName)
11
+ await fs.writeFile(filePath, JSON.stringify(video, null, 2), "utf8")
12
  } catch (err) {
13
+ console.error(`Failed to update the video. Probably an issue with the serialized object or the file system: ${err}`)
14
  // we do not forward the exception, there is no need
15
  // we will just try again the job later (even if it means losing a bit of data)
16
  }
src/tests/checkStatus.mts CHANGED
@@ -9,6 +9,6 @@ const response = await fetch(`${server}/${videoId}`, {
9
  });
10
 
11
  console.log('response:', response)
12
- const task = await response.json()
13
 
14
- console.log("task:", JSON.stringify(task, null, 2))
 
9
  });
10
 
11
  console.log('response:', response)
12
+ const video = await response.json()
13
 
14
+ console.log("video:", JSON.stringify(video, null, 2))
src/tests/submitVideo.mts CHANGED
@@ -18,6 +18,6 @@ const response = await fetch(`${server}/`, {
18
 
19
 
20
  console.log('response:', response)
21
- const task = await response.json()
22
 
23
- console.log("task:", JSON.stringify(task, null, 2))
 
18
 
19
 
20
  console.log('response:', response)
21
+ const video = await response.json()
22
 
23
+ console.log("video:", JSON.stringify(video, null, 2))