Mbonea committed on
Commit a00f760
1 Parent(s): e9c34f9

partially tested code for the swarm

App/Editor/Schema.py CHANGED
@@ -9,10 +9,13 @@ class LinkInfo(BaseModel):


 class Constants(BaseModel):
+    task: Optional[str]
+    chunk: Optional[int]
     duration: Optional[int]
     height: Optional[int]
     width: Optional[int]
     text: Optional[dict]
+    frames: Optional[dict]


 class Assets(BaseModel):
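
The new `task`, `chunk`, and `frames` fields carry the swarm bookkeeping: which distributed task a request belongs to, which chunk index this node renders, and the frame range assigned to that chunk. A minimal sketch of a Constants payload as the routes below use it (values are illustrative; note that create_chunks assigns a [start, end] list to `frames` even though the field is declared Optional[dict]):

constants = {
    "task": "demo-task",   # id of the distributed render task (illustrative)
    "chunk": 2,            # index of the chunk this node renders
    "duration": 300,       # total length; create_chunks treats it as a frame count
    "height": 1080,
    "width": 1920,
    "text": {},
    "frames": [150, 225],  # frame range assigned to this node
}
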
App/Editor/editorRoutes.py CHANGED
@@ -1,33 +1,84 @@
-from fastapi import APIRouter, HTTPException, status, BackgroundTasks
+from fastapi import APIRouter, HTTPException, status, BackgroundTasks, UploadFile, Query
 from .Schema import EditorRequest, TaskInfo
-from App.Worker import celery_task
+from App.Worker import celery_task, concatenate_videos
 from celery.result import AsyncResult
+import aiofiles, os, uuid, aiohttp
+from App import SERVER_STATE, Task

 videditor_router = APIRouter(tags=["vidEditor"])


 @videditor_router.post("/create-video")
 async def create_video(videoRequest: EditorRequest, background_task: BackgroundTasks):
-    # background_task.add_task(celery_task, videoRequest)
-    result = celery_task.delay(videoRequest)
+    background_task.add_task(celery_task, videoRequest)
     return {"task_id": "started"}


-@videditor_router.get("/progress/{task_id}", response_model=TaskInfo)
-async def progress(task_id: str):
-    task_result = AsyncResult(
-        task_id,
-    )
-    if not task_result.ready():
-        progress = task_result.info.get("progress", 0)
-        completed_tasks = task_result.info.get("completed_tasks", [])
-        return {
-            "task_id": task_id,
-            "progress": progress,
-            "completed_tasks": completed_tasks,
-        }
-    else:
-        raise HTTPException(
-            status_code=status.HTTP_404_NOT_FOUND,
-            detail="Task not found",
-        )
+@videditor_router.post("/create-chunks")
+async def create_chunks(videoRequest: EditorRequest, background_task: BackgroundTasks):
+    video_duration = videoRequest.constants.duration
+    task_id = uuid.uuid4()
+    new_task = Task(TASK_ID=task_id)
+
+    active_nodes = [
+        node
+        for node in SERVER_STATE.NODES
+        if await new_task._check_node_online(node.SPACE_HOST)
+    ]
+    number_of_nodes = len(active_nodes)
+    ranges = [
+        [i, i + number_of_nodes] for i in range(0, video_duration, number_of_nodes)
+    ]
+    for i, node in enumerate(active_nodes):
+        await new_task.add_node(node, i)
+
+    SERVER_STATE.TASKS[task_id] = new_task
+
+    async with aiohttp.ClientSession() as session:
+        for i, node in enumerate(active_nodes):
+            videoRequest.constants.frames = ranges[i]
+            if node.SPACE_HOST == SERVER_STATE.SPACE_HOST:
+                background_task.add_task(celery_task, videoRequest)
+            async with session.post(
+                "node.SPACE_HOST/create-video", json=videoRequest
+            ) as response:
+                if response.status != 200:
+                    raise HTTPException(
+                        status_code=response.status,
+                        detail="Failed to post request to node",
+                    )
+
+    return {"task_id": "started"}
+
+
+@videditor_router.post("/uploadfile/")
+async def create_file(
+    background_tasks: BackgroundTasks,
+    file: UploadFile,
+    node: str,
+    chunk: int,
+    task: str,
+):
+
+    chunk_directory = f"/tmp/Video/{task}"
+    file_name = f"{chunk_directory}/{chunk}.mp4"
+    # Create the directory if it does not exist
+    os.makedirs(chunk_directory, exist_ok=True)
+
+    try:
+        async with aiofiles.open(file_name, "wb") as f:
+            while contents := await file.read(1024 * 1):
+                await f.write(contents)
+
+    except Exception as e:
+        return {
+            "message": f"There was an error uploading the file, error message {str(e)} "
+        }
+    finally:
+        await file.close()
+    running_task = SERVER_STATE.TASKS[task]
+    running_task.mark_node_completed(node)
+    if running_task.is_completed():
+        background_tasks.add_task(concatenate_videos, chunk_directory)
+
+    return {"message": "File uploaded successfully"}
App/Worker.py CHANGED
@@ -3,7 +3,7 @@ import os, shutil, subprocess
 import uuid
 from urllib.parse import urlparse
 from subprocess import run
-from App import celery_config, bot
+from App import celery_config, bot, SERVER_STATE
 from typing import List
 from App.Editor.Schema import EditorRequest, LinkInfo, Assets, Constants
 from celery.signals import worker_process_init
@@ -11,6 +11,37 @@ from asgiref.sync import async_to_sync
 import json
 import os
 from pydantic import BaseModel
+from App.utilis import upload_file
+
+import subprocess
+
+
+def concatenate_videos(input_dir):
+    # Get a list of all the mp4 files in the input directory
+    files = sorted([f for f in os.listdir(input_dir) if f.endswith(".mp4")])
+
+    # Generate the input file list for ffmpeg
+    input_files = "|".join([f"file '{os.path.join(input_dir, f)}'" for f in files])
+
+    output_file = f"{input_dir}/final.mp4"
+    # Run the ffmpeg command to concatenate the videos
+    subprocess.run(
+        [
+            "ffmpeg",
+            "-f",
+            "concat",
+            "-safe",
+            "0",
+            "-i",
+            f"concat:{input_files}",
+            "-c",
+            "copy",
+            output_file,
+        ]
+    )
+    bot.start()
+    bot.send_file(-1002069945904, file=output_file, caption="finally done!")
+    return output_file


 class YouTubeUploadTask(BaseModel):
@@ -23,15 +54,15 @@ class YouTubeUploadTask(BaseModel):
     thumbnail: str = None


-celery = Celery()
-celery.config_from_object(celery_config)
+# celery = Celery()
+# celery.config_from_object(celery_config)
 # celery.conf.update(
 #     # Other Celery configuration settings
 #     CELERYD_LOG_LEVEL="DEBUG",  # Set log level to DEBUG for the worker
 # )


-@celery.task(name="CreateFile")
+# @celery.task(name="CreateFile")
 def create_json_file(assets: List[Assets], asset_dir: str):
     for asset in assets:
         filename = f"{asset.type.capitalize()}Sequences.json"
@@ -46,14 +77,22 @@ def create_json_file(assets: List[Assets], asset_dir: str):
         f.write(json_string)


-@celery.task(name="Constants")
+# @celery.task(name="Constants")
 def create_constants_json_file(constants: Constants, asset_dir: str):
+    temp_dir = asset_dir.replace("src/HelloWorld/Assets", "")
+    instrunction_file = os.path.join(temp_dir, "ServerInstructions.json")
     filename = "Constants.json"
     if constants:
         json_string = json.dumps(constants.dict())
     else:
         json_string = json.dumps({})
     os.makedirs(asset_dir, exist_ok=True)
+    with open(instrunction_file, "w") as f:
+        if constants.instrunctions:
+            f.write(json.dumps({"frames": constants.frames}))
+        else:
+            f.write(json.dumps({"frames": [0, constants.duration]}))
+
     with open(os.path.join(asset_dir, filename), "w") as f:
         f.write(json_string)

@@ -73,16 +112,12 @@ def download_with_wget(link, download_dir, filename):
     subprocess.run(["aria2c", link, "-d", download_dir, "-o", filename])


-@celery.task(name="CopyRemotion")
+# @celery.task(name="CopyRemotion")
 def copy_remotion_app(src: str, dest: str):
     shutil.copytree(src, dest)

-    # # create symbolic link to prevent multiple installs
-    # source_dir = os.path.join(src, "node_module")
-    # create_symlink(source_dir, target_dir=dest, symlink_name="node_module")

-
-@celery.task(name="Unsilence")
+# @celery.task(name="Unsilence")
 def unsilence(directory: str):
     output_dir = os.path.join(directory, "out/video.mp4")
     shortered_dir = os.path.join(directory, "out/temp.mp4")
@@ -91,13 +126,13 @@ def unsilence(directory: str):
     os.rename(shortered_dir, output_dir)


-@celery.task(name="InstallDependency")
+# @celery.task(name="InstallDependency")
 def install_dependencies(directory: str):
     os.chdir(directory)
     os.system("npm install")


-@celery.task(name="uploadTime")
+# @celery.task(name="uploadTime")
 def upload_video_to_youtube(task_data: dict):
     # Convert dict to Pydantic model
     task = YouTubeUploadTask(**task_data)
@@ -128,7 +163,7 @@ def upload_video_to_youtube(task_data: dict):
     return result.stdout


-@celery.task(name="DownloadAssets")
+# @celery.task(name="DownloadAssets")
 def download_assets(links: List[LinkInfo], temp_dir: str):
     public_dir = f"{temp_dir}/public"
     for link in links:
@@ -137,7 +172,7 @@ def download_assets(links: List[LinkInfo], temp_dir: str):
         download_with_wget(file_link, public_dir, file_name)


-@celery.task(name="RenderFile")
+# @celery.task(name="RenderFile")
 def render_video(directory: str, output_directory: str):
     os.chdir(directory)
     os.system(
@@ -146,51 +181,75 @@ def render_video(directory: str, output_directory: str):
     print("complete")


-@celery.task(name="send")
-def cleanup_temp_directory(
-    temp_dir: str, output_dir: str, chat_id: int = -1002069945904
+# @celery.task(name="send")
+async def cleanup_temp_directory(
+    temp_dir: str,
+    output_dir: str,
+    video_task: EditorRequest,
+    chat_id: int = -1002069945904,
 ):
+    video_folder_dir = f"/tmp/Video/{video_task.constants.task}"
     try:
-        print("sending...")
-        bot.start()
-        # bot.send_video(chat_id=chat_id,caption="Your Video Caption",video=output_dir)
-        bot.send_file(chat_id, file=output_dir, caption="Your video caption")
+        if not SERVER_STATE.MASTER:
+            await upload_file(
+                output_dir,
+                SERVER_STATE.SPACE_HOST,
+                video_task.constants.chunk,
+                video_task.constants.task,
+            )
+        else:
+
+            os.makedirs(video_folder_dir, exist_ok=True)
+            shutil.move(
+                output_dir, f"{video_folder_dir}/{video_task.constants.chunk}.mp4"
+            )
+
     except Exception as e:
         print(e)
     finally:
+        remotion_app_dir = os.path.join("/srv", "Remotion-app")
+        shutil.rmtree(remotion_app_dir)
+        # use the cache
+        shutil.copytree(temp_dir, remotion_app_dir)
+        if not SERVER_STATE.CACHED:
+            SERVER_STATE.CACHED = True
         # Cleanup: Remove the temporary directory
         shutil.rmtree(temp_dir, ignore_errors=True)


-@celery.task(name="All")
-def celery_task(video_task: EditorRequest):
+# @celery.task(name="All")
+async def celery_task(video_task: EditorRequest):
     remotion_app_dir = os.path.join("/srv", "Remotion-app")
     project_id = str(uuid.uuid4())
     temp_dir = f"/tmp/{project_id}"
     output_dir = f"/tmp/{project_id}/out/video.mp4"
     assets_dir = os.path.join(temp_dir, "src/HelloWorld/Assets")

-    # copy_remotion_app(remotion_app_dir, temp_dir),
-    # install_dependencies(temp_dir),
-    # create_constants_json_file(video_task.constants, assets_dir),
-    # create_json_file(video_task.assets, assets_dir),
-    # download_assets(video_task.links, temp_dir) if video_task.links else None,
-    # render_video(temp_dir, output_dir),
-    # unsilence(temp_dir),
-    # await cleanup_temp_directory(temp_dir, output_dir),
-
-    chain(
-        copy_remotion_app.si(remotion_app_dir, temp_dir),
-        install_dependencies.si(temp_dir),
-        create_constants_json_file.si(video_task.constants, assets_dir),
-        create_json_file.si(video_task.assets, assets_dir),
-        download_assets.si(video_task.links, temp_dir) if video_task.links else None,
-        render_video.si(temp_dir, output_dir),
-        # unsilence.si(temp_dir),
-        cleanup_temp_directory.si(temp_dir, output_dir),
-    ).apply_async(
-        # link_error=handle_error
-    )  # Link the tasks and handle errors
+    copy_remotion_app(remotion_app_dir, temp_dir)
+
+    # use the cached stuff
+    if not SERVER_STATE.CACHED:
+        install_dependencies(temp_dir)
+
+    create_constants_json_file(video_task.constants, assets_dir)
+    create_json_file(video_task.assets, assets_dir)
+    download_assets(video_task.links, temp_dir)
+    render_video(temp_dir, output_dir)
+    unsilence(temp_dir)
+    await cleanup_temp_directory(temp_dir, output_dir, video_task)
+
+    # chain(
+    #     copy_remotion_app.si(remotion_app_dir, temp_dir),
+    #     install_dependencies.si(temp_dir),
+    #     create_constants_json_file.si(video_task.constants, assets_dir),
+    #     create_json_file.si(video_task.assets, assets_dir),
+    #     download_assets.si(video_task.links, temp_dir) if video_task.links else None,
+    #     render_video.si(temp_dir, output_dir),
+    #     # unsilence.si(temp_dir),
+    #     cleanup_temp_directory.si(temp_dir, output_dir),
+    # ).apply_async(
+    #     # link_error=handle_error
+    # )  # Link the tasks and handle errors


 def handle_error(task_id, err, *args, **kwargs):
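
concatenate_videos above mixes ffmpeg's two concatenation mechanisms: it selects the concat demuxer with "-f concat" but feeds it a "|"-joined, concat-protocol style input instead of the list file the demuxer expects. A sketch of the demuxer form it appears to be aiming for (concatenate_videos_sketch is a hypothetical name; assumes the directory holds only the numbered chunk files written by the upload route):

import os
import subprocess


def concatenate_videos_sketch(input_dir: str) -> str:
    # Chunk files are named "<chunk>.mp4"; sort by chunk index, not lexicographically.
    files = sorted(
        (f for f in os.listdir(input_dir) if f.endswith(".mp4")),
        key=lambda name: int(os.path.splitext(name)[0]),
    )

    # The concat demuxer reads one "file '<path>'" line per input.
    list_file = os.path.join(input_dir, "inputs.txt")
    with open(list_file, "w") as f:
        f.write("\n".join(f"file '{os.path.join(input_dir, name)}'" for name in files))

    output_file = os.path.join(input_dir, "final.mp4")
    subprocess.run(
        ["ffmpeg", "-f", "concat", "-safe", "0", "-i", list_file, "-c", "copy", output_file],
        check=True,
    )
    return output_file
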
App/__init__.py CHANGED
@@ -1,12 +1,78 @@
 from pyrogram import Client
 from telethon.sync import TelegramClient
 from telethon.sessions import StringSession
-
+from typing import List, Optional, Dict
 import os
+import aiohttp
+from pydantic import BaseModel
+
+
+class Node(BaseModel):
+    MASTER: bool
+    SPACE_HOST: str
+
+
+class NodeTaskState(BaseModel):
+    NODE: Node
+    CHUNK: int
+    COMPLETED: bool
+
+
+class Task(BaseModel):
+    TASK_ID: str
+    COMPLETED: bool = False
+    NODES: Optional[List[NodeTaskState]] = []
+
+    def is_completed(self) -> bool:
+        for node in self.NODES:
+            if not node.COMPLETED:
+                return False
+        self.COMPLETED = True
+        return True
+
+    def mark_node_completed(self, space_host: str):
+        for state in self.NODES:
+            if state.NODE.SPACE_HOST == space_host:
+                state.COMPLETED = True
+                break

+    async def add_node(self, node: Node, CHUNK: int):
+        new_node_state = NodeTaskState(NODE=node, CHUNK=CHUNK, COMPLETED=False)
+        self.NODES.append(new_node_state)

+    @classmethod
+    async def _check_node_online(cls, space_host: str) -> bool:
+        try:
+            async with aiohttp.ClientSession(
+                timeout=aiohttp.ClientTimeout(total=5)
+            ) as session:
+                async with session.get(space_host) as response:
+                    return response.status == 200
+        except aiohttp.ClientError:
+            return False
+
+
+class ServerState(Node):
+    CACHED: bool
+    TASKS: Optional[Dict[str, Task]] = {}
+    NODES: Optional[list[Node]]
+    DB: str = "https://herokuserver-185316.firebaseio.com/"
+
+    def get_master(self) -> Optional[Node]:
+        for node in self.NODES:
+            if node.NODE.MASTER:
+                return node.NODE
+        return None
+
+
-TELEGRAM_SESSION = os.environ.get("TELEGRAM_SESSION")
+TELEGRAM_SESSION = os.environ.get("TELEGRAM_SESSION", "RANDOM_STRING")
 TELEGRAM_SESSION_PYROGRAM = os.environ.get("TELEGRAM_SESSION_PYROGRAM")
+MASTER_SERVER = bool(os.environ.get("MASTER", 0))
+SPACE_HOST = os.environ.get("SPACE_HOST", "RANDOM_STRING")
+
+
+SERVER_STATE = ServerState(CACHED=False, MASTER=MASTER_SERVER, SPACE_HOST=SPACE_HOST)
+

 bot: TelegramClient = TelegramClient(
     StringSession(TELEGRAM_SESSION),
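
A short sketch of how the bookkeeping classes above are meant to flow on the master: a Task collects one NodeTaskState per chunk via add_node, mark_node_completed flips a node's flag when its upload arrives, and is_completed reports (and records) whether every chunk is in. Host names are illustrative, and the import assumes the package's Telegram environment variables are set, since this module also builds the Telegram client at import time:

import asyncio

from App import Node, Task


async def demo() -> None:
    nodes = [
        Node(MASTER=True, SPACE_HOST="master.example"),
        Node(MASTER=False, SPACE_HOST="worker-1.example"),
    ]

    task = Task(TASK_ID="demo-task")
    for chunk, node in enumerate(nodes):
        await task.add_node(node, chunk)

    task.mark_node_completed("worker-1.example")
    print(task.is_completed())  # False: the master's own chunk is still pending

    task.mark_node_completed("master.example")
    print(task.is_completed())  # True, and task.COMPLETED is set as a side effect


asyncio.run(demo())
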
App/app.py CHANGED
@@ -1,18 +1,19 @@
 from fastapi import FastAPI, BackgroundTasks
 from .Editor.editorRoutes import videditor_router
 from App import bot
+from App.utilis import WorkerClient, SERVER_STATE

 app = FastAPI()
+manager = WorkerClient()


-# @app.on_event("startup")
-# async def startup_event():
-#     await bot.start()
-
-
-# @app.on_event("shutdown")
-# async def shutdown_event():
-#     await bot.stop()
+@app.on_event("startup")
+async def startup_event():
+    if SERVER_STATE.MASTER:
+        await bot.start()
+    response = await manager.register_worker()
+    if not response:
+        print("Error registering worker")


 @app.get("/")
App/utilis.py ADDED
@@ -0,0 +1,60 @@
+import aiohttp, asyncio
+from App import SERVER_STATE, Node
+
+import aiohttp
+
+
+async def upload_file(file_path: str, node: str, chunk: int, task: str):
+    master_node = SERVER_STATE.get_master()
+    url = f"http://{master_node.SPACE_HOST}/uploadfile/?node={node}&chunk={chunk}&task={task}"
+    async with aiohttp.ClientSession() as session:
+        headers = {"Transfer-Encoding": "chunked"}
+        with open(file_path, "rb") as file:
+            async with session.post(url, headers=headers, data=file) as response:
+                if response.status == 200:
+                    print("File uploaded successfully")
+                else:
+                    print("Failed to upload file")
+
+
+class WorkerClient:
+    base_url = SERVER_STATE.DB
+
+    async def register_worker(self):
+        async with aiohttp.ClientSession() as session:
+            data = {
+                "WORKER_ID": SERVER_STATE.SPACE_HOST,
+                "MASTER": SERVER_STATE.MASTER,
+                "HOST_NAME": SERVER_STATE.SPACE_HOST,
+                "SPACE_HOST": SERVER_STATE.SPACE_HOST,
+            }
+            response = await self.get_node()
+            if response:
+                return response
+
+            async with session.put(
+                f"{self.base_url}/nodes/{SERVER_STATE.SPACE_HOST}.json", json=data
+            ) as resp:
+                return await resp.json()
+
+    async def get_node(self):
+        async with aiohttp.ClientSession() as session:
+            async with session.get(
+                f"{self.base_url}/nodes/{SERVER_STATE.SPACE_HOST}.json"
+            ) as resp:
+                response = await resp.json()
+                return response
+
+    async def delete_node(self):
+        async with aiohttp.ClientSession() as session:
+            async with session.delete(
+                f"{self.base_url}/nodes/{SERVER_STATE.SPACE_HOST}.json"
+            ) as resp:
+                response = await resp.json()
+
+    async def get_all_nodes(self):
+        async with aiohttp.ClientSession() as session:
+            async with session.get(f"{self.base_url}/nodes.json") as resp:
+                response = await resp.json()
+                SERVER_STATE.NODES = [Node(**value) for value in response.values()]
+                return SERVER_STATE.NODES
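
WorkerClient drives a Firebase Realtime Database through its REST interface: register_worker PUTs this node under /nodes/<SPACE_HOST>.json, and get_all_nodes expects GET /nodes.json to return a map keyed by host whose values can populate Node. A sketch of the registry shape this assumes, with illustrative host names:

# Shape of the registry that get_all_nodes() reads from {DB}/nodes.json
# (register_worker also stores WORKER_ID and HOST_NAME alongside these keys).
nodes_payload = {
    "master.example": {"MASTER": True, "SPACE_HOST": "master.example"},
    "worker-1.example": {"MASTER": False, "SPACE_HOST": "worker-1.example"},
}
# get_all_nodes() turns each value into a Node and caches the list on SERVER_STATE.NODES.
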
Dockerfile CHANGED
@@ -1,17 +1,22 @@
 # Builder stage
 FROM python:3.10.0 as builder

+# Create a non-root user
 RUN useradd -ms /bin/bash admin

+# Set the working directory
 WORKDIR /srv
-RUN chown -R admin:admin /srv
-RUN chmod -R 755 /srv

-# Install dependencies
-RUN apt-get update && \
-    apt-get install -y wget ffmpeg curl aria2
-
-RUN apt-get install -y \
+# Copy requirements file first to leverage caching
+COPY --chown=admin requirements.txt .
+
+# Install Python dependencies
+RUN pip install --no-cache-dir -r requirements.txt
+
+# Install system dependencies
+RUN apt-get update && \
+    apt-get install -y --no-install-recommends \
+    wget ffmpeg curl aria2 \
     fonts-liberation \
     libatk-bridge2.0-0 \
     libatk1.0-0 \
@@ -26,63 +31,38 @@ RUN apt-get install -y \
     libvulkan1 \
     libxcomposite1 \
     libxdamage1 \
-    mesa-vulkan-drivers\
+    mesa-vulkan-drivers \
     libxfixes3 \
     libasound2 \
     libxkbcommon0 \
     libxrandr2 \
-    xdg-utils
+    xdg-utils \
+    software-properties-common \
+    npm

-# Download youtubeuploader
+# Install youtubeuploader
 ADD https://github.com/porjo/youtubeuploader/releases/download/23.06/youtubeuploader_23.06_Linux_x86_64.tar.gz youtubeuploader.tar.gz
-
-# Create youtube directory and extract youtubeuploader there
 RUN mkdir -p /srv/youtube && \
     tar -zxvf youtubeuploader.tar.gz -C /srv/youtube && \
     rm youtubeuploader.tar.gz && \
     chmod +x /srv/youtube/youtubeuploader

-# Copy the application code
-COPY --chown=admin . /srv
-
-# Download and install Thorium Browser
-
-RUN apt-get update && apt-get install -y \
-    software-properties-common \
-    npm
+# Install latest npm and node
 RUN npm install npm@latest -g && \
     npm install n -g && \
     n latest

-#install the stuff
-# RUN cd /srv/Remotion-app && npm install
-
-# Install Python dependencies
-COPY requirements.txt .
-RUN pip install --no-cache-dir -r requirements.txt
-
-#install unsilence
-RUN pipx ensurepath
-RUN pipx install unsilence
+# Install unsilence
+RUN pipx ensurepath && \
+    pipx install unsilence
+
+# Copy the application code
+COPY --chown=admin . /srv

 # Command to run the application
-CMD python -m uvicorn App.app:app --host 0.0.0.0 --port 7860 & python -m celery -A App.Worker.celery worker -c 5 --max-tasks-per-child=1 --without-heartbeat
-
-# CMD python -m uvicorn App.app:app --host 0.0.0.0 --port 7860 --workers 2
+# CMD python -m uvicorn App.app:app --host 0.0.0.0 --port 7860 & python -m celery -A App.Worker.celery worker -c 5 --max-tasks-per-child=1 --without-heartbeat
+
+CMD python -m uvicorn App.app:app --host 0.0.0.0 --port 7860

+# Expose port
 EXPOSE 7860
Remotion-app/ServerInstructions.json ADDED
@@ -0,0 +1,3 @@
+{
+  "frames": [0, 30]
+}
Remotion-app/remotion.config.js CHANGED
@@ -4,11 +4,13 @@
 // Note: When using the Node.JS APIs, the config file doesn't apply. Instead, pass options directly to the APIs

 import {Config} from '@remotion/cli/config';
-
+import Instructions from './ServerInstructions.json';
 import {enableTailwind} from '@remotion/tailwind';
 Config.overrideWebpackConfig((currentConfiguration) => {
   return enableTailwind(currentConfiguration);
 });

+Config.setC;
 Config.setVideoImageFormat('jpeg');
+Config.setFrameRange(Instructions.frames);
 // Config.setConcurrency(15);
requirements.txt CHANGED
@@ -9,6 +9,7 @@ starlette==0.25.0
 Werkzeug==2.2.2
 uvicorn==0.21.1
 gunicorn
+aiofiles
 requests
 celery==5.3.0
 pyrogram==2.0.100