Eason Lu committed on
Commit
fe8b7a1
1 Parent(s): bd773a2

pipeline refine and cleanup

Browse files

Former-commit-id: e5bfa25c8a96d6d7bd28b8921c9177f05221386b

Files changed (1) hide show
  1. pipeline.py +222 -202
pipeline.py CHANGED
@@ -6,155 +6,120 @@ from tqdm import tqdm
6
  from SRT import SRT_script
7
  import stable_whisper
8
  import whisper
 
9
 
10
  import subprocess
11
 
12
  import time
13
 
14
- parser = argparse.ArgumentParser()
15
- parser.add_argument("--link", help="youtube video link here", default=None, type=str, required=False)
16
- parser.add_argument("--video_file", help="local video path here", default=None, type=str, required=False)
17
- parser.add_argument("--audio_file", help="local audio path here", default=None, type=str, required=False)
18
- parser.add_argument("--srt_file", help="srt file input path here", default=None, type=str, required=False) # New argument
19
- parser.add_argument("--download", help="download path", default='./downloads', type=str, required=False)
20
- parser.add_argument("--output_dir", help="translate result path", default='./results', type=str, required=False)
21
- parser.add_argument("--video_name", help="video name, if use video link as input, the name will auto-filled by youtube video name", default='placeholder', type=str, required=False)
22
- parser.add_argument("--model_name", help="model name only support gpt-4 and gpt-3.5-turbo", type=str, required=False, default="gpt-4") # default change to gpt-4
23
- parser.add_argument("-only_srt", help="set script output to only .srt file", action='store_true')
24
- parser.add_argument("-v", help="auto encode script with video", action='store_true')
25
- args = parser.parse_args()
26
-
27
- # input should be either video file or youtube video link.
28
- if args.link is None and args.video_file is None and args.srt_file is None and args.audio_file is None:
29
- print("need video source or srt file")
30
- exit()
31
-
32
- # set up
33
- openai.api_key = os.getenv("OPENAI_API_KEY")
34
- DOWNLOAD_PATH = args.download
35
- if not os.path.exists(DOWNLOAD_PATH):
36
- os.mkdir(DOWNLOAD_PATH)
37
- os.mkdir(f'{DOWNLOAD_PATH}/audio')
38
- os.mkdir(f'{DOWNLOAD_PATH}/video')
39
-
40
- RESULT_PATH = args.output_dir
41
- if not os.path.exists(RESULT_PATH):
42
- os.mkdir(RESULT_PATH)
43
-
44
- # set video name as the input file name if not specified
45
- if args.video_name == 'placeholder' :
46
- # set video name to upload file name
47
- if args.video_file is not None:
48
- VIDEO_NAME = args.video_file.split('/')[-1].split('.')[0]
49
- elif args.audio_file is not None:
50
- VIDEO_NAME = args.audio_file.split('/')[-1].split('.')[0]
51
- elif args.srt_file is not None:
52
- VIDEO_NAME = args.srt_file.split('/')[-1].split('.')[0].split("_")[0]
53
- else:
54
- VIDEO_NAME = args.video_name
55
- else:
56
- VIDEO_NAME = args.video_name
57
-
58
- model_name = args.model_name
59
-
60
- threshold = 30
61
-
62
- # get source audio
63
- if args.link is not None and args.video_file is None:
64
- # Download audio from YouTube
65
- video_link = args.link
66
- video = None
67
- audio = None
68
- try:
69
- yt = YouTube(video_link)
70
- video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
71
- if video:
72
- video.download(f'{DOWNLOAD_PATH}/video')
73
- print('Video download completed!')
74
- else:
75
- print("Error: Video stream not found")
76
- audio = yt.streams.filter(only_audio=True, file_extension='mp4').first()
77
- if audio:
78
- audio.download(f'{DOWNLOAD_PATH}/audio')
79
- print('Audio download completed!')
80
- else:
81
- print("Error: Audio stream not found")
82
- except Exception as e:
83
- print("Connection Error")
84
- print(e)
85
- exit()
86
 
87
- video_path = f'{DOWNLOAD_PATH}/video/{video.default_filename}'
88
- audio_path = '{}/audio/{}'.format(DOWNLOAD_PATH, audio.default_filename)
89
- audio_file = open(audio_path, "rb")
90
- if VIDEO_NAME == 'placeholder':
91
- VIDEO_NAME = audio.default_filename.split('.')[0]
92
- elif args.video_file is not None:
93
- # Read from local
94
- video_path = args.video_file
95
- # audio_path = "{DOWNLOAD_PATH}/audio/{VIDEO_NAME}.mp3".format(DOWNLOAD_PATH, VIDEO_NAME)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
96
  if args.audio_file is not None:
97
  audio_file= open(args.audio_file, "rb")
98
  audio_path = args.audio_file
99
- else:
100
- output_audio_path = f'{DOWNLOAD_PATH}/audio/{VIDEO_NAME}.mp3'
101
- subprocess.run(['ffmpeg', '-i', video_path, '-f', 'mp3', '-ab', '192000', '-vn', output_audio_path])
102
- audio_file = open(output_audio_path, "rb")
103
- audio_path = output_audio_path
104
-
105
- if not os.path.exists(f'{RESULT_PATH}/{VIDEO_NAME}'):
106
- os.mkdir(f'{RESULT_PATH}/{VIDEO_NAME}')
107
-
108
- if args.audio_file is not None:
109
- audio_file= open(args.audio_file, "rb")
110
- audio_path = args.audio_file
111
-
112
- # Instead of using the script_en variable directly, we'll use script_input
113
- srt_file_en = args.srt_file
114
-
115
- if srt_file_en is not None:
116
- srt = SRT_script.parse_from_srt_file(srt_file_en)
117
- else:
118
- # using whisper to perform speech-to-text and save it in <video name>_en.txt under RESULT PATH.
119
- srt_file_en = "{}/{}/{}_en.srt".format(RESULT_PATH, VIDEO_NAME, VIDEO_NAME)
120
- if not os.path.exists(srt_file_en):
121
-
122
- # use OpenAI API for transcribe
123
- # transcript = openai.Audio.transcribe("whisper-1", audio_file)
124
-
125
- # use local whisper model
126
- # model = whisper.load_model("base") # using base model in local machine (may use large model on our server)
127
- # transcript = model.transcribe(audio_path)
128
-
129
- # use stable-whisper
130
- model = stable_whisper.load_model('base')
131
- transcript = model.transcribe(audio_path, regroup = False, initial_prompt="Hello, welcome to my lecture. Are you good my friend?")
132
- (
133
- transcript
134
- .split_by_punctuation(['.', '。', '?'])
135
- .merge_by_gap(.15, max_words=3)
136
- .merge_by_punctuation([' '])
137
- .split_by_punctuation(['.', '。', '?'])
138
- )
139
 
140
- transcript = transcript.to_dict()
141
- srt = SRT_script(transcript['segments']) # read segments to SRT class
142
 
143
- else:
 
 
144
  srt = SRT_script.parse_from_srt_file(srt_file_en)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
145
 
146
- # srt class preprocess
147
- srt.form_whole_sentence()
148
- # srt.spell_check_term()
149
- srt.correct_with_force_term()
150
- srt.write_srt_file_src(srt_file_en)
151
- script_input = srt.get_source_only()
152
-
153
- if not args.only_srt:
154
- from srt2ass import srt2ass
155
- assSub_en = srt2ass(srt_file_en, "default", "No", "Modest")
156
- print('ASS subtitle saved as: ' + assSub_en)
157
-
158
 
159
  # Split the video script by sentences and create chunks within the token limit
160
  def script_split(script_in, chunk_size = 1000):
@@ -181,43 +146,38 @@ def script_split(script_in, chunk_size = 1000):
181
  assert len(script_arr) == len(range_arr)
182
  return script_arr, range_arr
183
 
184
- script_arr, range_arr = script_split(script_input)
185
- # print(script_arr, range_arr)
186
-
187
  # check whether previous translation is done
188
- zh_file = "{}/{}/{}_zh.srt".format(RESULT_PATH, VIDEO_NAME, VIDEO_NAME)
189
- segidx = 1
190
- if os.path.exists(zh_file):
191
- temp_file = "{}/{}/temp.srt".format(RESULT_PATH, VIDEO_NAME)
192
- if os.path.exists(temp_file):
193
- os.remove(temp_file)
194
- with open(zh_file, "r") as f0:
195
- for count, _ in enumerate(f0):
196
- pass
197
- count += 1
198
- segidx = int(count/4)+1
199
- en_file = "{}/{}/{}_en.srt".format(RESULT_PATH, VIDEO_NAME, VIDEO_NAME)
200
- if args.srt_file is not None:
201
- en_file = args.srt_file
202
- with open(en_file, "r") as f1, open(temp_file, "a") as f2:
203
- x = f1.readlines()
204
- #print(len(x))
205
- if count >= len(x):
206
- print('Work already done! Please delete {}_zh.srt files in result directory first in order to rework'.format(VIDEO_NAME))
207
- exit()
208
- for i, line in enumerate(x):
209
- if i >= count:
210
- #print(i)
211
- f2.write(line)
212
 
213
- srt = SRT_script.parse_from_srt_file(temp_file)
214
- print('temp_contents')
215
- print(srt.get_source_only())
216
 
217
 
218
  def get_response(model_name, sentence):
219
  if model_name == "gpt-3.5-turbo" or model_name == "gpt-4":
220
- # print(s + "\n")
221
  response = openai.ChatCompletion.create(
222
  model=model_name,
223
  messages = [
@@ -234,39 +194,99 @@ def get_response(model_name, sentence):
234
 
235
 
236
  # Translate and save
237
- previous_length = 0
238
- for sentence, range in tqdm(zip(script_arr, range_arr)):
239
- # update the range based on previous length
240
- range = (range[0]+previous_length, range[1]+previous_length)
241
-
242
- # using chatgpt model
243
- print(f"now translating sentences {range}")
244
- flag = True
245
- while flag:
246
- flag = False
247
- try:
248
- translate = get_response(model_name, sentence)
249
- except Exception as e:
250
- print("An error has occurred during translation:",e)
251
- print("Retrying... the script will continue after 30 seconds.")
252
- time.sleep(30)
253
- flag = True
254
- # add read-time output back and modify the post-processing by using one batch as an unit.
255
- srt.set_translation(translate, range, model_name, VIDEO_NAME, args.link)
256
-
257
- # srt.realtime_bilingual_write_srt(f"{RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_bi.srt",range, add_length,segidx)
258
-
259
- srt.check_len_and_split()
260
- srt.remove_trans_punctuation()
261
- srt.write_srt_file_translate(f"{RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_zh.srt")
262
- srt.write_srt_file_bilingual(f"{RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_bi.srt")
263
-
264
- if not args.only_srt:
265
- assSub_zh = srt2ass(f"{RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_zh.srt", "default", "No", "Modest")
266
- print('ASS subtitle saved as: ' + assSub_zh)
267
-
268
- if args.v:
269
- if args.only_srt:
270
- os.system(f'ffmpeg -i {video_path} -vf "subtitles={RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_zh.srt" {RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}.mp4')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
271
  else:
272
- os.system(f'ffmpeg -i {video_path} -vf "subtitles={RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_zh.ass" {RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}.mp4')
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6
  from SRT import SRT_script
7
  import stable_whisper
8
  import whisper
9
+ from srt2ass import srt2ass
10
 
11
  import subprocess
12
 
13
  import time
14
 
15
+ def parse_args():
16
+ parser = argparse.ArgumentParser()
17
+ parser.add_argument("--link", help="youtube video link here", default=None, type=str, required=False)
18
+ parser.add_argument("--video_file", help="local video path here", default=None, type=str, required=False)
19
+ parser.add_argument("--audio_file", help="local audio path here", default=None, type=str, required=False)
20
+ parser.add_argument("--srt_file", help="srt file input path here", default=None, type=str, required=False) # New argument
21
+ parser.add_argument("--download", help="download path", default='./downloads', type=str, required=False)
22
+ parser.add_argument("--output_dir", help="translate result path", default='./results', type=str, required=False)
23
+ parser.add_argument("--video_name", help="video name, if use video link as input, the name will auto-filled by youtube video name", default='placeholder', type=str, required=False)
24
+ parser.add_argument("--model_name", help="model name only support gpt-4 and gpt-3.5-turbo", type=str, required=False, default="gpt-4") # default change to gpt-4
25
+ parser.add_argument("-only_srt", help="set script output to only .srt file", action='store_true')
26
+ parser.add_argument("-v", help="auto encode script with video", action='store_true')
27
+ args = parser.parse_args()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
28
 
29
+ return args
30
+
31
def get_sources(args, download_path, result_path, video_name):
    """Resolve the audio/video inputs for the pipeline.

    Depending on the CLI arguments this either downloads the video and
    audio streams from YouTube, or uses the local --video_file /
    --audio_file paths. When no audio file is supplied, the audio track
    is extracted from the video with ffmpeg.

    Parameters
    ----------
    args : argparse.Namespace
        Parsed CLI arguments (reads .link, .video_file, .audio_file).
    download_path : str
        Root directory holding the audio/ and video/ subdirectories.
    result_path : str
        Root directory for results; a per-video subdirectory is created.
    video_name : str
        Video name; used for the result subdirectory and the extracted
        audio filename.

    Returns
    -------
    (audio_path, audio_file, video_path)
        ``audio_file`` is an open binary handle the caller must close.
        ``video_path`` is None when no video source was given (fixes a
        NameError the original hit on the audio-only/srt-only path).
    """
    # None until a video source is resolved; previously this was left
    # unbound when only --audio_file or --srt_file was supplied.
    video_path = None

    if args.link is not None and args.video_file is None:
        # Download both the video and audio streams from YouTube.
        video = None
        audio = None
        try:
            yt = YouTube(args.link)
            video = yt.streams.filter(progressive=True, file_extension='mp4').order_by('resolution').desc().first()
            if video:
                video.download(f'{download_path}/video')
                print('Video download completed!')
            else:
                print("Error: Video stream not found")
            audio = yt.streams.filter(only_audio=True, file_extension='mp4').first()
            if audio:
                audio.download(f'{download_path}/audio')
                print('Audio download completed!')
            else:
                print("Error: Audio stream not found")
        except Exception as e:
            print("Connection Error")
            print(e)
            exit()

        video_path = f'{download_path}/video/{video.default_filename}'
        audio_path = f'{download_path}/audio/{audio.default_filename}'
        audio_file = open(audio_path, "rb")
        if video_name == 'placeholder':
            # NOTE(review): this rebinds only the local variable; the
            # caller never sees the derived name. TODO: also return
            # video_name (interface change) -- confirm with callers.
            video_name = audio.default_filename.split('.')[0]
    elif args.video_file is not None:
        # Read from local file.
        video_path = args.video_file

    if args.audio_file is not None:
        # Use the supplied audio file directly. Opened exactly once here;
        # the original re-opened it a second time further down, leaking
        # the first handle.
        audio_path = args.audio_file
        audio_file = open(audio_path, "rb")
    else:
        # Extract the audio track from the video with ffmpeg.
        audio_path = f'{download_path}/audio/{video_name}.mp3'
        subprocess.run(['ffmpeg', '-i', video_path, '-f', 'mp3', '-ab', '192000', '-vn', audio_path])
        audio_file = open(audio_path, "rb")

    # Per-video result directory.
    if not os.path.exists(f'{result_path}/{video_name}'):
        os.mkdir(f'{result_path}/{video_name}')

    return audio_path, audio_file, video_path
 
84
 
85
def get_srt_class(srt_file_en, result_path, video_name, audio_path, audio_file = None, whisper_model = 'base', method = "stable"):
    """Load or create the source-language SRT_script.

    If ``srt_file_en`` is given, parse it directly. Otherwise look for a
    cached ``<video_name>_en.srt`` under ``result_path``; when absent,
    run speech-to-text on ``audio_path`` using the selected method:
    "api" (OpenAI hosted Whisper), "basic" (local whisper model) or
    "stable" (stable-whisper with punctuation-based regrouping).

    Returns ``(srt_file_en, srt)``: the path the source subtitles are
    (or will be) written to, and the SRT_script instance.

    Raises ValueError for an unknown ``method``.
    """
    if srt_file_en is not None:
        return srt_file_en, SRT_script.parse_from_srt_file(srt_file_en)

    # No input srt: transcribe, or reuse a previous transcription file.
    srt_file_en = f"{result_path}/{video_name}/{video_name}_en.srt"
    if os.path.exists(srt_file_en):
        return srt_file_en, SRT_script.parse_from_srt_file(srt_file_en)

    if method == "api":
        # OpenAI API transcription.
        transcript = openai.Audio.transcribe("whisper-1", audio_file)
    elif method == "basic":
        # Local whisper model (may use a larger model on a server).
        transcript = whisper.load_model(whisper_model).transcribe(audio_path)
    elif method == "stable":
        # stable-whisper, then regroup segments around punctuation.
        model = stable_whisper.load_model(whisper_model)
        transcript = model.transcribe(audio_path, regroup = False, initial_prompt="Hello, welcome to my lecture. Are you good my friend?")
        (
            transcript
            .split_by_punctuation(['.', '。', '?'])
            .merge_by_gap(.15, max_words=3)
            .merge_by_punctuation([' '])
            .split_by_punctuation(['.', '。', '?'])
        )
        transcript = transcript.to_dict()
    else:
        raise ValueError("invalid speech to text method")

    # Read the transcribed segments into the SRT class.
    return srt_file_en, SRT_script(transcript['segments'])
 
 
 
 
 
 
 
 
 
123
 
124
  # Split the video script by sentences and create chunks within the token limit
125
  def script_split(script_in, chunk_size = 1000):
 
146
  assert len(script_arr) == len(range_arr)
147
  return script_arr, range_arr
148
 
 
 
 
149
  # check whether previous translation is done
150
+ # zh_file = "{}/{}/{}_zh.srt".format(RESULT_PATH, VIDEO_NAME, VIDEO_NAME)
151
+ # segidx = 1
152
+ # if os.path.exists(zh_file):
153
+ # temp_file = "{}/{}/temp.srt".format(RESULT_PATH, VIDEO_NAME)
154
+ # if os.path.exists(temp_file):
155
+ # os.remove(temp_file)
156
+ # with open(zh_file, "r") as f0:
157
+ # for count, _ in enumerate(f0):
158
+ # pass
159
+ # count += 1
160
+ # segidx = int(count/4)+1
161
+ # en_file = "{}/{}/{}_en.srt".format(RESULT_PATH, VIDEO_NAME, VIDEO_NAME)
162
+ # if args.srt_file is not None:
163
+ # en_file = args.srt_file
164
+ # with open(en_file, "r") as f1, open(temp_file, "a") as f2:
165
+ # x = f1.readlines()
166
+ # #print(len(x))
167
+ # if count >= len(x):
168
+ # print('Work already done! Please delete {}_zh.srt files in result directory first in order to rework'.format(VIDEO_NAME))
169
+ # exit()
170
+ # for i, line in enumerate(x):
171
+ # if i >= count:
172
+ # f2.write(line)
 
173
 
174
+ # srt = SRT_script.parse_from_srt_file(temp_file)
175
+ # print('temp_contents')
176
+ # print(srt.get_source_only())
177
 
178
 
179
  def get_response(model_name, sentence):
180
  if model_name == "gpt-3.5-turbo" or model_name == "gpt-4":
 
181
  response = openai.ChatCompletion.create(
182
  model=model_name,
183
  messages = [
 
194
 
195
 
196
  # Translate and save
197
def translate(srt, script_arr, range_arr, model_name, video_name, video_link):
    """Translate each script chunk and store the results on the SRT.

    For every (chunk, index-range) pair, call the chat model via
    ``get_response`` and write the translation back into ``srt`` through
    ``srt.set_translation``. On any API error the call is retried after
    a 30-second pause, indefinitely.

    Parameters
    ----------
    srt : SRT_script
        Subtitle object receiving the translations (mutated in place).
    script_arr, range_arr : list
        Parallel lists of text chunks and (start, end) sentence ranges
        produced by ``script_split``.
    model_name : str
        Chat model name forwarded to ``get_response``.
    video_name, video_link : str
        Metadata forwarded to ``srt.set_translation``.
    """
    # NOTE(review): previous_length is never updated, so the offset is
    # always 0 and the range is used as-is -- confirm this is intended.
    previous_length = 0
    for chunk, seg_range in tqdm(zip(script_arr, range_arr)):
        # Shift the range by the accumulated offset (currently a no-op).
        # Renamed from `range`, which shadowed the builtin.
        seg_range = (seg_range[0] + previous_length, seg_range[1] + previous_length)

        # Using the chatgpt model; retry forever on API failures.
        print(f"now translating sentences {seg_range}")
        translation = None
        retry = True
        while retry:
            retry = False
            try:
                # Renamed from `translate`, which shadowed this function.
                translation = get_response(model_name, chunk)
            except Exception as e:
                print("An error has occurred during translation:", e)
                print("Retrying... the script will continue after 30 seconds.")
                time.sleep(30)
                retry = True
        srt.set_translation(translation, seg_range, model_name, video_name, video_link)
216
+
217
+
218
def main():
    """Entry point: wire the CLI arguments through the whole pipeline."""
    args = parse_args()

    # Input check: need at least one of video / audio / srt / link.
    if args.link is None and args.video_file is None and args.srt_file is None and args.audio_file is None:
        print("need video source or srt file")
        exit()

    # Set up the API key and the working directories.
    openai.api_key = os.getenv("OPENAI_API_KEY")
    DOWNLOAD_PATH = args.download
    if not os.path.exists(DOWNLOAD_PATH):
        os.mkdir(DOWNLOAD_PATH)
        os.mkdir(f'{DOWNLOAD_PATH}/audio')
        os.mkdir(f'{DOWNLOAD_PATH}/video')

    RESULT_PATH = args.output_dir
    if not os.path.exists(RESULT_PATH):
        os.mkdir(RESULT_PATH)

    # Derive the video name from the input file when not specified.
    VIDEO_NAME = args.video_name
    if VIDEO_NAME == 'placeholder':
        if args.video_file is not None:
            VIDEO_NAME = args.video_file.split('/')[-1].split('.')[0]
        elif args.audio_file is not None:
            VIDEO_NAME = args.audio_file.split('/')[-1].split('.')[0]
        elif args.srt_file is not None:
            VIDEO_NAME = args.srt_file.split('/')[-1].split('.')[0].split("_")[0]

    audio_path, audio_file, video_path = get_sources(args, DOWNLOAD_PATH, RESULT_PATH, VIDEO_NAME)

    srt_file_en, srt = get_srt_class(args.srt_file, RESULT_PATH, VIDEO_NAME, audio_path, audio_file)

    # SRT class preprocess.
    srt.form_whole_sentence()
    # srt.spell_check_term()
    srt.correct_with_force_term()
    srt.write_srt_file_src(srt_file_en)
    script_input = srt.get_source_only()

    # Write the English .ass subtitle unless srt-only output was asked for.
    if not args.only_srt:
        assSub_en = srt2ass(srt_file_en, "default", "No", "Modest")
        print('ASS subtitle saved as: ' + assSub_en)

    script_arr, range_arr = script_split(script_input)

    translate(srt, script_arr, range_arr, args.model_name, VIDEO_NAME, args.link)

    # SRT post-processing and output files.
    srt.check_len_and_split()
    srt.remove_trans_punctuation()
    srt.write_srt_file_translate(f"{RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_zh.srt")
    srt.write_srt_file_bilingual(f"{RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_bi.srt")

    # Write the Chinese .ass subtitle.
    if not args.only_srt:
        assSub_zh = srt2ass(f"{RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_zh.srt", "default", "No", "Modest")
        print('ASS subtitle saved as: ' + assSub_zh)

    # Optionally burn the subtitles into an .mp4 video file.
    if args.v:
        sub_ext = "srt" if args.only_srt else "ass"
        os.system(f'ffmpeg -i {video_path} -vf "subtitles={RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}_zh.{sub_ext}" {RESULT_PATH}/{VIDEO_NAME}/{VIDEO_NAME}.mp4')
289
+
290
+
291
+ if __name__ == "__main__":
292
+ main()