arjunanand13 committed
Commit 66db48e
1 Parent(s): 74dd548

Create app.py

Files changed (1)
  1. app.py +447 -0

app.py ADDED
import gradio as gr
import os
import whisper
import cv2
import json
import tempfile
import torch
import transformers
from transformers import pipeline  # needed by audio_extraction_space()
import re
import time
from torch import cuda, bfloat16
from moviepy.editor import VideoFileClip
from image_caption import Caption
from pathlib import Path
from langchain import PromptTemplate
from langchain import LLMChain
from langchain.llms import HuggingFacePipeline
from difflib import SequenceMatcher
import argparse
import shutil
from PIL import Image
import google.generativeai as genai
from huggingface_hub import InferenceClient

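# Pipeline overview: (1) extract and transcribe the audio track with Whisper,
# (2) caption a few evenly spaced frames, (3) prompt an LLM (Gemini or Mistral)
# to pick IAB tier-1/tier-2 categories from transcript + captions, and
# (4) fuzzy-match the model's answer back onto the known category lists.
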
class VideoClassifier:
    global audio_time, setup_time, caption_time, classification_time
    audio_time = 0
    setup_time = 0
    caption_time = 0
    classification_time = 0

    def __init__(self, no_of_frames, mode='interface', model='gemini'):
        self.no_of_frames = no_of_frames
        self.mode = mode
        self.model_name = model.strip().lower()
        print(self.model_name)
        os.environ["TOKENIZERS_PARALLELISM"] = "false"
        # Read the HF token before model setup, since setup_mistral_model() uses it.
        self.hf_key = os.environ.get("HF_KEY", None)
        if self.model_name == 'mistral':
            print("Setting up Mistral model for Class Selection")
            self.setup_mistral_model()
        else:
            print("Setting up Gemini model for Class Selection")
            self.setup_gemini_model()
        self.setup_paths()
        # self.whisper_model = whisper.load_model("base")

    def setup_paths(self):
        self.path = './results'
        if os.path.exists(self.path):
            shutil.rmtree(self.path)
        os.mkdir(self.path)

    def setup_gemini_model(self):
        self.genai = genai
        # Read the key from the environment instead of hardcoding a credential in source.
        self.genai.configure(api_key=os.environ.get("GEMINI_API_KEY"))
        self.genai_model = genai.GenerativeModel('gemini-pro')
        self.whisper_model = whisper.load_model("base")
        self.img_cap = Caption()

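    # Set the key before launching, e.g. `export GEMINI_API_KEY=<your key>`
    # ("GEMINI_API_KEY" is an assumed variable name, used here so the key is
    # not committed in plain text).
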
    def setup_mistral_space_model(self):
        # if not self.hf_key:
        #     raise ValueError("Hugging Face API key is not set or invalid.")
        self.client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
        # self.client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
        # self.client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
        self.whisper_model = whisper.load_model("base")
        self.img_cap = Caption()

    def setup_mistral_model(self):
        self.model_id = "mistralai/Mistral-7B-Instruct-v0.2"
        self.device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
        # self.device_name = torch.cuda.get_device_name()
        # print(f"Using device: {self.device} ({self.device_name})")
        bnb_config = transformers.BitsAndBytesConfig(
            load_in_4bit=True,
            bnb_4bit_quant_type='nf4',
            bnb_4bit_use_double_quant=True,
            bnb_4bit_compute_dtype=bfloat16,
        )
        hf_auth = self.hf_key
        model_config = transformers.AutoConfig.from_pretrained(
            self.model_id,
            # use_auth_token=hf_auth
        )
        self.model = transformers.AutoModelForCausalLM.from_pretrained(
            self.model_id,
            trust_remote_code=True,
            config=model_config,
            quantization_config=bnb_config,
            # use_auth_token=hf_auth
        )
        self.model.eval()
        self.tokenizer = transformers.AutoTokenizer.from_pretrained(
            self.model_id,
            # use_auth_token=hf_auth
        )
        # Note: this instance attribute shadows the generate_text() method below.
        self.generate_text = transformers.pipeline(
            model=self.model, tokenizer=self.tokenizer,
            return_full_text=True,
            task='text-generation',
            temperature=0.01,
            max_new_tokens=32
        )
        self.whisper_model = whisper.load_model("base")
        self.img_cap = Caption()
        self.llm = HuggingFacePipeline(pipeline=self.generate_text)

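    # Loading Mistral-7B-Instruct in 4-bit NF4 with double quantization and
    # bfloat16 compute keeps the weights to roughly 4 GB, so the model should
    # fit on a single consumer GPU; near-greedy decoding (temperature 0.01,
    # 32 new tokens) suits short class-name answers.
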
    def audio_extraction(self, video_input):
        """Local path: extract the audio track and transcribe it with the local
        Whisper model (roughly 3 seconds of GPU inference)."""
        global audio_time
        start_time_audio = time.time()
        print(f"Processing video: {video_input} with {self.no_of_frames} frames.")
        mp4_file = video_input
        video_name = mp4_file.split("/")[-1]
        wav_file = "results/audiotrack.wav"
        video_clip = VideoFileClip(mp4_file)
        audioclip = video_clip.audio
        audioclip.write_audiofile(wav_file)
        audioclip.close()
        video_clip.close()
        audiotrack = "results/audiotrack.wav"
        result = self.whisper_model.transcribe(audiotrack, fp16=False)
        transcript = result["text"]
        print("TRANSCRIPT", transcript)
        end_time_audio = time.time()
        audio_time = end_time_audio - start_time_audio
        # print("TIME TAKEN FOR AUDIO CONVERSION (WHISPER)", audio_time)

        return transcript

    def audio_extraction_space(self, video_input):
        """Space path: transcribe with a hosted Whisper checkpoint via the
        transformers pipeline to keep inference time down."""
        MODEL_NAME = "openai/whisper-large-v3"
        BATCH_SIZE = 8
        device = "cuda" if torch.cuda.is_available() else "cpu"
        global audio_time
        start_time_audio = time.time()
        print(f"Processing video: {video_input} with {self.no_of_frames} frames.")
        mp4_file = video_input
        video_name = mp4_file.split("/")[-1]
        wav_file = "results/audiotrack.wav"
        video_clip = VideoFileClip(mp4_file)
        audioclip = video_clip.audio
        audioclip.write_audiofile(wav_file)
        audioclip.close()
        video_clip.close()
        audiotrack = "results/audiotrack.wav"
        pipe = pipeline(
            "automatic-speech-recognition",
            model=MODEL_NAME,
            device=device
        )
        # if audio_file is None:
        #     return "No audio file submitted! Please upload or record an audio file before submitting your request."
        # if not os.path.exists(audio_file):
        #     return "File does not exist. Please check the file path."
        # "transcribe" replaces a previously undefined `task` variable.
        result = pipe(audiotrack, batch_size=BATCH_SIZE, generate_kwargs={"task": "transcribe"}, return_timestamps=True)
        return result["text"]

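    # The Space path uses whisper-large-v3 through the transformers pipeline,
    # while the local path above loads the much smaller "base" checkpoint via
    # the openai-whisper package; classify_video() currently calls the Space path.
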
    def generate_text(self, inputs, parameters=None):
        if parameters is None:
            parameters = {
                "temperature": 0.7,
                "max_new_tokens": 50,
                "top_p": 0.9,
                "repetition_penalty": 1.2
            }
        # InferenceClient is not callable directly; use its text_generation endpoint.
        return self.client.text_generation(inputs, **parameters)

    def classify_video(self, video_input):
        global classification_time, caption_time
        transcript = self.audio_extraction_space(video_input)
        start_time_caption = time.time()
        video = cv2.VideoCapture(video_input)
        length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
        no_of_frame = int(self.no_of_frames)
        temp_div = length // no_of_frame
        currentframe = 50
        caption_text = []

        # Sample no_of_frame frames spread across the clip and caption each one.
        for i in range(no_of_frame):
            video.set(cv2.CAP_PROP_POS_FRAMES, currentframe)
            ret, frame = video.read()
            if ret:
                frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
                image = Image.fromarray(frame)
                content = self.img_cap.predict_image_caption_gemini(image)
                print("content", content)
                caption_text.append(content)
                currentframe += temp_div - 1
            else:
                break

        captions = ", ".join(caption_text)
        print("CAPTIONS", captions)
        video.release()
        cv2.destroyAllWindows()
        end_time_caption = time.time()
        caption_time = end_time_caption - start_time_caption
        # print("TIME TAKEN FOR IMAGE CAPTIONING", end_time_caption - start_time_caption)

        start_time_generation = time.time()
        main_categories = Path("main_classes.txt").read_text()
        main_categories_list = ['Automotive', 'Books and Literature', 'Business and Finance', 'Careers', 'Education', 'Family and Relationships',
                                'Fine Art', 'Food & Drink', 'Healthy Living', 'Hobbies & Interests', 'Home & Garden', 'Medical Health', 'Movies', 'Music and Audio',
                                'News and Politics', 'Personal Finance', 'Pets', 'Pop Culture', 'Real Estate', 'Religion & Spirituality', 'Science', 'Shopping', 'Sports',
                                'Style & Fashion', 'Technology & Computing', 'Television', 'Travel', 'Video Gaming']

        # Currently unused; kept alongside the commented-out streaming path below.
        generate_kwargs = {
            "temperature": 0.9,
            "max_new_tokens": 256,
            "top_p": 0.95,
            "repetition_penalty": 1.0,
            "do_sample": True,
            "seed": 42,
            "return_full_text": False
        }

        template1 = '''Given below are the different types of main video classes
        {main_categories}
        You are a text classifier. Categorise the transcript and captions into the one main class whose context they match, and generate only the main class name, with no sub-classes and no explanation.
        Give more importance to the Transcript while classifying.
        Transcript: {transcript}
        Captions: {captions}
        Return only the answer chosen from the list and nothing else.
        Main-class => '''

        prompt1 = PromptTemplate(template=template1, input_variables=['main_categories', 'transcript', 'captions'])
        print("PROMPT 1", prompt1)
        # print(self.model)
        # print(f"Current model in use: {self.model}")
        if self.model_name == 'mistral':
            try:
                print("Entering mistral chain approach")
                chain1 = LLMChain(llm=self.llm, prompt=prompt1)
                main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
            except Exception:
                # Fall back to the hosted chat endpoint if the local chain fails.
                print("Entering mistral template approach")
                prompt1 = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
                messages = [{"role": "user", "content": prompt1}]
                stream = self.client.chat_completion(messages, max_tokens=100)
                main_class = stream.choices[0].message.content.strip()
                # output = ""
                # for response in stream:
                #     output += response['token'].text
                # print("Streaming output:", output)
                # main_class = output.strip()

            print(main_class)
            print("#######################################################")
            # Strip the "Main-class =>" prefix if the model echoed the answer format.
            match = re.search(r"Main-class =>\s*(.+)", main_class)
            if match:
                main_class = match.group(1).strip()
        else:
            prompt_text = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
            response = self.genai_model.generate_content(contents=prompt_text)
            main_class = response.text

        print(main_class)
        print("#######################################################")
        print("MAIN CLASS: ", main_class)
        def category_class(class_name, categories_list):
            # Snap a free-form model answer to the closest entry in the
            # category list using difflib similarity.
            def similar(str1, str2):
                return SequenceMatcher(None, str1, str2).ratio()
            index_no = 0
            sim = 0
            for sub in categories_list:
                res = similar(class_name, sub)
                if res > sim:
                    sim = res
                    index_no = categories_list.index(sub)
            class_name = categories_list[index_no]
            return class_name

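        # Illustrative only: a raw answer like "Sports news" would snap to
        # "Sports", the entry with the highest SequenceMatcher ratio.
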
        if main_class not in main_categories_list:
            main_class = category_class(main_class, main_categories_list)
        print("POST PROCESSED MAIN CLASS : ", main_class)
        # IAB tier-1 IDs are 1-based positions in the category list.
        tier_1_index_no = main_categories_list.index(main_class) + 1

        with open('categories_json.txt') as f:
            data = json.load(f)
        sub_categories_list = data[main_class]
        print("SUB CATEGORIES LIST", sub_categories_list)
        with open("sub_categories.txt", "w") as f:
            no = 1
            # print(data[main_class])
            for i in data[main_class]:
                f.write(str(no) + ')' + str(i) + '\n')
                no = no + 1
        sub_categories = Path("sub_categories.txt").read_text()

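        # sub_categories.txt ends up as a numbered list fed into the prompt,
        # one sub-class per line in the form "1)<sub-class>".
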
        template2 = '''Given below are the sub-classes of {main_class}.
        {sub_categories}
        You are a text classifier. Categorise the transcript and captions into the one sub-class whose context they match, and generate only the sub-class name, with no explanation.
        Give more importance to the Transcript while classifying.
        Transcript: {transcript}
        Captions: {captions}
        Return only the Sub-class answer chosen from the list and nothing else.
        Answer in the format:
        Main-class => {main_class}
        Sub-class =>
        '''

        prompt2 = PromptTemplate(template=template2, input_variables=['sub_categories', 'transcript', 'captions', 'main_class'])

        if self.model_name == 'mistral':
            try:
                chain2 = LLMChain(llm=self.llm, prompt=prompt2)
                sub_class = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions, main_class=main_class)
            except Exception:
                prompt2 = template2.format(sub_categories=sub_categories, transcript=transcript, captions=captions, main_class=main_class)
                messages = [{"role": "user", "content": prompt2}]
                stream = self.client.chat_completion(messages, max_tokens=100)
                sub_class = stream.choices[0].message.content.strip()

            print("Preprocess Answer", sub_class)

            # Strip the "Sub-class =>" prefix if the model echoed the answer format.
            match = re.search(r"Sub-class =>\s*(.+)", sub_class)
            if match:
                sub_class = match.group(1).strip()
        else:
            prompt_text2 = template2.format(sub_categories=sub_categories, transcript=transcript, captions=captions, main_class=main_class)
            response = self.genai_model.generate_content(contents=prompt_text2)
            sub_class = response.text
            print("Preprocess Answer", sub_class)

        print("SUB CLASS", sub_class)
        if sub_class not in sub_categories_list:
            sub_class = category_class(sub_class, sub_categories_list)
        print("POST PROCESSED SUB CLASS", sub_class)
        tier_2_index_no = sub_categories_list.index(sub_class) + 1
        print("ANSWER:", sub_class)
        final_answer = (f"Tier 1 category : IAB{tier_1_index_no} : {main_class}\nTier 2 category : IAB{tier_1_index_no}-{tier_2_index_no} : {sub_class}")

        first_video = os.path.join(os.path.dirname(__file__), "American_football_heads_to_India_clip.mp4")
        second_video = os.path.join(os.path.dirname(__file__), "PersonalFinance_clip.mp4")

        # return final_answer, first_video, second_video
        end_time_generation = time.time()
        classification_time = end_time_generation - start_time_generation
        print("MODEL USED :", self.model_name)
        print("MODEL SETUP TIME :", setup_time)
        print("TIME TAKEN FOR AUDIO CONVERSION (WHISPER) :", audio_time)
        print("TIME TAKEN FOR IMAGE CAPTIONING :", caption_time)
        print("TIME TAKEN FOR CLASS GENERATION :", classification_time)
        print("TOTAL INFERENCE TIME :", audio_time + caption_time + classification_time)
        return final_answer

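    # Illustrative return shape:
    #   Tier 1 category : IAB<i> : <main-class>
    #   Tier 2 category : IAB<i>-<j> : <sub-class>
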
    def save_model_choice(self, model_name):
        global setup_time
        start_time_setup = time.time()

        self.model_name = model_name
        if self.model_name == 'mistral':
            print("Setting up Mistral model for Class Selection")
            self.setup_mistral_space_model()
        else:
            print("Setting up Gemini model for Class Selection")
            self.setup_gemini_model()
        end_time_setup = time.time()
        setup_time = end_time_setup - start_time_setup
        # print("MODEL SETUP TIME", setup_time)

        return "Model selected: " + model_name

    def launch_interface(self):
        css_code = """
        .gradio-container {background-color: #FFFFFF;color:#000000;background-size: 200px; background-image:url(https://gitlab.ignitarium.in/saran/logo/-/raw/aab7c77b4816b8a4bbdc5588eb57ce8b6c15c72d/ign_logo_white.png);background-repeat:no-repeat; position:relative; top:1px; left:5px; padding: 50px;text-align: right;background-position: right top;}
        """
        # Override the body background (when repeated, the last value wins).
        css_code += """
        :root {
        --body-background-fill: #000000;
        }
        """

        interface_1 = gr.Interface(
            self.save_model_choice,
            inputs=gr.Dropdown(choices=['gemini', 'mistral'], label="Select Model", info="Default model: Gemini"),
            # outputs=interface_1_output,
            outputs="text"
        )

        demo = gr.Interface(fn=self.classify_video, inputs="playablevideo", allow_flagging='never', examples=[
            os.path.join(os.path.dirname(__file__), "American_football_heads_to_India_clip.mp4"),
            os.path.join(os.path.dirname(__file__), "PersonalFinance_clip.mp4"),
            os.path.join(os.path.dirname(__file__), "Motorcycle_clip.mp4"),
            os.path.join(os.path.dirname(__file__), "Spirituality_1_clip.mp4"),
            os.path.join(os.path.dirname(__file__), "Science_clip.mp4")],
            cache_examples=False, outputs=["text"],
            css=css_code, title="Interactive Advertising Bureau (IAB) compliant Video-Ad classification")
        # demo.launch(debug=True)

        gr.TabbedInterface([interface_1, demo], ["Model Selection", "Video Classification"]).launch(debug=True)

    def run_inference(self, video_path, model):
        result = self.classify_video(video_path)
        print(result)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Process some videos.')
    parser.add_argument("video_path", nargs='?', default=None, help="Path to the video file")
    parser.add_argument("-n", "--no_of_frames", type=int, default=3, help="Number of frames for image captioning")
    parser.add_argument("--mode", choices=['interface', 'inference'], default='interface', help="Mode of operation: interface or inference")
    parser.add_argument("--model", choices=['gemini', 'mistral'], default='gemini', help="Model for inference")

    args = parser.parse_args()

    vc = VideoClassifier(no_of_frames=args.no_of_frames, mode=args.mode, model=args.model)

    if args.mode == 'interface':
        vc.launch_interface()
    elif args.mode == 'inference' and args.video_path and args.model:
        vc.run_inference(args.video_path, args.model)
    else:
        print("Error: No video path/model provided for inference mode.")

# Usage
### python app.py --mode interface
### python app.py videos/Spirituality_1_clip.mp4 -n 3 --mode inference --model gemini
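
# Programmatic use (sketch, assuming the example clip ships with the repo):
# vc = VideoClassifier(no_of_frames=3, mode='inference', model='gemini')
# print(vc.classify_video("videos/Spirituality_1_clip.mp4"))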