arjunanand13 committed on
Commit
8d630c8
1 Parent(s): 3ae5523

Create app.py

Files changed (1): app.py (+437, -0)
app.py ADDED
@@ -0,0 +1,437 @@
+ import gradio as gr
+ import os
+ import whisper
+ import cv2
+ import json
+ import tempfile
+ import torch
+ import transformers
+ import re
+ import time
+ import queue      # used by the Mistral fallback classification thread
+ import threading  # used by the Mistral fallback classification thread
+ from torch import cuda, bfloat16
+ from moviepy.editor import VideoFileClip
+ from image_caption import Caption
+ from pathlib import Path
+ from langchain import PromptTemplate
+ from langchain import LLMChain
+ from langchain.llms import HuggingFacePipeline
+ from difflib import SequenceMatcher
+ import argparse
+ import shutil
+ from PIL import Image
+ import google.generativeai as genai
+ from huggingface_hub import InferenceClient
+ from typing import Iterator
+
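+ # Pipeline: audio track -> Whisper transcript; sampled frames -> Gemini captions;
+ # transcript + captions -> LLM (Gemini or Mistral) -> IAB Tier 1 / Tier 2 classes.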
+ class VideoClassifier:
+     # Module-level timers, reported together at the end of classify_video().
+     global audio_time, setup_time, caption_time, classification_time
+     audio_time = 0
+     setup_time = 0
+     caption_time = 0
+     classification_time = 0
+
+     def __init__(self, no_of_frames, mode='interface', model='gemini'):
+         self.no_of_frames = no_of_frames
+         self.mode = mode
+         self.model_name = model.strip().lower()
+         print(self.model_name)
+         os.environ["TOKENIZERS_PARALLELISM"] = "false"
+         # Read the token before model setup, which may need it.
+         self.hf_key = os.environ.get("HF_KEY", None)
+         if self.model_name == 'mistral':
+             print("Setting up Mistral model for Class Selection")
+             self.setup_mistral_model()
+         else:
+             print("Setting up Gemini model for Class Selection")
+             self.setup_gemini_model()
+         self.setup_paths()
+         # self.whisper_model = whisper.load_model("base")
+
+     def setup_paths(self):
+         self.path = './results'
+         if os.path.exists(self.path):
+             shutil.rmtree(self.path)
+         os.mkdir(self.path)
+
+     def setup_gemini_model(self):
+         self.genai = genai
+         # Read the key from the environment (GOOGLE_API_KEY assumed here);
+         # never commit a hardcoded API key.
+         self.genai.configure(api_key=os.environ.get("GOOGLE_API_KEY"))
+         self.genai_model = genai.GenerativeModel('gemini-pro')
+         self.whisper_model = whisper.load_model("base")
+         self.img_cap = Caption()
+
+     def setup_mistral_space_model(self):
+         # if not self.hf_key:
+         #     raise ValueError("Hugging Face API key is not set or invalid.")
+         # self.client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.2")
+         self.client = InferenceClient("mistralai/Mistral-7B-Instruct-v0.1")
+         # self.client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
+         self.whisper_model = whisper.load_model("base")
+         self.img_cap = Caption()
+
+     def setup_mistral_model(self):
+         self.model_id = "mistralai/Mistral-7B-Instruct-v0.2"
+         self.device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
+         # self.device_name = torch.cuda.get_device_name()
+         # print(f"Using device: {self.device} ({self.device_name})")
+         # 4-bit NF4 quantization so the 7B model fits on a single GPU.
+         bnb_config = transformers.BitsAndBytesConfig(
+             load_in_4bit=True,
+             bnb_4bit_quant_type='nf4',
+             bnb_4bit_use_double_quant=True,
+             bnb_4bit_compute_dtype=bfloat16,
+         )
+         hf_auth = self.hf_key  # never print the token itself
+         model_config = transformers.AutoConfig.from_pretrained(
+             self.model_id,
+             # use_auth_token=hf_auth
+         )
+         self.model = transformers.AutoModelForCausalLM.from_pretrained(
+             self.model_id,
+             trust_remote_code=True,
+             config=model_config,
+             quantization_config=bnb_config,
+             # use_auth_token=hf_auth
+         )
+         self.model.eval()
+         self.tokenizer = transformers.AutoTokenizer.from_pretrained(
+             self.model_id,
+             # use_auth_token=hf_auth
+         )
+         # Distinct name so the pipeline does not shadow the generate_text() method.
+         self.text_gen_pipeline = transformers.pipeline(
+             model=self.model, tokenizer=self.tokenizer,
+             return_full_text=True,
+             task='text-generation',
+             temperature=0.01,
+             max_new_tokens=32
+         )
+         self.whisper_model = whisper.load_model("base")
+         self.img_cap = Caption()
+         self.llm = HuggingFacePipeline(pipeline=self.text_gen_pipeline)
+
+     def audio_extraction(self, video_input):
+         global audio_time
+         start_time_audio = time.time()
+         print(f"Processing video: {video_input} with {self.no_of_frames} frames.")
+         wav_file = "results/audiotrack.wav"
+         video_clip = VideoFileClip(video_input)
+         audioclip = video_clip.audio
+         # write_audiofile() returns None, so don't reassign wav_file from it.
+         audioclip.write_audiofile(wav_file)
+         audioclip.close()
+         video_clip.close()
+         result = self.whisper_model.transcribe(wav_file, fp16=False)
+         transcript = result["text"]
+         print("TRANSCRIPT", transcript)
+         end_time_audio = time.time()
+         audio_time = end_time_audio - start_time_audio
+
+         return transcript
+
+     def generate_text(self, inputs, parameters=None):
+         if parameters is None:
+             parameters = {
+                 "temperature": 0.7,
+                 "max_new_tokens": 50,
+                 "top_p": 0.9,
+                 "repetition_penalty": 1.2
+             }
+         # InferenceClient objects are not callable; use text_generation().
+         return self.client.text_generation(inputs, **parameters)
+
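+     # Hypothetical usage sketch (assumes setup_mistral_space_model() has been
+     # called so self.client exists):
+     #   vc.generate_text("Summarise this ad transcript:", {"max_new_tokens": 40})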
+     def classify_video(self, video_input):
+         global classification_time, caption_time
+         transcript = self.audio_extraction(video_input)
+         start_time_caption = time.time()
+         video = cv2.VideoCapture(video_input)
+         length = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+         no_of_frame = int(self.no_of_frames)
+         # Evenly spaced sampling step; guard against clips shorter than the frame count.
+         temp_div = max(length // no_of_frame, 1)
+         currentframe = 50  # skip the first frames, which are often intro/blank
+         caption_text = []
+
+         for i in range(no_of_frame):
+             video.set(cv2.CAP_PROP_POS_FRAMES, currentframe)
+             ret, frame = video.read()
+             if ret:
+                 frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+                 image = Image.fromarray(frame)
+                 content = self.img_cap.predict_image_caption_gemini(image)
+                 print("content", content)
+                 caption_text.append(content)
+                 currentframe += temp_div - 1
+             else:
+                 break
+
+         captions = ", ".join(caption_text)
+         print("CAPTIONS", captions)
+         video.release()
+         cv2.destroyAllWindows()
+         end_time_caption = time.time()
+         caption_time = end_time_caption - start_time_caption
+
+         start_time_generation = time.time()
+         main_categories = Path("main_classes.txt").read_text()
+         main_categories_list = ['Automotive', 'Books and Literature', 'Business and Finance', 'Careers', 'Education', 'Family and Relationships',
+                                 'Fine Art', 'Food & Drink', 'Healthy Living', 'Hobbies & Interests', 'Home & Garden', 'Medical Health', 'Movies', 'Music and Audio',
+                                 'News and Politics', 'Personal Finance', 'Pets', 'Pop Culture', 'Real Estate', 'Religion & Spirituality', 'Science', 'Shopping', 'Sports',
+                                 'Style & Fashion', 'Technology & Computing', 'Television', 'Travel', 'Video Gaming']
+
+         # Note: currently unused; kept for a streaming text-generation path.
+         generate_kwargs = {
+             "temperature": 0.9,
+             "max_new_tokens": 256,
+             "top_p": 0.95,
+             "repetition_penalty": 1.0,
+             "do_sample": True,
+             "seed": 42,
+             "return_full_text": False
+         }
+
+         def get_classification():
+             # Runs in a worker thread so the request can be abandoned on timeout.
+             messages = [{"role": "user", "content": prompt1}]
+             try:
+                 stream = self.client.chat_completion(messages, max_tokens=100)
+                 main_class = stream.choices[0].message.content.strip()
+             except Exception as e:
+                 main_class = f"Error: {str(e)}"
+             output_queue.put(main_class)
+
+         template1 = '''Given below are the different types of main video classes
+         {main_categories}
+         You are a text classifier that categorises the transcript and captions into the one main class whose context matches best. Generate only the main class name, with no sub-classes or explanation.
+         Give more importance to the Transcript while classifying.
+         Transcript: {transcript}
+         Captions: {captions}
+         Return only the answer chosen from the list and nothing else.
+         Main-class => '''
+
+         prompt1 = PromptTemplate(template=template1, input_variables=['main_categories', 'transcript', 'captions'])
+         print("PROMPT 1", prompt1)
+         # print(self.model)
+         # print(f"Current model in use: {self.model}")
+         if self.model_name == 'mistral':
+             try:
+                 print("Entering mistral chain approach")
+                 chain1 = LLMChain(llm=self.llm, prompt=prompt1)
+                 main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
+             except Exception:
+                 print("Entering mistral template approach")
+                 output_queue = queue.Queue()
+                 # The worker thread needs the fully formatted prompt string,
+                 # not the PromptTemplate object, so build it before starting.
+                 prompt1 = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
+                 classification_thread = threading.Thread(target=get_classification)
+                 classification_thread.start()
+                 classification_thread.join(timeout=30)
+                 if classification_thread.is_alive():
+                     print("Classification timeout occurred.")
+                     return "Timeout or error during classification."
+
+                 # Get result from queue; this fallback returns the main class only.
+                 main_class = output_queue.get()
+                 print("MAIN CLASS template:", main_class)
+                 return main_class
+                 # output = ""
+                 # for response in stream:
+                 #     output += response['token'].text
+                 # print("Streaming output:", output)
+                 # main_class = output.strip()
+
+             print(main_class)
+             print("#######################################################")
+             try:
+                 pattern = r"Main-class =>\s*(.+)"
+                 match = re.search(pattern, main_class)
+                 if match:
+                     main_class = match.group(1).strip()
+             except Exception:
+                 pass
+         else:
+             prompt_text = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
+             response = self.genai_model.generate_content(contents=prompt_text)
+             main_class = response.text.strip()
+
+         print(main_class)
+         print("#######################################################")
+         print("MAIN CLASS: ", main_class)
+
+         def category_class(class_name, categories_list):
+             # Snap a free-form model answer to the closest entry in the list,
+             # so the .index() lookups below cannot raise ValueError.
+             def similar(str1, str2):
+                 return SequenceMatcher(None, str1, str2).ratio()
+             index_no = 0
+             sim = 0
+             for i, sub in enumerate(categories_list):
+                 res = similar(class_name, sub)
+                 if res > sim:
+                     sim = res
+                     index_no = i
+             return categories_list[index_no]
+
+         if main_class not in main_categories_list:
+             main_class = category_class(main_class, main_categories_list)
+             print("POST PROCESSED MAIN CLASS : ", main_class)
+         tier_1_index_no = main_categories_list.index(main_class) + 1
+
+         with open('categories_json.txt') as f:
+             data = json.load(f)
+         sub_categories_list = data[main_class]
+         print("SUB CATEGORIES LIST", sub_categories_list)
+         with open("sub_categories.txt", "w") as f:
+             no = 1
+             # print(data[main_class])
+             for i in data[main_class]:
+                 f.write(str(no) + ')' + str(i) + '\n')
+                 no = no + 1
+         sub_categories = Path("sub_categories.txt").read_text()
+
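+         # sub_categories.txt now holds a numbered list (e.g. "1)...\n2)..."),
+         # which is spliced verbatim into the sub-class prompt below.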
+         template2 = '''Given below are the sub-classes of {main_class}.
+         {sub_categories}
+         You are a text classifier that categorises the transcript and captions into the one sub-class whose context matches best. Generate only the sub-class name, with no explanation.
+         Give more importance to the Transcript while classifying.
+         Transcript: {transcript}
+         Captions: {captions}
+         Return only the Sub-class answer chosen from the list and nothing else.
+         Answer in the format:
+         Main-class => {main_class}
+         Sub-class =>
+         '''
+
+         prompt2 = PromptTemplate(template=template2, input_variables=['sub_categories', 'transcript', 'captions', 'main_class'])
+
+         if self.model_name == 'mistral':
+             try:
+                 chain2 = LLMChain(llm=self.llm, prompt=prompt2)
+                 sub_class = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions, main_class=main_class)
+             except Exception:
+                 prompt2 = template2.format(sub_categories=sub_categories, transcript=transcript, captions=captions, main_class=main_class)
+                 messages = [{"role": "user", "content": prompt2}]
+                 stream = self.client.chat_completion(messages, max_tokens=100)
+                 sub_class = stream.choices[0].message.content.strip()
+
+             print("Preprocess Answer", sub_class)
+
+             try:
+                 pattern = r"Sub-class =>\s*(.+)"
+                 match = re.search(pattern, sub_class)
+                 if match:
+                     sub_class = match.group(1).strip()
+             except Exception:
+                 pass
+         else:
+             # Use template2 here; template1 would re-run main-class classification.
+             prompt_text2 = template2.format(sub_categories=sub_categories, transcript=transcript, captions=captions, main_class=main_class)
+             response = self.genai_model.generate_content(contents=prompt_text2)
+             sub_class = response.text.strip()
+             print("Preprocess Answer", sub_class)
+
335
+ print("SUB CLASS",sub_class)
336
+ if sub_class not in sub_categories_list:
337
+ sub_class = category_class(sub_class,sub_categories_list)
338
+ print("POST PROCESSED SUB CLASS",sub_class)
339
+ tier_2_index_no = sub_categories_list.index(sub_class) + 1
340
+ print("ANSWER:",sub_class)
341
+ final_answer = (f"Tier 1 category : IAB{tier_1_index_no} : {main_class}\nTier 2 category : IAB{tier_1_index_no}-{tier_2_index_no} : {sub_class}")
342
+
343
+ first_video = os.path.join(os.path.dirname(__file__), "American_football_heads_to_India_clip.mp4")
344
+ second_video = os.path.join(os.path.dirname(__file__), "PersonalFinance_clip.mp4")
345
+
346
+ # return final_answer, first_video, second_video
347
+ end_time_generation = time.time()
348
+ classification_time = end_time_generation-start_time_generation
349
+ print ("MODEL USED :",self.model_name)
350
+ print("MODEL SETUP TIME :",setup_time)
351
+ print("TIME TAKEN FOR AUDIO CONVERSION (WHISPER) :",audio_time)
352
+ print("TIME TAKEN FOR IMAGE CAPTIONING :", caption_time)
353
+ print("TIME TAKEN FOR CLASS GENERATION :",classification_time)
354
+ print("TOTAL INFERENCE TIME :",audio_time+caption_time+classification_time)
355
+ return final_answer
356
+
357
+
+     def save_model_choice(self, model_name):
+         global setup_time
+         start_time_setup = time.time()
+
+         self.model_name = model_name
+         if self.model_name == 'mistral':
+             print("Setting up Mistral model for Class Selection")
+             self.setup_mistral_space_model()
+         else:
+             print("Setting up Gemini model for Class Selection")
+             self.setup_gemini_model()
+         end_time_setup = time.time()
+         setup_time = end_time_setup - start_time_setup
+
+         return "Model selected: " + model_name
+
+     def launch_interface(self):
+         css_code = """
+         .gradio-container {background-color: #FFFFFF;color:#000000;background-size: 200px; background-image:url(https://gitlab.ignitarium.in/saran/logo/-/raw/aab7c77b4816b8a4bbdc5588eb57ce8b6c15c72d/ign_logo_white.png);background-repeat:no-repeat; position:relative; top:1px; left:5px; padding: 50px;text-align: right;background-position: right top;}
+         """
+         # Only one :root block is needed; a second would silently override the first.
+         css_code += """
+         :root {
+             --body-background-fill: #FFFFFF;
+         }
+         """
+
+         interface_1 = gr.Interface(
+             self.save_model_choice,
+             inputs=gr.Dropdown(choices=['gemini', 'mistral'], label="Select Model", info="Default model: Gemini"),
+             outputs="text"
+         )
+
+         demo = gr.Interface(fn=self.classify_video, inputs="playablevideo", allow_flagging='never',
+                             examples=[
+                                 os.path.join(os.path.dirname(__file__), "American_football_heads_to_India_clip.mp4"),
+                                 os.path.join(os.path.dirname(__file__), "PersonalFinance_clip.mp4"),
+                                 os.path.join(os.path.dirname(__file__), "Motorcycle_clip.mp4"),
+                                 os.path.join(os.path.dirname(__file__), "Spirituality_1_clip.mp4"),
+                                 os.path.join(os.path.dirname(__file__), "Science_clip.mp4")],
+                             cache_examples=False, outputs=["text"],
+                             css=css_code, title="Interactive Advertising Bureau (IAB) compliant Video-Ad classification")
+         # demo.launch(debug=True)
+
+         gr.TabbedInterface([interface_1, demo], ["Model Selection", "Video Classification"]).launch(debug=True)
+
+     def run_inference(self, video_path, model):
+         # The model was already chosen in __init__; argument kept for CLI symmetry.
+         result = self.classify_video(video_path)
+         print(result)
+
+
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser(description='Process some videos.')
+     parser.add_argument("video_path", nargs='?', default=None, help="Path to the video file")
+     parser.add_argument("-n", "--no_of_frames", type=int, default=3, help="Number of frames for image captioning")
+     parser.add_argument("--mode", choices=['interface', 'inference'], default='interface', help="Mode of operation: interface or inference")
+     parser.add_argument("--model", choices=['gemini', 'mistral'], default='gemini', help="Model for inference")
+
+     args = parser.parse_args()
+
+     vc = VideoClassifier(no_of_frames=args.no_of_frames, mode=args.mode, model=args.model)
+
+     if args.mode == 'interface':
+         vc.launch_interface()
+     elif args.mode == 'inference' and args.video_path and args.model:
+         vc.run_inference(args.video_path, args.model)
+     else:
+         print("Error: No video path/model provided for inference mode.")
+
+ # Usage
+ ### python app.py --mode interface
+ ### python app.py videos/Spirituality_1_clip.mp4 -n 3 --mode inference --model gemini
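+ ### Required environment variables: HF_KEY (Mistral paths), GOOGLE_API_KEY (Gemini)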