arjunanand13 committed on
Commit
f271e1e
1 Parent(s): 4c167ca

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +37 -6
app.py CHANGED
@@ -20,6 +20,8 @@ import argparse
20
  import shutil
21
  from PIL import Image
22
  import google.generativeai as genai
 
 
23
 
24
  class VideoClassifier:
25
  def __init__(self, no_of_frames, mode='interface',model='gemini'):
@@ -50,6 +52,16 @@ class VideoClassifier:
50
  self.whisper_model = whisper.load_model("base")
51
  self.img_cap = Caption()
52
 
 
 
 
 
 
 
 
 
 
 
53
  def setup_mistral_model(self):
54
  self.model_id = "mistralai/Mistral-7B-Instruct-v0.2"
55
  self.device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
@@ -106,7 +118,17 @@ class VideoClassifier:
106
  print("TRANSCRIPT",transcript)
107
  return transcript
108
 
109
-
 
 
 
 
 
 
 
 
 
 
110
  def classify_video(self,video_input):
111
 
112
  transcript=self.audio_extraction(video_input)
@@ -157,8 +179,13 @@ class VideoClassifier:
157
  # print(self.model)
158
  # print(f"Current model in use: {self.model}")
159
  if self.model_name=='mistral':
160
- chain1 = LLMChain(llm=self.llm, prompt=prompt1)
161
- main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
 
 
 
 
 
162
  print(main_class)
163
  print("#######################################################")
164
  pattern = r"Main-class =>\s*(.+)"
@@ -221,8 +248,12 @@ class VideoClassifier:
221
  prompt2 = PromptTemplate(template=template2, input_variables=['sub_categories', 'transcript', 'captions','main_class'])
222
 
223
  if self.model_name=='mistral':
224
- chain2 = LLMChain(llm=self.llm, prompt=prompt2)
225
- answer = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions,main_class=main_class)
 
 
 
 
226
  print("Preprocess Answer",answer)
227
 
228
 
@@ -257,7 +288,7 @@ class VideoClassifier:
257
  self.model_name = model_name
258
  if self.model_name=='mistral':
259
  print("Setting up Mistral model for Class Selection")
260
- self.setup_mistral_model()
261
  else :
262
  print("Setting up Gemini model for Class Selection")
263
  self.setup_gemini_model()
 
20
  import shutil
21
  from PIL import Image
22
  import google.generativeai as genai
23
+ from huggingface_hub import InferenceClient
24
+
25
 
26
  class VideoClassifier:
27
  def __init__(self, no_of_frames, mode='interface',model='gemini'):
 
52
  self.whisper_model = whisper.load_model("base")
53
  self.img_cap = Caption()
54
 
55
def setup_mistral_space_model(self):
    """Configure this classifier to use the hosted Mistral-7B-Instruct model.

    Creates a ``huggingface_hub.InferenceClient`` bound to
    ``mistralai/Mistral-7B-Instruct-v0.2`` and (re)loads the Whisper audio
    model and image-caption helper used by the classification pipeline.

    Requires ``self.hf_key`` to already hold a Hugging Face API token.

    Raises:
        ValueError: if ``self.hf_key`` is unset or falsy.
    """
    if not self.hf_key:
        raise ValueError("Hugging Face API key is not set or invalid.")

    # BUG FIX: InferenceClient accepts the credential as ``token=``;
    # the original ``api_token=`` keyword does not exist and raises
    # TypeError at construction time.
    self.client = InferenceClient(
        "mistralai/Mistral-7B-Instruct-v0.2", token=self.hf_key
    )

    # NOTE(review): whisper_model and img_cap appear to be initialized in
    # __init__ as well — re-loading them here may be redundant; confirm
    # before removing.
    self.whisper_model = whisper.load_model("base")
    self.img_cap = Caption()
65
  def setup_mistral_model(self):
66
  self.model_id = "mistralai/Mistral-7B-Instruct-v0.2"
67
  self.device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
 
118
  print("TRANSCRIPT",transcript)
119
  return transcript
120
 
121
def generate_text(self, inputs, parameters=None):
    """Generate a completion for ``inputs`` via the hosted Mistral client.

    Args:
        inputs: Prompt string sent to the model.
        parameters: Optional dict of text-generation settings
            (``temperature``, ``max_new_tokens``, ``top_p``,
            ``repetition_penalty``). A conservative default configuration
            is used when omitted. ``None`` default avoids the shared
            mutable-default pitfall.

    Returns:
        The generated text produced by the inference endpoint.
    """
    if parameters is None:
        parameters = {
            "temperature": 0.7,
            "max_new_tokens": 50,
            "top_p": 0.9,
            "repetition_penalty": 1.2,
        }

    # BUG FIX: huggingface_hub.InferenceClient instances are not callable
    # (calling one raises TypeError); invoke the text_generation() task
    # method and pass the settings as keyword arguments.
    return self.client.text_generation(inputs, **parameters)
132
  def classify_video(self,video_input):
133
 
134
  transcript=self.audio_extraction(video_input)
 
179
  # print(self.model)
180
  # print(f"Current model in use: {self.model}")
181
  if self.model_name=='mistral':
182
+ try:
183
+ chain1 = LLMChain(llm=self.llm, prompt=prompt1)
184
+ main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
185
+ except:
186
+ response = self.client(text=prompt1)
187
+ main_class = response['generated_text']
188
+
189
  print(main_class)
190
  print("#######################################################")
191
  pattern = r"Main-class =>\s*(.+)"
 
248
  prompt2 = PromptTemplate(template=template2, input_variables=['sub_categories', 'transcript', 'captions','main_class'])
249
 
250
  if self.model_name=='mistral':
251
+ try:
252
+ chain2 = LLMChain(llm=self.llm, prompt=prompt2)
253
+ answer = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions,main_class=main_class)
254
+ except:
255
+ response = self.client(text=prompt2)
256
+ answer = response['generated_text']
257
  print("Preprocess Answer",answer)
258
 
259
 
 
288
  self.model_name = model_name
289
  if self.model_name=='mistral':
290
  print("Setting up Mistral model for Class Selection")
291
+ self.setup_mistral_space_model()
292
  else :
293
  print("Setting up Gemini model for Class Selection")
294
  self.setup_gemini_model()