arjunanand13 committed
Commit e5b7605
1 Parent(s): 76a7e86

Update app.py

Files changed (1)
  1. app.py +18 -16
app.py CHANGED
@@ -213,39 +213,41 @@ class VideoClassifier:
     Return only the answer chosen from list and nothing else
     Main-class => '''
 
-        prompt1 = PromptTemplate(template=template1, input_variables=['main_categories', 'transcript', 'captions'])
-        print("PROMPT 1",prompt1)
+        formatted_prompt = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
+        print("Formatted prompt:", formatted_prompt)
+
         # print(self.model)
         # print(f"Current model in use: {self.model}")
         if self.model_name=='mistral':
             try:
+                prompt1 = PromptTemplate(template=template1, input_variables=['main_categories', 'transcript', 'captions'])
+                print("PROMPT 1",prompt1)
                 print("Entering mistral chain approach")
                 chain1 = LLMChain(llm=self.llm, prompt=prompt1)
                 main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
             except:
-                print("Entering mistral template approach")
                 output_queue = queue.Queue()
-                # prompt1 = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
-                # messages = [{"role": "user", "content": prompt1}]
-                # stream = self.client.chat_completion(messages, max_tokens=100)
-                # main_class = stream.choices[0].message.content.strip()
+
+                def get_classification():
+                    messages = [{"role": "user", "content": formatted_prompt}]
+                    try:
+                        stream = self.client.chat_completion(messages, max_tokens=100)
+                        main_class = stream.choices[0].message.content.strip()
+                    except Exception as ex:
+                        main_class = f"Error: {str(ex)}"
+                    output_queue.put(main_class)
+
                 classification_thread = threading.Thread(target=get_classification)
                 classification_thread.start()
                 classification_thread.join(timeout=30)
+
                 if classification_thread.is_alive():
                     print("Classification timeout occurred.")
                     return "Timeout or error during classification."
-
+
                 # Get result from queue
                 main_class = output_queue.get()
-                print("MAIN CLASS template:", main_class)
-                return main_class
-                # output = ""
-                # for response in stream:
-                #     output += response['token'].text
-                #     print("Streaming output:", output)
-
-                # main_class = output.strip()
+                print("MAIN CLASS:", main_class)
 
         print(main_class)
         print("#######################################################")