arjunanand13 commited on
Commit
1b728eb
1 Parent(s): 7de3e83

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -15
app.py CHANGED
@@ -192,14 +192,14 @@ class VideoClassifier:
192
  chain1 = LLMChain(llm=self.llm, prompt=prompt1)
193
  main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
194
  except:
195
- prompt_text = template1.format(main_categories=main_categories, transcript=transcript, captions=captions)
196
- stream = self.client.text_generation(prompt_text, **generate_kwargs, stream=True, details=True)
197
- output = ""
198
- for response in stream:
199
- output += response['token'].text
200
- print("Streaming output:", output)
201
 
202
- main_class = output.strip()
203
 
204
  print(main_class)
205
  print("#######################################################")
@@ -267,13 +267,9 @@ class VideoClassifier:
267
  chain2 = LLMChain(llm=self.llm, prompt=prompt2)
268
  answer = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions,main_class=main_class)
269
  except:
270
- stream = self.client.text_generation(prompt2, **generate_kwargs, stream=True, details=True)
271
- output = ""
272
- for response in stream:
273
- output += response['token'].text
274
- print("Streaming output:", output)
275
-
276
- main_class = output.strip()
277
  print("Preprocess Answer",answer)
278
 
279
 
@@ -308,7 +304,7 @@ class VideoClassifier:
308
  self.model_name = model_name
309
  if self.model_name=='mistral':
310
  print("Setting up Mistral model for Class Selection")
311
- self.setup_mistral_model()
312
  else :
313
  print("Setting up Gemini model for Class Selection")
314
  self.setup_gemini_model()
 
192
  chain1 = LLMChain(llm=self.llm, prompt=prompt1)
193
  main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
194
  except:
195
+ messages = [{"role": "user", "content": prompt1}]
196
+ main_class = self.client.chat_completion(messages, max_tokens=100).choices[0].message.content
197
+ # output = ""
198
+ # for response in stream:
199
+ # output += response['token'].text
200
+ # print("Streaming output:", output)
201
 
202
+ # main_class = output.strip()
203
 
204
  print(main_class)
205
  print("#######################################################")
 
267
  chain2 = LLMChain(llm=self.llm, prompt=prompt2)
268
  answer = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions,main_class=main_class)
269
  except:
270
+ messages = [{"role": "user", "content": prompt2}]
271
+ answer = self.client.chat_completion(messages, max_tokens=100).choices[0].message.content
272
+
 
 
 
 
273
  print("Preprocess Answer",answer)
274
 
275
 
 
304
  self.model_name = model_name
305
  if self.model_name=='mistral':
306
  print("Setting up Mistral model for Class Selection")
307
+ self.setup_mistral_space_model()
308
  else :
309
  print("Setting up Gemini model for Class Selection")
310
  self.setup_gemini_model()