arjunanand13 committed
Commit 22545c2 (parent: 0d7d50f)

Update app.py

Files changed (1):
  1. app.py +7 -5

app.py CHANGED
@@ -183,8 +183,8 @@ class VideoClassifier:
             chain1 = LLMChain(llm=self.llm, prompt=prompt1)
             main_class = chain1.predict(main_categories=main_categories, transcript=transcript, captions=captions)
         except:
-            response = self.client(text=prompt1)
-            main_class = response['generated_text']
+            response = self.client.call_api(task="text-generation", inputs={"inputs": prompt1})
+            main_class = response[0]['generated_text']
 
         print(main_class)
         print("#######################################################")
@@ -252,8 +252,10 @@ class VideoClassifier:
             chain2 = LLMChain(llm=self.llm, prompt=prompt2)
             answer = chain2.predict(sub_categories=sub_categories, transcript=transcript, captions=captions,main_class=main_class)
         except:
-            response = self.client(text=prompt2)
-            answer = response['generated_text']
+            # response = self.client(text=prompt2)
+            # answer = response['generated_text']
+            response = self.client.call_api(task="text-generation", inputs={"inputs": prompt2})
+            main_class = response[0]['generated_text']
         print("Preprocess Answer",answer)
 
 
@@ -338,7 +340,7 @@ class VideoClassifier:
 if __name__ == "__main__":
     parser = argparse.ArgumentParser(description='Process some videos.')
     parser.add_argument("video_path", nargs='?', default=None, help="Path to the video file")
-    parser.add_argument("-n", "--no_of_frames", type=int, default=8, help="Number of frames for image captioning")
+    parser.add_argument("-n", "--no_of_frames", type=int, default=3, help="Number of frames for image captioning")
     parser.add_argument("--mode", choices=['interface', 'inference'], default='interface', help="Mode of operation: interface or inference")
     parser.add_argument("--model", choices=['gemini','mistral'],default='gemini',help="Model for inference")
 
 