abrar-adnan committed on
Commit
6d1ef96
1 Parent(s): 8ced839

added output

Browse files
Files changed (1) hide show
  1. optimized.py +9 -4
optimized.py CHANGED
@@ -10,6 +10,7 @@ from fastai.vision.all import load_learner
10
  from transformers import WhisperProcessor, WhisperForConditionalGeneration, pipeline
11
 
12
  emotion_pipeline = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-emotion")
 
13
 
14
  model = load_learner("gaze-recognizer-v3.pkl")
15
 
@@ -17,6 +18,14 @@ def extract_audio(video_path):
17
  clip = mp.VideoFileClip(video_path)
18
  clip.audio.write_audiofile("audio.wav")
19
 
 
 
 
 
 
 
 
 
20
  def get_transcription(path):
21
  extract_audio(path)
22
 
@@ -34,10 +43,6 @@ def get_transcription(path):
34
  transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
35
  return transcription[0]
36
 
37
- def analyze_emotion(text):
38
- result = emotion_pipeline(text)
39
- return result
40
-
41
  def process_frame(frame):
42
  gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
43
  face_locations = face_recognition.face_locations(gray)
 
10
  from transformers import WhisperProcessor, WhisperForConditionalGeneration, pipeline
11
 
12
  emotion_pipeline = pipeline("text-classification", model="cardiffnlp/twitter-roberta-base-emotion")
13
+ sentiment_pipeline = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
14
 
15
  model = load_learner("gaze-recognizer-v3.pkl")
16
 
 
18
  clip = mp.VideoFileClip(video_path)
19
  clip.audio.write_audiofile("audio.wav")
20
 
21
+ def analyze_emotion(text):
22
+ result = emotion_pipeline(text)
23
+ return result
24
+
25
+ def analyze_sentiment(text):
26
+ result = sentiment_pipeline(text)
27
+ return result
28
+
29
  def get_transcription(path):
30
  extract_audio(path)
31
 
 
43
  transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)
44
  return transcription[0]
45
 
 
 
 
 
46
  def process_frame(frame):
47
  gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
48
  face_locations = face_recognition.face_locations(gray)