Julian Arango committed on
Commit 6509135 · unverified · 1 Parent(s): 2ce5d1e

Create models.py

Files changed (1)
  1. models.py +26 -0
models.py ADDED
@@ -0,0 +1,26 @@
+ # Import the necessary libraries
+ from transformers import pipeline
+
+ # Initialize the text classification model with a pre-trained emotion model
+ model_text_emotion = pipeline("text-classification", model="j-hartmann/emotion-english-distilroberta-base")
+
+ # Initialize the audio classification model with a pre-trained SER (speech emotion recognition) model
+ model_speech_emotion = pipeline("audio-classification", model="aherzberg/ser_model_fixed_label")
+
+ # Initialize the automatic speech recognition model with a pre-trained model that converts speech to text
+ model_voice2text = pipeline("automatic-speech-recognition", model="openai/whisper-tiny.en")
+
+ # A function that uses the text classification model to predict the emotion of a given text input
+ def infere_text_emotion(text):
+     return model_text_emotion(text)[0]["label"].capitalize()
+
+ # A function that uses the audio classification model to predict the emotion of a given speech input
+ def infere_speech_emotion(audio_file):
+     # Dict that maps the speech model's emotion labels to the text model's labels
+     emotions_dict = {"angry": "Anger", "disgust": "Disgust", "fear": "Fear", "happy": "Joy", "neutral": "Neutral", "sad": "Sadness"}
+     inference = model_speech_emotion(audio_file)[0]["label"]
+     return emotions_dict[inference]
+
+ # A function that uses the automatic speech recognition model to convert speech (as an audio file) to text
+ def infere_voice2text(audio_file):
+     return model_voice2text(audio_file)["text"]
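Below is a minimal usage sketch (not part of the commit), assuming models.py is importable from the working directory and that a local audio file exists at the hypothetical path sample.wav:

# Hypothetical usage of the helpers defined in models.py; "sample.wav" is an assumed local file
from models import infere_text_emotion, infere_speech_emotion, infere_voice2text

# Text emotion: returns a capitalized label such as "Joy"
print(infere_text_emotion("I just got the job, this is wonderful!"))

# Speech emotion: classifies the audio and maps the label to the text model's vocabulary
print(infere_speech_emotion("sample.wav"))

# Speech-to-text: transcribes the audio file with Whisper
print(infere_voice2text("sample.wav"))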