enoreyes committed on
Commit
c39fcf1
1 Parent(s): 09a916e

Update utils.py

Browse files
Files changed (1) hide show
  1. utils.py +0 -57
utils.py CHANGED
@@ -84,63 +84,6 @@ def create_fig(x_min, x_max, to_plot, plot_sentences):
84
 
85
  return fig
86
 
87
def sentiment(diarized, emotion_pipeline):
    """
    Compute the customer's per-sentence emotion and a sentiment-over-time plot.

    diarized: a list of tuples. Each tuple has a string to be displayed and a
        label for highlighting. The start/end times are not highlighted:
        [(speaker text, speaker id), (start time/end time, None)]
    emotion_pipeline: a callable (e.g. a transformers text-classification
        pipeline) mapping a list of sentences to a list of dicts with
        "label" and "score" keys.

    Returns:
        (customer_sentiments, fig) where customer_sentiments is a list of
        (annotated sentence, emotion label) tuples for highlighted display,
        and fig is the figure produced by create_fig.
    """
    customer_sentiments = []
    plot_sentences = []
    to_plot = []

    # Used to set the x range of ticks on the plot.
    x_min = 100
    x_max = 0

    # diarized alternates (speech, speaker_id) and (time-range, None) entries.
    for i in range(0, len(diarized), 2):
        speaker_speech, speaker_id = diarized[i]
        times, _ = diarized[i + 1]

        # times carries a 5-character prefix before "start-end"
        # (presumably a label like "time:"); verify against the producer.
        start_time, end_time = times[5:].split("-")
        start_time, end_time = float(start_time), float(end_time)

        if "Customer" in speaker_id:
            sentences = split_into_sentences(speaker_speech)
            # BUG FIX: guard against an empty sentence list — previously
            # interval_size = span / len(sentences) raised ZeroDivisionError,
            # and it was computed even for segments where it was never used.
            if sentences:
                # Spread sentences evenly across the segment's time span.
                interval_size = (end_time - start_time) / len(sentences)
                outputs = emotion_pipeline(sentences)

                for idx, (o, t) in enumerate(zip(outputs, sentences)):
                    sent = "neutral"
                    # thresholds is a module-level per-label score cutoff —
                    # NOTE(review): defined elsewhere in utils.py; confirm.
                    if o["score"] > thresholds[o["label"]]:
                        customer_sentiments.append(
                            (t + f"({round(idx*interval_size+start_time,1)} s)", o["label"])
                        )
                        if o["label"] in {"joy", "love", "surprise"}:
                            sent = "positive"
                        elif o["label"] in {"sadness", "anger", "fear"}:
                            sent = "negative"
                    if sent != "neutral":
                        to_plot.append((start_time + idx * interval_size, sent))
                        plot_sentences.append(t)

        # Track overall time bounds across every segment so the plot spans
        # the whole conversation. NOTE(review): source indentation was lost
        # in the diff; assumed loop-level, not customer-only — confirm.
        if start_time < x_min:
            x_min = start_time
        if end_time > x_max:
            x_max = end_time

    # Pad the plot range slightly on both sides.
    x_min -= 5
    x_max += 5

    fig = create_fig(x_min, x_max, to_plot, plot_sentences)

    return customer_sentiments, fig
144
  def speech_to_text(speech_file, speaker_segmentation, whisper, alignment_model, metadata, whisper_device):
145
  speaker_output = speaker_segmentation(speech_file)
146
  result = whisper.transcribe(speech_file)
 
84
 
85
  return fig
86
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
  def speech_to_text(speech_file, speaker_segmentation, whisper, alignment_model, metadata, whisper_device):
88
  speaker_output = speaker_segmentation(speech_file)
89
  result = whisper.transcribe(speech_file)