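# Gradio demo: conversation transcription / summarization UI.
# The Whisper + pyannote speaker-diarization pipeline below is commented out
# so the interface can be developed without the heavy model dependencies;
# only the placeholder greet* callbacks are live. The datetime/subprocess/
# wave/contextlib imports are kept for the disabled pipeline.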
# import whisper
import gradio as gr
import datetime

import subprocess
import wave
import contextlib

# import torch
# import pyannote.audio
# from pyannote.audio.pipelines.speaker_verification import PretrainedSpeakerEmbedding
# from pyannote.audio import Audio
# from pyannote.core import Segment
# from sklearn.cluster import AgglomerativeClustering
# import numpy as np

# model = whisper.load_model("large-v2")
# embedding_model = PretrainedSpeakerEmbedding(
#     "speechbrain/spkrec-ecapa-voxceleb",
#     device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# )

# def transcribe(audio, num_speakers):
#   path, error = convert_to_wav(audio)
#   if error is not None:
#     return error

#   duration = get_duration(path)
#   if duration > 4 * 60 * 60:
#     return "Audio duration too long"

#   result = model.transcribe(path)
#   segments = result["segments"]

#   num_speakers = min(max(round(num_speakers), 1), len(segments))
#   if len(segments) == 1:
#     segments[0]['speaker'] = 'SPEAKER 1'
#   else:
#     embeddings = make_embeddings(path, segments, duration)
#     add_speaker_labels(segments, embeddings, num_speakers)
#   output = get_output(segments)
#   return output

# def convert_to_wav(path):
#   if path[-3:] != 'wav':
#     new_path = '.'.join(path.split('.')[:-1]) + '.wav'
#     try:
#       subprocess.call(['ffmpeg', '-i', path, new_path, '-y'])
#     except Exception:
#       return path, 'Error: Could not convert file to .wav'
#     path = new_path
#   return path, None

# def get_duration(path):
#   with contextlib.closing(wave.open(path,'r')) as f:
#     frames = f.getnframes()
#     rate = f.getframerate()
#     return frames / float(rate)

# def make_embeddings(path, segments, duration):
#   embeddings = np.zeros(shape=(len(segments), 192))
#   for i, segment in enumerate(segments):
#     embeddings[i] = segment_embedding(path, segment, duration)
#   return np.nan_to_num(embeddings)

# audio = Audio()

# def segment_embedding(path, segment, duration):
#   start = segment["start"]
#   # Whisper overshoots the end timestamp in the last segment
#   end = min(duration, segment["end"])
#   clip = Segment(start, end)
#   waveform, sample_rate = audio.crop(path, clip)
#   return embedding_model(waveform[None])

# def add_speaker_labels(segments, embeddings, num_speakers):
#   clustering = AgglomerativeClustering(num_speakers).fit(embeddings)
#   labels = clustering.labels_
#   for i in range(len(segments)):
#     segments[i]["speaker"] = 'SPEAKER ' + str(labels[i] + 1)

# def time(secs):
#   return datetime.timedelta(seconds=round(secs))

# def get_output(segments):
#   output = ''
#   for (i, segment) in enumerate(segments):
#     if i == 0 or segments[i - 1]["speaker"] != segment["speaker"]:
#       if i != 0:
#         output += '\n\n'
#       output += segment["speaker"] + ' ' + str(time(segment["start"])) + '\n\n'
#     output += segment["text"][1:] + ' '
#   return output

# Placeholder callbacks used while the pipeline above is disabled. `s` shows
# state being shared between two handlers through a module-level global.
s = ""

def greet1(name):
    global s
    s = "modified"
    return "Hello " + name + "!"


def greet2(name):
    return "Hi " + name + "!" + " " + s


def greet3(name):
    return "Hola " + name + "!"

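# UI layout: the left column collects the audio file and speaker count; the
# right column holds the Textboxes the summaries would be written into.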
with gr.Blocks() as demo:
  with gr.Row():
    with gr.Column():
      audio_file = gr.UploadButton(label="Upload an audio file (.wav)")
      # name = gr.Textbox(label="Name", placeholder="Name") # TODO: remove
      number_of_speakers = gr.Number(label="Number of Speakers", value=2)
      with gr.Row():
        btn_clear = gr.Button(value="Clear")
        btn_submit = gr.Button(value="Submit")
    with gr.Column():
      title = gr.Textbox(label="Title", placeholder="Title for Conversation")
      short_summary = gr.Textbox(label="Short Summary", placeholder="Short Summary for Conversation")
      sentiment_analysis = gr.Textbox(label="Sentiment Analysis", placeholder="Sentiment Analysis for Conversation")
      quality = gr.Textbox(label="Quality of Conversation", placeholder="Quality of Conversation")
      detailed_summary = gr.Textbox(label="Detailed Summary", placeholder="Detailed Summary for Conversation")
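  # The Clear/Submit buttons are not wired to any callback in this version.
  # A minimal hookup (an assumption, using the placeholder handlers) could be:
  #   btn_submit.click(fn=greet1, inputs=[title], outputs=[short_summary])
  #   btn_clear.click(fn=lambda: "", inputs=None, outputs=[title])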
  gr.Markdown("## Examples")
  gr.Examples(
    examples=[
      ["Harsh"],
      ["Rahul"],
    ],
    inputs=[title],  # each example row must supply one value per component in `inputs`
    outputs=[short_summary],
    fn=greet1,
    cache_examples=True,
  )
  gr.Markdown(
    """
    See [github.com/facebookresearch/audiocraft](https://github.com/facebookresearch/audiocraft)
    for more details.
    """
  )

demo.launch()