dipesh1701 committed
Commit 9df7a04 · 1 Parent(s): ebb242c
fix

app.py CHANGED
@@ -1,31 +1,19 @@
 # !pip install -q gradio
-# !pip install -q pyChatGPT
 # !pip install -q git+https://github.com/openai/whisper.git
 # !pip install -q --upgrade git+https://github.com/huggingface/diffusers.git transformers accelerate scipy
-
-
+import os
+os.system("pip install -q pyChatGPT")
+os.system("pip install git+https://github.com/openai/whisper.git")
+import whisper
 import gradio as gr
 import time
 import warnings

-from transformers import WhisperProcessor, WhisperForConditionalGeneration
-
-# load model and processor
-processor = WhisperProcessor.from_pretrained("openai/whisper-large")
-model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")
-
 warnings.filterwarnings("ignore")

-
-from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
-
-models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
-    "facebook/fastspeech2-en-ljspeech"
-)
-
 secret_token = ""

-
+model = whisper.load_model("base")

 from diffusers import DiffusionPipeline

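The setup change above swaps the commented-out pip magics for os.system installs at startup, drops the transformers checkpoint (openai/whisper-large via WhisperProcessor / WhisperForConditionalGeneration) together with the fairseq FastSpeech2 ensemble, and loads the small openai-whisper model instead. For reference, driving the deleted transformers checkpoint would look roughly like the sketch below; everything in it, including the silent audio_array placeholder, is assumed from the transformers API rather than taken from this repo.

import numpy as np
from transformers import WhisperProcessor, WhisperForConditionalGeneration

processor = WhisperProcessor.from_pretrained("openai/whisper-large")
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-large")

# Placeholder input: one second of silence at Whisper's expected 16 kHz rate.
audio_array = np.zeros(16000, dtype=np.float32)

# Waveform -> input features -> generated token ids -> text.
inputs = processor(audio_array, sampling_rate=16000, return_tensors="pt")
predicted_ids = model.generate(inputs.input_features)
text = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]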
@@ -44,22 +32,22 @@ pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")
 def transcribe(audio):

     # load audio and pad/trim it to fit 30 seconds
-    audio =
-    audio =
+    audio = whisper.load_audio(audio)
+    audio = whisper.pad_or_trim(audio)

     # make log-Mel spectrogram and move to the same device as the model
-    mel =
+    mel = whisper.log_mel_spectrogram(audio).to(model.device)

     # detect the spoken language
     _, probs = model.detect_language(mel)

     # decode the audio
-    options =
-    result =
+    options = whisper.DecodingOptions()
+    result = whisper.decode(model, mel, options)
     result_text = result.text

     # Pass the generated text to Audio
-    chatgpt_api =
+    chatgpt_api = ChatGPT(secret_token)
     resp = chatgpt_api.send_message(result_text)
     out_result = resp['message']

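The rewritten transcribe body fills in the previously empty assignments with the usage pattern from the openai-whisper README. The same pipeline as a self-contained sketch, with a hypothetical sample.wav path, also showing what the otherwise-unused probs dict can report:

import whisper

model = whisper.load_model("base")

# "sample.wav" is a placeholder path; any audio file whisper can read works.
audio = whisper.load_audio("sample.wav")
audio = whisper.pad_or_trim(audio)  # pad/trim to exactly 30 s of samples

# Log-Mel spectrogram on the same device as the model.
mel = whisper.log_mel_spectrogram(audio).to(model.device)

# detect_language returns (token, {language code: probability}).
_, probs = model.detect_language(mel)
print(f"detected language: {max(probs, key=probs.get)}")

result = whisper.decode(model, mel, whisper.DecodingOptions())
print(result.text)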
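Neither hunk shows an import for the ChatGPT class used in transcribe, so presumably from pyChatGPT import ChatGPT appears in lines the diff does not touch. pyChatGPT drives the ChatGPT web app with a browser session token, which is what the empty secret_token stands for; it is left blank in the commit and stays blank in this sketch.

from pyChatGPT import ChatGPT

secret_token = ""  # ChatGPT session token; intentionally left blank, as in the commit

chatgpt_api = ChatGPT(secret_token)
resp = chatgpt_api.send_message("Describe a sunset over the ocean.")
print(resp['message'])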
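The second hunk's context line shows the app also builds pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1"), presumably to turn the ChatGPT reply into an image. Driving that pipeline is one call; the prompt below is a made-up example.

from diffusers import DiffusionPipeline

pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1")

# A text prompt in, a PIL image out.
image = pipeline("a sunset over the ocean").images[0]
image.save("generated.png")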
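The commit also deletes the fairseq FastSpeech2 loader without ever having used it. For the record, the facebook/fastspeech2-en-ljspeech model card drives that ensemble roughly as follows; this sketch, including the arg_overrides and the TTSHubInterface helpers, follows the card rather than anything in this repo.

from fairseq.checkpoint_utils import load_model_ensemble_and_task_from_hf_hub
from fairseq.models.text_to_speech.hub_interface import TTSHubInterface

models, cfg, task = load_model_ensemble_and_task_from_hf_hub(
    "facebook/fastspeech2-en-ljspeech",
    arg_overrides={"vocoder": "hifigan", "fp16": False},
)
model = models[0]
TTSHubInterface.update_cfg_with_data_cfg(cfg, task.data_cfg)
generator = task.build_generator([model], cfg)

# Text in, (waveform, sample rate) out.
sample = TTSHubInterface.get_model_input(task, "Hello, this is a test run.")
wav, rate = TTSHubInterface.get_prediction(task, model, generator, sample)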