# app.py — "arabic2ChatGPT" Hugging Face Space: record speech, transcribe it
# with Whisper, send the transcript to ChatGPT, and speak the reply with
# Coqui TTS.
# (Removed non-Python web-page residue that had been pasted above the code:
#  "arabic2ChatGPT / app.py", "EmadSalem's picture", "Update app.py",
#  "raw / history blame / 11.8 kB".)
import numpy as np
import tempfile
import gradio as gr
from neon_tts_plugin_coqui import CoquiTTS

# Language codes supported by the Coqui TTS plugin; "en" is the default voice.
LANGUAGES = list(CoquiTTS.langs.keys())
default_lang = "en"

# ChatGPT
from pyChatGPT import ChatGPT
import whisper

# Loaded once at import time; the "small" checkpoint trades accuracy for
# startup time and memory.
whisper_model = whisper.load_model("small")

import os
# ChatGPT session token supplied via the Space's secrets ("SessionToken").
# May be None; chat_hf falls back to the user-provided token in that case.
session_token = os.environ.get('SessionToken')
title="بسم الله الرحمن الرحيم"
css = """
.gradio-container {
font-family: 'IBM Plex Sans', sans-serif;
}
.gr-button {
color: white;
border-color: black;
background: black;
}
input[type='range'] {
accent-color: black;
}
.dark input[type='range'] {
accent-color: #dfdfdf;
}
.container {
max-width: 730px;
margin: auto;
padding-top: 1.5rem;
}
#gallery {
min-height: 22rem;
margin-bottom: 15px;
margin-left: auto;
margin-right: auto;
border-bottom-right-radius: .5rem !important;
border-bottom-left-radius: .5rem !important;
}
#gallery>div>.h-full {
min-height: 20rem;
}
.details:hover {
text-decoration: underline;
}
.gr-button {
white-space: nowrap;
}
.gr-button:focus {
border-color: rgb(147 197 253 / var(--tw-border-opacity));
outline: none;
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
--tw-border-opacity: 1;
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px var(--tw-ring-offset-width)) var(--tw-ring-color);
--tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
--tw-ring-opacity: .5;
}
.footer {
margin-bottom: 45px;
margin-top: 35px;
text-align: center;
border-bottom: 1px solid #e5e5e5;
}
.footer>p {
font-size: .8rem;
display: inline-block;
padding: 0 10px;
transform: translateY(10px);
background: white;
}
.dark .footer {
border-color: #303030;
}
.dark .footer>p {
background: #0b0f19;
}
.prompt h4{
margin: 1.25em 0 .25em 0;
font-weight: bold;
font-size: 115%;
}
"""
# NOTE(review): `block` is filled with HTML below but never launched — only
# `blocks` (defined at the bottom of the file) is served. It and this first
# `examples` list (shadowed by the longer list below) look like leftovers
# from the ERNIE-ViLG demo this Space was forked from.
block = gr.Blocks(css=css)
examples = [
    [
        '戴着眼镜的猫',
        '油画(Oil painting)'
    ]
]
# NOTE(review): renders the ERNIE-ViLG banner into `block`, which is never
# launched — dead leftover from the forked demo. Kept as-is for review.
with block:
    gr.HTML(
        """
            <div style="text-align: center; max-width: 650px; margin: 0 auto;">
              <div
                style="
                  display: inline-flex;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                  margin-bottom: 10px;
                  margin-left: 220px;
                  justify-content: center;
                "
              >
              <a href="https://github.com/PaddlePaddle/PaddleHub"><img src="https://user-images.githubusercontent.com/22424850/187387422-f6c9ccab-7fda-416e-a24d-7d6084c46f67.jpg" alt="Paddlehub" width="40%"></a>
              </div>
              <div
                style="
                  display: inline-flex;
                  align-items: center;
                  gap: 0.8rem;
                  font-size: 1.75rem;
                  margin-bottom: 10px;
                  justify-content: center;
                ">
              <a href="https://github.com/PaddlePaddle/PaddleHub"><h1 style="font-weight: 900; margin-bottom: 7px;">
                ERNIE-ViLG Demo
              </h1></a>
              </div>
              <p style="margin-bottom: 10px; font-size: 94%">
                ERNIE-ViLG 2.0 is a state-of-the-art text-to-image model that generates
                images from Chinese text.
              </p>
              <a href="https://github.com/PaddlePaddle/PaddleHub"><img src="https://user-images.githubusercontent.com/22424850/188184795-98605a22-9af2-4106-827b-e58548f8892f.png" alt="star Paddlehub" width="100%"></a>
            </div>
        """
    )
# NOTE(review): `description` and this `examples` list (Chinese / English /
# Japanese / Korean text-to-image prompts with a style label) are never
# referenced anywhere below — more leftovers from the ERNIE-ViLG demo.
description="ERNIE-ViLG model, which supports text-to-image task."
examples = [
    [
        '戴着眼镜的猫',
        '油画(Oil painting)'
    ],
    [
        'A cat with glasses',
        '油画(Oil painting)'
    ],
    [
        '眼鏡をかけた猫',
        '油画(Oil painting)'
    ],
    [
        '안경을 쓴 고양이',
        '油画(Oil painting)'
    ],
    [
        '日落时的城市天际线,史前遗迹风格',
        '油画(Oil painting)'
    ],
    [
        '一只猫坐在椅子上,戴着一副墨镜, low poly 风格',
        '卡通(Cartoon)'
    ],
    [
        'A cat sitting on a chair, wearing a pair of sunglasses, low poly style',
        '油画(Oil painting)'
    ],
    [
        '猫が椅子に座ってサングラスをかけている、low polyスタイル',
        '油画(Oil painting)'
    ],
    [
        '고양이 한 마리가 의자에 앉아 선글라스를 끼고 low poly 스타일을 하고 있다',
        '油画(Oil painting)'
    ],
    [
        '一只猫坐在椅子上,戴着一副墨镜,秋天风格',
        '探索无限(Explore infinity)'
    ],
    [
        '蒙娜丽莎,赛博朋克,宝丽来,33毫米,蒸汽波艺术',
        '探索无限(Explore infinity)'
    ],
    [
        '一只猫坐在椅子上,戴着一副墨镜,海盗风格',
        '探索无限(Explore infinity)'
    ],
    [
        '一条由闪电制成的令人敬畏的龙,概念艺术',
        '探索无限(Explore infinity)'
    ],
    [
        'An awesome dragon made of lightning, conceptual art',
        '油画(Oil painting)'
    ],
    [
        '稲妻で作られた畏敬の念を抱かせる竜、コンセプトアート',
        '油画(Oil painting)'
    ],
    [
        '번개로 만든 경외스러운 용, 개념 예술',
        '油画(Oil painting)'
    ],
    [
        '梵高猫头鹰,蒸汽波艺术',
        '探索无限(Explore infinity)'
    ],
    [
        '萨尔瓦多·达利描绘古代文明的超现实主义梦幻油画,写实风格',
        '探索无限(Explore infinity)'
    ],
    [
        '夕阳日落时,阳光落在云层上,海面波涛汹涌,风景,胶片感',
        '探索无限(Explore infinity)'
    ],
    [
        'Sunset, the sun falls on the clouds, the sea is rough, the scenery is filmy',
        '油画(Oil painting)'
    ],
    [
        '夕日が沈むと、雲の上に太陽の光が落ち、海面は波が荒く、風景、フィルム感',
        '油画(Oil painting)'
    ],
    [
        '석양이 질 때 햇빛이 구름 위에 떨어지고, 해수면의 파도가 용솟음치며, 풍경, 필름감',
        '油画(Oil painting)'
    ],
]
# NOTE(review): removed an exact byte-for-byte duplicate of the
# `with block: gr.HTML(...)` ERNIE-ViLG banner section above. It appended
# the same HTML a second time to `block`, a Blocks instance that is never
# launched, so dropping it has no effect on the served UI.
#info = "more info at [Neon Coqui TTS Plugin](https://github.com/NeonGeckoCom/neon-tts-plugin-coqui), [Coqui TTS](https://github.com/coqui-ai/TTS)"
#badge = "https://visitor-badge-reloaded.herokuapp.com/badge?page_id=neongeckocom.neon-tts-plugin-coqui"

# TTS engine shared by all requests. This initialization had been commented
# out, which made `coquiTTS.get_tts(...)` below raise NameError on every call.
coquiTTS = CoquiTTS()

# ChatGPT
def chat_hf(audio, custom_token, language="en"):
    """Transcribe `audio`, ask ChatGPT, and synthesize the reply as speech.

    Parameters:
        audio: path to the recorded audio file (Gradio `filepath` type).
        custom_token: user-supplied ChatGPT session token, used as a
            fallback when the server-side `session_token` fails.
        language: Coqui TTS language code for the spoken reply. Defaults to
            "en" (the module's `default_lang`) so the UI wiring, which only
            passes two inputs, keeps working.

    Returns:
        (whisper transcript, ChatGPT reply text, path to the generated .wav).
    """
    # Transcribe once up front; the original re-ran Whisper inside the
    # fallback branch, doing the transcription twice on token failure.
    whisper_text = translate(audio)

    def _gpt_reply(token):
        # One ChatGPT round trip; auth refresh + conversation reset keep
        # successive requests independent of each other.
        api = ChatGPT(token)
        resp = api.send_message(whisper_text)
        api.refresh_auth()        # refresh the authorization token
        api.reset_conversation()  # reset the conversation
        return resp['message']

    try:
        gpt_response = _gpt_reply(session_token)
    except Exception:
        # Server token failed (expired/unset) — retry with the user's token.
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate.
        gpt_response = _gpt_reply(custom_token)

    # Synthesize the reply to a temp .wav; delete=False so the file survives
    # the context exit and Gradio can stream it back to the client.
    with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as fp:
        coquiTTS.get_tts(gpt_response, fp, speaker={"language": language})
    return whisper_text, gpt_response, fp.name
# whisper
def translate(audio):
    """Transcribe the audio file at `audio` with the module-level Whisper
    model and return the transcript text (language is auto-detected)."""
    print("""
    Sending audio to Whisper ...
    """)
    # Load, trim/pad to Whisper's 30 s window, and build the log-mel input.
    waveform = whisper.pad_or_trim(whisper.load_audio(audio))
    log_mel = whisper.log_mel_spectrogram(waveform).to(whisper_model.device)
    # Language detection result is only used for its side effect of letting
    # decode() report `language`; the probabilities are discarded.
    _, _lang_probs = whisper_model.detect_language(log_mel)
    decoded = whisper.decode(
        whisper_model,
        log_mel,
        whisper.DecodingOptions(task="transcribe", fp16=False),
    )
    print("language spoken: " + decoded.language)
    print("transcript: " + decoded.text)
    print("———————————————————————————————————————————")
    return decoded.text
# The UI that is actually served: microphone + token inputs on the left,
# transcript / ChatGPT reply / synthesized speech outputs on the right.
with gr.Blocks() as blocks:
    gr.Markdown("<h1 style='text-align: center; margin-bottom: 1rem'>"
                + title
                + "</h1>")
    #gr.Markdown(description)
    with gr.Row(equal_height=True):
        with gr.Column():
            # `gr.inputs.Audio` is the deprecated pre-3.0 namespace; the
            # top-level component takes the same arguments.
            audio_file = gr.Audio(source="microphone", type="filepath")
            custom_token = gr.Textbox(label='If it fails, use your own session token', placeholder="your own session token")
            with gr.Row():
                submit = gr.Button("Submit", variant="primary")
        with gr.Column():
            text1 = gr.Textbox(label="Speech to Text")
            text2 = gr.Textbox(label="chatGPT response")
            audio = gr.Audio(label="Output", interactive=False)
    # chat_hf takes (audio, custom_token, language) but only two inputs are
    # wired up; calling it directly raised TypeError. Pin the TTS language
    # to the module default here.
    submit.click(
        lambda audio_path, token: chat_hf(audio_path, token, default_lang),
        [audio_file, custom_token],
        [text1, text2, audio],
    )
blocks.launch(debug=True)