|
import gradio as gr
from PIL import Image
import os

import openai

from share_btn import community_icon_html, loading_icon_html, share_js

token = os.environ.get('HF_TOKEN')

whisper = gr.Interface.load(name="spaces/sanchit-gandhi/whisper-large-v2")
tts = gr.Interface.load(name="spaces/Flux9665/IMS-Toucan")
talking_face = gr.Blocks.load(name="spaces/fffiloni/one-shot-talking-face", api_key=token)
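
# Pipeline overview (an orientation note, not part of the original logic):
# 1. the Whisper Space transcribes and translates the recorded audio to English,
# 2. the translated text is sent to the OpenAI Completions API,
# 3. the IMS-Toucan Space synthesizes the reply as English speech,
# 4. the one-shot-talking-face Space animates a portrait with that speech.
# The fn_index values used below assume the target Spaces keep their current
# API layout; HF_TOKEN is only needed to call the one-shot-talking-face Space.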
|
|
|
def infer(audio, openai_api_key):

    whisper_result = whisper(audio, None, "translate", fn_index=0)

    gpt_response = try_api(whisper_result, openai_api_key)

    audio_response = tts(gpt_response[0], "English Text", "English Accent", "English Speaker's Voice", fn_index=0)

    portrait_link = talking_face("wise_woman_portrait.png", audio_response, fn_index=0)

    return (
        gr.Textbox.update(value=whisper_result, visible=True),
        portrait_link,
        gr.Textbox.update(value=gpt_response[1], visible=True),
        gr.Group.update(visible=True),
        gr.Button.update(visible=True),
    )
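
# try_api wraps the OpenAI call and maps each failure mode to a pair of
# (spoken fallback text, HTML status message) so the UI always has something
# to render. The exception classes referenced below live in openai.error,
# i.e. the pre-1.0 openai Python package; in openai>=1.0 these errors were
# renamed and moved to the top-level namespace, so this block would need to
# be adapted if the dependency is upgraded.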
|
|
|
def try_api(message, openai_api_key):

    try:
        response = call_api(message, openai_api_key)
        return response, "<span class='openai_clear'>no error</span>"
    except openai.error.Timeout as e:
        print(f"OpenAI API request timed out: {e}")
        return "oops", f"<span class='openai_error'>OpenAI API request timed out: <br />{e}</span>"
    except openai.error.APIError as e:
        print(f"OpenAI API returned an API Error: {e}")
        return "oops", f"<span class='openai_error'>OpenAI API returned an API Error: <br />{e}</span>"
    except openai.error.APIConnectionError as e:
        print(f"OpenAI API request failed to connect: {e}")
        return "oops", f"<span class='openai_error'>OpenAI API request failed to connect: <br />{e}</span>"
    except openai.error.InvalidRequestError as e:
        print(f"OpenAI API request was invalid: {e}")
        return "oops", f"<span class='openai_error'>OpenAI API request was invalid: <br />{e}</span>"
    except openai.error.AuthenticationError as e:
        print(f"OpenAI API request was not authorized: {e}")
        return "oops", f"<span class='openai_error'>OpenAI API request was not authorized: <br />{e}</span>"
    except openai.error.PermissionError as e:
        print(f"OpenAI API request was not permitted: {e}")
        return "oops", f"<span class='openai_error'>OpenAI API request was not permitted: <br />{e}</span>"
    except openai.error.RateLimitError as e:
        print(f"OpenAI API request exceeded rate limit: {e}")
        return "oops", f"<span class='openai_error'>OpenAI API request exceeded rate limit: <br />{e}</span>"
|
|
|
def call_api(message, openai_api_key):

    print("starting OpenAI request")
    augmented_prompt = message + prevent_code_gen
    openai.api_key = openai_api_key

    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=augmented_prompt,
        temperature=0.5,
        max_tokens=2048,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6
    )

    print(response)

    return str(response.choices[0].text)
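
# clean_components resets every output widget when the "Clean" button is
# pressed. The gr.<Component>.update(...) calls below are the Gradio 3.x way
# of patching component properties from an event handler; newer Gradio
# releases replace them with gr.update() or returning new component instances.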
|
|
|
def clean_components():
    return (
        gr.Audio.update(value=None),
        gr.HTML.update(visible=False),
        gr.Textbox.update(visible=False),
        gr.Video.update(value=None),
        gr.Group.update(visible=False),
        gr.Button.update(visible=False),
    )
|
|
|
title = """ |
|
<div style="text-align: center; max-width: 500px; margin: 0 auto;"> |
|
<div |
|
style=" |
|
display: inline-flex; |
|
align-items: center; |
|
gap: 0.8rem; |
|
font-size: 1.75rem; |
|
margin-bottom: 10px; |
|
" |
|
> |
|
<h1 style="font-weight: 600; margin-bottom: 7px;"> |
|
GPT Talking Portrait |
|
</h1> |
|
</div> |
|
<p style="margin-bottom: 10px;font-size: 94%;font-weight: 100;line-height: 1.5em;"> |
|
Use Whisper to ask, alive portrait responds ! |
|
</p> |
|
</div> |
|
""" |
|
|
|
article = """ |
|
<div class="footer"> |
|
<p> |
|
Whisper & chatGPT by <a href="https://openai.com/" target="_blank">OpenAI</a> - |
|
Follow π€ <a href="https://twitter.com/fffiloni" target="_blank">Sylvain Filoni</a> for demo updates |
|
</p> |
|
</div> |
|
|
|
<div id="may-like-container" style="display: flex;justify-content: center;flex-direction: column;align-items: center;"> |
|
<p style="font-size: 0.8em;margin-bottom: 4px;">You may also like: </p> |
|
<div id="may-like" style="display:flex; align-items:center; justify-content: center;height:20px;"> |
|
<svg height="20" width="198" style="margin-left:4px"> |
|
<a href="https://huggingface.co/spaces/camenduru/one-shot-talking-face" target="_blank"> |
|
<image href="https://img.shields.io/badge/π€ Spaces-One Shot Talking Face-blue" src="https://img.shields.io/badge/π€ Spaces-One Shot Talking Face-blue.png" height="20"/> |
|
</a> |
|
</svg> |
|
<svg height="20" width="106" style="margin-left:4px"> |
|
<a href="https://huggingface.co/spaces/Pie31415/rome" target="_blank"> |
|
<image href="https://img.shields.io/badge/π€ Spaces-ROME-blue" src="https://img.shields.io/badge/π€ Spaces-ROME-blue.png" height="20"/> |
|
</a> |
|
</svg> |
|
</div> |
|
</div> |
|
""" |
|
|
|
prevent_code_gen = """ |
|
If i am asking for code generation, do not provide me with code. Instead, give me a summury of good hints about how i could do what i asked, but shortly. |
|
If i am not asking for code generation, do as usual. |
|
""" |
|
with gr.Blocks(css="style.css") as demo: |
|
|
|
with gr.Column(elem_id="col-container"): |
|
|
|
gr.HTML(title) |
|
|
|
gpt_response = gr.Video(label="Talking Portrait response", elem_id="video_out") |
|
whisper_tr = gr.Textbox(label="whisper english translation", elem_id="text_inp", visible=False) |
|
|
|
with gr.Row(elem_id="secondary-buttons"): |
|
clean_btn = gr.Button(value="Clean", elem_id="clean-btn", visible=False) |
|
with gr.Group(elem_id="share-btn-container", visible=False) as share_group: |
|
community_icon = gr.HTML(community_icon_html) |
|
loading_icon = gr.HTML(loading_icon_html) |
|
share_button = gr.Button("Share to community", elem_id="share-btn") |
|
|
|
error_handler = gr.HTML(visible=False, show_label=False, elem_id="error_handler") |
|
|
|
with gr.Column(elem_id="col-container-2"): |
|
with gr.Column(): |
|
with gr.Row(): |
|
record_input = gr.Audio(source="microphone",type="filepath", label="Audio input", show_label=True, elem_id="record_btn") |
|
openai_api_key = gr.Textbox(max_lines=1, type="password", label="π Your OpenAI API Key", placeholder="sk-123abc...") |
|
|
|
send_btn = gr.Button("Send my request !") |
|
|
|
gr.HTML(article) |
|
|
|
clean_btn.click(clean_components, scroll_to_output=True, inputs=[], outputs=[record_input, error_handler, whisper_tr, gpt_response, share_group, clean_btn]) |
|
send_btn.click(infer, inputs=[record_input, openai_api_key], outputs=[whisper_tr, gpt_response, error_handler, share_group, clean_btn]) |
|
share_button.click(None, [], [], _js=share_js) |
|
|
|
demo.queue(max_size=32, concurrency_count=20).launch(debug=True) |
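
# queue() with concurrency_count and launch(debug=True) follow the Gradio 3.x
# API; in Gradio 4+ the concurrency settings moved to per-event parameters, so
# this line would need a small adjustment when upgrading.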
|
|
|
|
|
|