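# Streamlit app: build a "talking partner" from an uploaded photo.
# Pipeline: user message -> freeGPT text reply -> WhisperSpeech TTS (Gradio Space)
# -> DreamTalk talking-head video (Gradio Space), rendered back into the chat.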
import streamlit as st
import os
import requests
import io
from PIL import Image
from freeGPT import Client as GPTClient  # text generation for chat replies
from gradio_client import Client          # calls to hosted Gradio Spaces

api_token = os.environ.get("API_TOKEN")
API_URL = "https://api-inference.huggingface.co/models/facebook/musicgen-small"
API_URL_IMG = "https://api-inference.huggingface.co/models/Salesforce/blip-image-captioning-large"
headers = {"Authorization": f"Bearer {api_token}"}
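# Note: API_URL, API_URL_IMG, and headers target the Hugging Face Inference API
# (MusicGen text-to-audio and BLIP image captioning) but are not used in the chat
# flow below. A minimal sketch of a text-prompt request against that API; the
# helper name and payload shape are assumptions, not part of the original app:
def query_inference_api(url, text_prompt):
    # POST a JSON payload to the Inference API and return the raw response bytes
    # (e.g. generated audio for MusicGen).
    response = requests.post(url, headers=headers, json={"inputs": text_prompt})
    response.raise_for_status()
    return response.content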
def get_speech(text, voice):
    """Synthesize `text` with the WhisperSpeech Space, cloning the speaker from the `voice` audio file."""
    client = Client("https://collabora-whisperspeech.hf.space/")
    result = client.predict(
        text,   # str: text to synthesize ('Enter multilingual text' Textbox component)
        voice,  # filepath: speaker reference audio ('Upload or Record Speaker Audio (optional)' Audio component)
        "",     # str: 'alternatively, you can paste in an audio file URL:' Textbox component
        14,     # float (between 10 and 15): 'Tempo (in characters per second)' Slider component
        api_name="/whisper_speech_demo"
    )
    print(result)
    return result
def get_dreamtalk(image_in, speech):
    """Generate a talking-head video of `image_in` lip-syncing the `speech` audio via the DreamTalk Space."""
    client = Client("https://fffiloni-dreamtalk.hf.space/")
    result = client.predict(
        speech,    # filepath: 'Audio input' Audio component
        image_in,  # filepath: 'Image' Image component
        "M030_front_neutral_level1_001.mat",  # emotional style preset; one of the M030/W009/W011 angry/contempt/disgusted/fear/happy/neutral/sad/surprised .mat files ('emotional style' Dropdown component)
        api_name="/infer"
    )
    print(result)
    return result['video']
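# --- Sidebar: collect the partner's photo and gender, then create the partner ---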
st.sidebar.title("Your AI Girl")
st.sidebar.write("We will make a perfect talking girlfriend for you; just upload a photo.")
img_prompt = st.sidebar.file_uploader("Upload an image of your girl", type=["jpeg", "jpg", "png"])
gender = st.sidebar.selectbox(
    'What will be your partner\'s gender?',
    ('Female', 'Male')
)
submit_btn = st.sidebar.button("Create partner")
# Pick the reference voice sample that matches the selected gender.
female_voice = "female.mp3"
male_voice = "male.mp3"
voice = female_voice if gender == 'Female' else male_voice
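# --- Chat flow: for each user message, generate a reply, synthesize it in the
# --- selected voice, and animate the uploaded photo into a talking-head video.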
if submit_btn and img_prompt is not None:
    messages = st.container(height=300)
    input_user = st.chat_input(placeholder='Enter a message for your partner...')
    if input_user:
        # Save the uploaded photo to disk so it can be passed to the Gradio Space as a filepath.
        image_path = img_prompt.name
        with open(image_path, "wb") as f:
            f.write(img_prompt.getbuffer())
        answer = GPTClient.create_completion("gpt3", f"Answer this question as a beautiful, nice, kind {gender}: " + input_user)
        output_audio = get_speech(answer, voice)
        output_video = get_dreamtalk(image_path, output_audio)
        with messages.chat_message("user"):
            st.write(input_user)
        with messages.chat_message("Partner", avatar=img_prompt):
            st.video(output_video)
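# Run locally with `streamlit run <this file>`; requires streamlit, gradio_client,
# freeGPT, requests, and Pillow installed, plus an API_TOKEN environment variable.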