import gradio as gr
import numpy as np
from audioldm import text_to_audio, build_model
from share_btn import community_icon_html, loading_icon_html, share_js

model_id = "haoheliu/AudioLDM-S-Full"
audioldm = build_model()
# audioldm = None


def text2audio(text, duration, guidance_scale, random_seed, n_candidates):
    # Generate candidate waveforms of shape [bs, 1, samples] for the prompt.
    waveform = text_to_audio(
        audioldm,
        text,
        random_seed,
        duration=duration,
        guidance_scale=guidance_scale,
        n_candidate_gen_per_text=int(n_candidates),
    )
    # Render each 16 kHz waveform into a waveform video for display.
    waveform = [gr.make_waveform((16000, wave[0]), bg_image="bg.png") for wave in waveform]
    # Dummy output for quick UI testing:
    # waveform = [(16000, np.random.randn(16000)), (16000, np.random.randn(16000))]
    if len(waveform) == 1:
        waveform = waveform[0]
    return waveform


# iface = gr.Interface(
#     fn=text2audio,
#     inputs=[
#         gr.Textbox(value="A man is speaking in a huge room", max_lines=1),
#         gr.Slider(2.5, 10, value=5, step=2.5),
#         gr.Slider(0, 5, value=2.5, step=0.5),
#         gr.Number(value=42),
#     ],
#     outputs=[gr.Audio(label="Output", type="numpy"), gr.Audio(label="Output", type="numpy")],
#     allow_flagging="never",
# )
# iface.launch(share=True)

css = """
a { color: inherit; text-decoration: underline; }
.gradio-container { font-family: 'IBM Plex Sans', sans-serif; }
.gr-button { color: white; border-color: #000000; background: #000000; }
input[type='range'] { accent-color: #000000; }
.dark input[type='range'] { accent-color: #dfdfdf; }
.container { max-width: 730px; margin: auto; padding-top: 1.5rem; }
#gallery { min-height: 22rem; margin-bottom: 15px; margin-left: auto; margin-right: auto; border-bottom-right-radius: .5rem !important; border-bottom-left-radius: .5rem !important; }
#gallery>div>.h-full { min-height: 20rem; }
.details:hover { text-decoration: underline; }
.gr-button { white-space: nowrap; }
.gr-button:focus {
    border-color: rgb(147 197 253 / var(--tw-border-opacity));
    outline: none;
    box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
    --tw-border-opacity: 1;
    --tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
    --tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(3px + var(--tw-ring-offset-width)) var(--tw-ring-color);
    --tw-ring-color: rgb(191 219 254 / var(--tw-ring-opacity));
    --tw-ring-opacity: .5;
}
#advanced-btn { font-size: .7rem !important; line-height: 19px; margin-top: 12px; margin-bottom: 12px; padding: 2px 8px; border-radius: 14px !important; }
#advanced-options { margin-bottom: 20px; }
.footer { margin-bottom: 45px; margin-top: 35px; text-align: center; border-bottom: 1px solid #e5e5e5; }
.footer>p { font-size: .8rem; display: inline-block; padding: 0 10px; transform: translateY(10px); background: white; }
.dark .footer { border-color: #303030; }
.dark .footer>p { background: #0b0f19; }
.acknowledgments h4 { margin: 1.25em 0 .25em 0; font-weight: bold; font-size: 115%; }
#container-advanced-btns { display: flex; flex-wrap: wrap; justify-content: space-between; align-items: center; }
.animate-spin { animation: spin 1s linear infinite; }
@keyframes spin {
    from { transform: rotate(0deg); }
    to { transform: rotate(360deg); }
}
#share-btn-container { display: flex; padding-left: 0.5rem !important; padding-right: 0.5rem !important; background-color: #000000; justify-content: center; align-items: center; border-radius: 9999px !important; width: 13rem; margin-top: 10px; margin-left: auto; }
#share-btn { all: initial; color: #ffffff; font-weight: 600; cursor: pointer; font-family: 'IBM Plex Sans', sans-serif; margin-left: 0.5rem !important; padding-top: 0.25rem !important; padding-bottom: 0.25rem !important; right: 0; }
#share-btn * { all: unset; }
#share-btn-container div:nth-child(-n+2) { width: auto !important; min-height: 0px !important; }
#share-btn-container .wrap { display: none !important; }
.gr-form { flex: 1 1 50%; border-top-right-radius: 0; border-bottom-right-radius: 0; }
#prompt-container { gap: 0; }
#generated_id { min-height: 700px; }
#setting_id { margin-bottom: 12px; text-align: center; font-weight: 900; }
"""

iface = gr.Blocks(css=css)

with iface:
    gr.HTML(
        """
        <p style="text-align: center;">
            For faster inference without waiting in the queue, you may duplicate this Space and
            upgrade to a GPU in the settings.
        </p>
        """
    )
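    # NOTE: the prompt/control layout of the original Space is not preserved in this file, so
    # the block below is a minimal sketch reconstructed from text2audio's signature and the
    # defaults in the commented-out gr.Interface above. Component names, labels, and the
    # n_candidates default are illustrative assumptions, not the original design.
    textbox = gr.Textbox(value="A man is speaking in a huge room", max_lines=1, label="Input text")
    duration = gr.Slider(2.5, 10, value=5, step=2.5, label="Duration (seconds)")
    guidance_scale = gr.Slider(0, 5, value=2.5, step=0.5, label="Guidance scale")
    random_seed = gr.Number(value=42, label="Random seed")
    n_candidates = gr.Slider(1, 5, value=3, step=1, label="Candidates generated per prompt")
    generate_btn = gr.Button("Generate")
    # gr.make_waveform returns a video file path, so the result is displayed with gr.Video.
    output = gr.Video(label="Output", elem_id="generated_id")
    generate_btn.click(
        text2audio,
        inputs=[textbox, duration, guidance_scale, random_seed, n_candidates],
        outputs=[output],
    )
    # The imported share-button assets (community_icon_html, loading_icon_html, share_js)
    # would be wired to a "Share to community" button here in the original Space; that
    # wiring is omitted from this sketch.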
    gr.HTML(
        """
        <div class="acknowledgments">
            <p><b>Essential Tricks for Enhancing the Quality of Your Generated Audio</b></p>
            <p>1. Use more adjectives to describe your sound. For example, "A man is speaking clearly and slowly in a large room" works better than "A man is speaking"; the extra detail helps AudioLDM understand what you want.</p>
            <p>2. Try different random seeds; they can sometimes affect the generation quality significantly.</p>
            <p>3. Prefer general terms such as "man" or "woman" over specific names of individuals, and avoid abstract objects that humans may not be familiar with, such as "mummy".</p>
        </div>
        """
    )
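    # Hypothetical illustration of tips 1 and 2 above, kept as a comment so nothing runs at
    # startup: a more descriptive prompt plus a sweep over a few arbitrary seeds, called
    # directly through the text2audio function defined above.
    #
    #   prompt = "A man is speaking clearly and slowly in a large room"
    #   for seed in (42, 55, 777):
    #       text2audio(prompt, duration=5, guidance_scale=2.5, random_seed=seed, n_candidates=3)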
    gr.HTML(
        """
        <div class="acknowledgments">
            <p>The model is built with data from AudioSet, Freesound, and the BBC Sound Effects library. We share this demo under the UK copyright exception for the use of data in academic research.</p>
            <p>This demo is strictly for research purposes. For commercial use, please contact us.</p>
        </div>
        """
    )
iface.queue(concurrency_count=3)
iface.launch(debug=True)
# iface.launch(debug=True, share=True)