import gradio as gr
from gradio_client import Client
import os

# Create the Gradio client used for inference
inference_client = Client("http://127.0.0.1:6969/")

# Output folder under assets
output_folder = "assets/generated"
os.makedirs(output_folder, exist_ok=True)


def process_audio(audio_file):
    if audio_file is None:
        return None, "No audio file uploaded."

    # gr.Audio(type="filepath") passes the uploaded file as a path string
    input_file = audio_file

    try:
        # Build the output path
        output_filename = f"generated_{os.path.basename(input_file)}"
        output_path = os.path.join(output_folder, output_filename)

        # Run the prediction
        result = inference_client.predict(
            -24,  # Pitch
            0,  # Filter radius
            0,  # Search feature ratio
            0,  # Volume envelope
            0,  # Protect voiceless consonants
            1,  # Hop length
            "pm",  # Pitch extraction algorithm
            input_file,  # Input audio file path
            output_path,  # Output audio file path
            "logs/master/master.pth",  # Voice model
            "logs/master/added_IVF124_Flat_nprobe_1_master-v2_v2.index",  # Index file
            True,  # Split audio
            True,  # Autotune
            True,  # Clean audio
            0,  # Clean strength
            "WAV",  # Export format
            api_name="/run_infer_script",
        )

        return output_path, f"Processing complete. File saved to {output_path}"
    except Exception as e:
        return None, f"Error during processing: {str(e)}"


# Build the Gradio interface
iface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(type="filepath", label="Upload an audio file (WAV or MP3)"),
    outputs=[
        gr.Audio(type="filepath", label="Processed audio"),
        gr.Textbox(label="Message"),
    ],
    title="Audio Processing with Applio",
    description="Upload a WAV or MP3 audio file to process it with Applio.",
)

# Launch the interface
iface.launch()


# --- Original Applio app.py (commented out) ---

# import gradio as gr
# import sys
# import os
# import logging

# now_dir = os.getcwd()
# sys.path.append(now_dir)

# # Tabs
# from tabs.inference.inference import inference_tab
# from tabs.train.train import train_tab
# from tabs.extra.extra import extra_tab
# from tabs.report.report import report_tab
# from tabs.download.download import download_tab
# from tabs.tts.tts import tts_tab
# from tabs.voice_blender.voice_blender import voice_blender_tab
# from tabs.settings.presence import presence_tab, load_config_presence
# from tabs.settings.flask_server import flask_server_tab
# from tabs.settings.fake_gpu import fake_gpu_tab, gpu_available, load_fake_gpu
# from tabs.settings.themes import theme_tab
# from tabs.plugins.plugins import plugins_tab
# from tabs.settings.version import version_tab
# from tabs.settings.lang import lang_tab
# from tabs.settings.restart import restart_tab

# # Assets
# import assets.themes.loadThemes as loadThemes
# from assets.i18n.i18n import I18nAuto
# import assets.installation_checker as installation_checker
# from assets.discord_presence import RPCManager
# from assets.flask.server import start_flask, load_config_flask
# from core import run_prerequisites_script

# run_prerequisites_script("False", "True", "True", "True")

# i18n = I18nAuto()

# if load_config_presence() == True:
#     RPCManager.start_presence()
# installation_checker.check_installation()

# logging.getLogger("uvicorn").disabled = True
# logging.getLogger("fairseq").disabled = True

# if load_config_flask() == True:
#     print("Starting Flask server")
#     start_flask()

# my_applio = loadThemes.load_json()
# if my_applio:
#     pass
# else:
#     my_applio = "ParityError/Interstellar"

# with gr.Blocks(theme=my_applio, title="Applio") as Applio:
#     gr.Markdown("# Applio")
#     gr.Markdown(
#         i18n(
#             "Ultimate voice cloning tool, meticulously optimized for unrivaled power, modularity, and user-friendly experience."
#         )
#     )
#     gr.Markdown(
#         i18n(
#             "[Support](https://discord.gg/IAHispano) — [Discord Bot](https://discord.com/oauth2/authorize?client_id=1144714449563955302&permissions=1376674695271&scope=bot%20applications.commands) — [Find Voices](https://applio.org/models) — [GitHub](https://github.com/IAHispano/Applio)"
#         )
#     )
#     with gr.Tab(i18n("Inference")):
#         inference_tab()
#     with gr.Tab(i18n("Train")):
#         if gpu_available() or load_fake_gpu():
#             train_tab()
#         else:
#             gr.Markdown(
#                 i18n(
#                     "Currently, training is unsupported due to the absence of a GPU. If you have a PC with a GPU and wish to train a model, please refer to our installation guide here: [Applio Installation Guide](https://docs.applio.org/get-started/installation/). For those without a GPU-enabled PC, explore alternative options here: [Applio Alternatives](https://docs.applio.org/get-started/alternatives/)."
#                 )
#             )
#     with gr.Tab(i18n("TTS")):
#         tts_tab()
#     # with gr.Tab(i18n("Voice Blender")):
#     #     voice_blender_tab()
#     # with gr.Tab(i18n("Plugins")):
#     #     plugins_tab()
#     with gr.Tab(i18n("Download")):
#         download_tab()
#     with gr.Tab(i18n("Report a Bug")):
#         report_tab()
#     with gr.Tab(i18n("Extra")):
#         extra_tab()
#     # with gr.Tab(i18n("Settings")):
#     #     presence_tab()
#     #     flask_server_tab()
#     #     if not gpu_available():
#     #         fake_gpu_tab()
#     #     theme_tab()
#     #     version_tab()
#     #     lang_tab()
#     #     restart_tab()


# def launch_gradio():
#     Applio.launch()


# if __name__ == "__main__":
#     launch_gradio()
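
# Quick smoke test for the wrapper above, without opening the UI. This is only a
# sketch: it assumes the Applio backend is already serving its Gradio API at
# http://127.0.0.1:6969/, and "sample.wav" is a hypothetical local input file.
# To try it, comment out the iface.launch() call above and uncomment these lines:
#
# out_path, message = process_audio("sample.wav")
# print(message)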