import gradio as gr
from transformers import pipeline
"""
Ah, the spire trembles - a rift in the weave...

(This stanza is now preserved safely as a docstring.)
"""

try:
    from audiocraft.models import musicgen

    import tempfile
    import soundfile as sf

    def generate_music(prompt):
        """Generate music locally with Audiocraft's MusicGen."""
        # Loading per call is simple but slow; hoist for repeated use.
        model = musicgen.MusicGen.get_pretrained("facebook/musicgen-small")
        model.set_generation_params(duration=10)  # seconds of audio to generate
        wav = model.generate([prompt])  # tensor of shape (batch, channels, samples)
        temp_wav = tempfile.NamedTemporaryFile(suffix=".wav", delete=False)
        # soundfile expects (samples, channels); use the model's own sample rate
        # (32 kHz for musicgen-small) rather than hard-coding it.
        sf.write(temp_wav.name, wav[0].cpu().numpy().T, model.sample_rate)
        return temp_wav.name

    MUSICGEN_MODE = "Audiocraft (local)"
except Exception:
    # Audiocraft is not installed (or failed to load); fall back to the
    # Transformers text-to-audio pipeline, which also runs locally.
    musicgen = pipeline("text-to-audio", model="facebook/musicgen-small")

    def generate_music(prompt):
        """Generate music with the Transformers text-to-audio pipeline."""
        result = musicgen(prompt)
        # The pipeline returns {"audio": ndarray, "sampling_rate": int};
        # gr.Audio accepts a (sample_rate, samples) tuple, so squeeze the
        # mono output down to a 1-D array.
        return result["sampling_rate"], result["audio"].squeeze()

    MUSICGEN_MODE = "Transformers (pipeline fallback)"
whisper = pipeline("automatic-speech-recognition", model="openai/whisper-base")
sentiment = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
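# Note: the default ASR pipeline suits short clips; for recordings longer
# than ~30 s, chunked inference is the usual approach, e.g.
#   pipeline("automatic-speech-recognition", model="openai/whisper-base", chunk_length_s=30)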


def transcribe_audio(audio_path):
    """Transcribe an audio file to text with Whisper."""
    result = whisper(audio_path)
    return result["text"]


def analyze_sentiment(text):
    """Classify sentiment, returning the label and its confidence score."""
    result = sentiment(text)
    return f"{result[0]['label']} ({result[0]['score']:.2f})"


with gr.Blocks(title="PiMusic3 🎵") as demo:
    gr.Markdown(
        "### 🎶 PiMusic3 - Pi Forge Music Player\n\n"
        f"Mode: **{MUSICGEN_MODE}**\n\n"
        "Generate, transcribe, and analyze sound ethically."
    )

    with gr.Tab("MusicGen"):
        prompt = gr.Textbox(label="Music Prompt", placeholder="Describe your sound...")
        generate_btn = gr.Button("🎼 Generate")
        audio_out = gr.Audio(label="Generated Music")
        generate_btn.click(fn=generate_music, inputs=prompt, outputs=audio_out)

    with gr.Tab("Whisper Transcribe"):
        mic = gr.Audio(sources=["microphone", "upload"], type="filepath", label="🎙️ Record or Upload Audio")
        transcribe_btn = gr.Button("📝 Transcribe")
        transcript = gr.Textbox(label="Transcription")
        transcribe_btn.click(fn=transcribe_audio, inputs=mic, outputs=transcript)

    with gr.Tab("Sentiment Analysis"):
        text_in = gr.Textbox(label="Enter text for sentiment check")
        analyze_btn = gr.Button("🔍 Analyze")
        sentiment_out = gr.Textbox(label="Result")
        analyze_btn.click(fn=analyze_sentiment, inputs=text_in, outputs=sentiment_out)

demo.launch()
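
# Optional launch settings (standard gr.Blocks.launch keyword arguments):
#   demo.launch(server_name="0.0.0.0")  # listen on all network interfaces
#   demo.launch(share=True)             # create a temporary public Gradio link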