| | |
| | """ |
| | VYNL Complete - HuggingFace Edition |
| | Full-featured music production suite with AI generation and voice cloning. |
| | |
| | Copyright (c) 2024-2026 Robert T. Lackey. All rights reserved. |
| | Stone and Lantern Music Group |
| | """ |
| |
|
| | import os |
| |
|
| | |
| | |
# Detect the HuggingFace Spaces runtime: it sets the SPACE_ID env var.
IS_HF_SPACE = os.environ.get('SPACE_ID') is not None

if IS_HF_SPACE:
    # Real ZeroGPU decorator package, only available on Spaces.
    import spaces
    HAS_ZEROGPU = True
    print("ZeroGPU available - running on HuggingFace Spaces")
else:
    HAS_ZEROGPU = False
    print("Running locally - ZeroGPU not available")

    # Local stand-in so @spaces.GPU(duration=...) decorators below are no-ops.
    # Defined only in this branch so it never shadows the real module on Spaces.
    class spaces:
        @staticmethod
        def GPU(duration=60):
            # `duration` is accepted for signature compatibility but unused.
            def decorator(func):
                return func
            return decorator
| |
|
| | import gradio as gr |
| | import json |
| | import tempfile |
| | import shutil |
| | from pathlib import Path |
| | from datetime import datetime |
| | import warnings |
| | warnings.filterwarnings('ignore') |
| |
|
| | |
| | |
| | |
| |
|
| | QUOTA_ERROR_MSG = """⚠️ ZeroGPU Quota Exceeded |
| | |
| | You've run out of free GPU quota for today. |
| | |
| | To get more GPU time: |
| | 1. Sign up for free: https://huggingface.co/join |
| | 2. Or log in: https://huggingface.co/login |
| | |
| | Logged-in users get significantly more GPU quota! |
| | """ |
| |
|
def is_quota_error(error_msg: str) -> bool:
    """Check if an error is a ZeroGPU quota error.

    Matches case-insensitively against a fixed set of quota-related keywords.
    """
    text = str(error_msg).lower()
    return any(keyword in text for keyword in ('quota', 'zerogpu', 'daily', 'limit', 'exceeded'))
| |
|
def handle_gpu_error(e: Exception) -> str:
    """Return user-friendly message for GPU errors.

    Quota errors get the canned QUOTA_ERROR_MSG; everything else is
    prefixed with "GPU Error:".
    """
    message = str(e)
    return QUOTA_ERROR_MSG if is_quota_error(message) else f"GPU Error: {message}"
| |
|
| | |
| | |
| | |
| |
|
| | |
| | from token_system import ( |
| | user_manager, check_can_process, deduct_token, |
| | get_status_display, DEMO_MAX_DURATION, VALID_LICENSES |
| | ) |
| |
|
| | |
| | from mastering import master_audio, format_analysis, analyze_audio |
| |
|
| | |
# --- Optional feature modules --------------------------------------------
# Each guard sets a HAS_* flag that the UI code checks before using the
# feature; missing modules degrade gracefully instead of crashing startup.

# Per-user catalogs of songs, voices and generated tracks.
try:
    from vynl_catalog import (
        SongCatalog, VoiceCatalog, GeneratedCatalog,
        get_all_audio_for_user
    )
    HAS_CATALOG = True
except ImportError:
    HAS_CATALOG = False
    print("Catalog module not available")

# Chord detection; the stub format_chord_chart keeps later callers safe.
try:
    from modules.chords import extract_chords, format_chord_chart
    HAS_CHORD_DETECTION = True
except ImportError:
    HAS_CHORD_DETECTION = False
    print("Chord detection module not available")
    def format_chord_chart(*args, **kwargs):
        return ""

# Alternative stem-separation module (this file also has its own Demucs path).
try:
    from modules.stems import separate_stems as module_separate_stems, list_stems
    HAS_STEM_MODULE = True
except ImportError:
    HAS_STEM_MODULE = False
    print("Stem separation module not available")

# Reaper (.rpp) DAW project export.
try:
    from modules.reaper import create_reaper_project
    HAS_REAPER_MODULE = True
except ImportError:
    HAS_REAPER_MODULE = False
    print("Reaper module not available")
| |
|
| | import zipfile |
| |
|
| | |
# AI music generation (vynl_generator). On ImportError we fall back to static
# option lists (so the UI dropdowns still render) and a stub generate_song.
# HAS_AUDIOCRAFT is overwritten by the import when the module is present.
HAS_AUDIOCRAFT = False
try:
    from vynl_generator import (
        generate_song, KEYS, TIME_SIGNATURES, GENRES, MOODS, INSTRUMENTS,
        HAS_AUDIOCRAFT, build_prompt
    )
except ImportError as e:
    print(f"AI Generator not available: {e}")
    KEYS = ["C major", "C minor", "D major", "D minor", "E major", "E minor",
            "F major", "F minor", "G major", "G minor", "A major", "A minor", "B major", "B minor"]
    TIME_SIGNATURES = ["4/4", "3/4", "6/8", "2/4", "5/4", "7/8"]
    GENRES = ["Pop", "Rock", "Jazz", "Blues", "Electronic", "Classical", "Hip Hop", "R&B", "Country", "Folk"]
    MOODS = ["Happy", "Sad", "Energetic", "Calm", "Dark", "Uplifting", "Melancholic"]
    INSTRUMENTS = ["Piano", "Guitar", "Drums", "Bass", "Strings", "Synth", "Brass", "Woodwinds"]
    def generate_song(*args, **kwargs):
        return None, "AI Generator not available - install audiocraft: pip install audiocraft xformers"
|
| | |
# Voice cloning (RVC). The default PRESET_VOICES below are simple pitch-shift
# profiles used by _apply_voice_clone_gpu when no RVC model is involved; they
# are replaced by the RVC module's presets when it imports successfully.
HAS_RVC = False
RVC_INFO = "RVC not loaded"
PRESET_VOICES = {
    "male_tenor": {"name": "Male Tenor", "pitch_shift": 0},
    "male_bass": {"name": "Male Bass", "pitch_shift": -5},
    "female_alto": {"name": "Female Alto", "pitch_shift": 12},
    "female_soprano": {"name": "Female Soprano", "pitch_shift": 15},
}
try:
    from vynl_rvc import (
        clone_voice, train_voice_model, get_model_registry,
        get_voice_dataset, PRESET_VOICES as RVC_PRESETS, check_rvc_installation
    )
    HAS_RVC, RVC_INFO = check_rvc_installation()
    if RVC_PRESETS:
        PRESET_VOICES = RVC_PRESETS
except ImportError as e:
    print(f"RVC not available: {e}")
    def clone_voice(*args, **kwargs):
        return None, "RVC not available - install with: pip install rvc-python"
    def train_voice_model(*args, **kwargs):
        return None, "RVC not available"
|
| | |
# Audio analysis stack (librosa + numpy) used for key/tempo detection and
# preset voice pitch shifting.
try:
    import librosa
    import numpy as np
    HAS_LIBROSA = True
except ImportError:
    HAS_LIBROSA = False
    print("Librosa not available")

# yt-dlp Python package flag (downloads actually shell out to the CLI;
# see download_youtube).
try:
    import yt_dlp
    HAS_YTDLP = True
except ImportError:
    HAS_YTDLP = False
    print("yt-dlp not available")
| |
|
| | |
| | |
| | |
| |
|
| | VERSION = "2.2" |
| | OUTPUT_DIR = Path(tempfile.gettempdir()) / 'vynl_output' |
| | OUTPUT_DIR.mkdir(parents=True, exist_ok=True) |
| |
|
| | |
| | |
| | |
| |
|
| | CSS = """ |
| | /* VYNL HuggingFace - High Contrast Dark Theme */ |
| | :root { |
| | --bg-dark: #121212; |
| | --bg-panel: #1E1E1E; |
| | --bg-input: #2A2A2A; |
| | --border: #404040; |
| | --text: #FFFFFF; |
| | --text-dim: #B0B0B0; |
| | --accent: #FF6B4A; |
| | --accent-cyan: #00D4FF; |
| | --accent-green: #7CFF00; |
| | } |
| | |
| | /* Global */ |
| | .gradio-container, .gradio-container * { color: var(--text) !important; } |
| | body, .gradio-container { background: var(--bg-dark) !important; max-width: 1400px !important; } |
| | |
| | /* Header */ |
| | .vynl-header { |
| | text-align: center; |
| | padding: 24px; |
| | background: linear-gradient(180deg, rgba(255,107,74,0.15) 0%, transparent 100%); |
| | border-bottom: 3px solid; |
| | border-image: linear-gradient(90deg, var(--accent-cyan), var(--accent), var(--accent-green)) 1; |
| | margin-bottom: 16px; |
| | } |
| | .vynl-header h1 { |
| | font-size: 3rem; |
| | font-weight: 900; |
| | letter-spacing: 0.2em; |
| | background: linear-gradient(90deg, var(--accent-cyan), var(--accent), var(--accent-green)); |
| | -webkit-background-clip: text; |
| | -webkit-text-fill-color: transparent; |
| | margin: 0; |
| | } |
| | .vynl-header p { color: var(--text-dim) !important; margin: 8px 0 0; } |
| | |
| | /* Status bar */ |
| | .status-bar { |
| | background: rgba(0,212,255,0.15); |
| | border: 1px solid rgba(0,212,255,0.4); |
| | border-radius: 25px; |
| | padding: 10px 24px; |
| | margin: 12px auto; |
| | max-width: 700px; |
| | text-align: center; |
| | color: var(--text) !important; |
| | } |
| | |
| | /* Tabs */ |
| | .tabs { border: none !important; } |
| | .tab-nav { background: transparent !important; gap: 8px !important; justify-content: center !important; flex-wrap: wrap !important; } |
| | .tab-nav button { |
| | background: var(--bg-panel) !important; |
| | border: 1px solid var(--border) !important; |
| | color: var(--text) !important; |
| | padding: 12px 24px !important; |
| | border-radius: 10px !important; |
| | font-weight: 600 !important; |
| | } |
| | .tab-nav button:hover { border-color: var(--accent) !important; background: #252525 !important; } |
| | .tab-nav button.selected { background: var(--accent) !important; border-color: var(--accent) !important; } |
| | |
| | /* Inputs */ |
| | input, textarea, select { |
| | background: var(--bg-input) !important; |
| | border: 1px solid var(--border) !important; |
| | color: var(--text) !important; |
| | border-radius: 8px !important; |
| | } |
| | input::placeholder, textarea::placeholder { color: #707070 !important; } |
| | input:focus, textarea:focus { border-color: var(--accent) !important; } |
| | |
| | /* Labels */ |
| | label { color: var(--text) !important; font-weight: 500 !important; } |
| | .markdown, .gr-markdown, p, h1, h2, h3, h4, li, span { color: var(--text) !important; } |
| | |
| | /* Panels */ |
| | .gr-box, .gr-panel, .gr-form, .gr-block { |
| | background: var(--bg-panel) !important; |
| | border: 1px solid var(--border) !important; |
| | border-radius: 10px !important; |
| | } |
| | |
| | /* Buttons */ |
| | button, .gr-button { color: var(--text) !important; } |
| | .gr-button-primary { background: linear-gradient(135deg, var(--accent), #FF8C42) !important; border: none !important; } |
| | .gr-button-secondary { background: var(--bg-panel) !important; border: 1px solid var(--border) !important; } |
| | |
| | /* Accordion */ |
| | .gr-accordion { background: var(--bg-panel) !important; border: 1px solid var(--border) !important; } |
| | .gr-accordion summary { color: var(--text) !important; } |
| | |
| | /* Dropdowns */ |
| | .gr-dropdown, [data-testid="dropdown"] { background: var(--bg-input) !important; } |
| | .gr-dropdown span, .gr-dropdown label { color: var(--text) !important; } |
| | .gr-dropdown ul, .gr-dropdown li { background: var(--bg-input) !important; color: var(--text) !important; } |
| | |
| | /* Slider */ |
| | .gr-slider span, .gr-slider label { color: var(--text) !important; } |
| | |
| | /* Audio */ |
| | .gr-audio { background: var(--bg-panel) !important; } |
| | |
| | /* Tables */ |
| | table, th, td { color: var(--text) !important; background: var(--bg-panel) !important; } |
| | th { background: var(--bg-input) !important; } |
| | |
| | /* Footer */ |
| | footer { display: none !important; } |
| | .vynl-footer { |
| | text-align: center; |
| | padding: 20px; |
| | color: var(--text-dim) !important; |
| | border-top: 1px solid var(--border); |
| | margin-top: 24px; |
| | } |
| | .vynl-footer p { color: var(--text-dim) !important; } |
| | .vynl-footer strong { color: var(--text) !important; } |
| | """ |
| |
|
| | |
| | |
| | |
| |
|
def download_youtube(url: str) -> tuple:
    """Download audio from YouTube using CLI for better cookie support.

    Tries cookie sources in order — a local cookies.txt, then (off-Spaces)
    browser cookie stores, then no cookies — because YouTube often requires
    authentication for automated downloads.

    Args:
        url: YouTube video URL.

    Returns:
        (wav_path, video_title) on success, or (None, error_message).
    """
    import subprocess
    import shutil

    yt_dlp_path = shutil.which('yt-dlp')
    if not yt_dlp_path:
        return None, "yt-dlp not installed"

    try:
        output_dir = OUTPUT_DIR / f"yt_{datetime.now().strftime('%H%M%S')}"
        output_dir.mkdir(parents=True, exist_ok=True)
        audio_path = output_dir / "audio.wav"

        # Optional exported-cookies file shipped next to this script.
        cookies_file = Path(__file__).parent / "cookies.txt"

        def build_cmd(with_cookies=None):
            """Base yt-dlp command, optionally extended with a cookie source."""
            cmd = [yt_dlp_path, '--socket-timeout', '60', '--retries', '5', '--no-warnings']
            if with_cookies == 'file' and cookies_file.exists():
                cmd.extend(['--cookies', str(cookies_file)])
            elif with_cookies and not IS_HF_SPACE:
                # Browser cookie stores only exist on a local machine.
                cmd.extend(['--cookies-from-browser', with_cookies])
            return cmd

        # Cookie sources to try, most specific first; None = anonymous.
        cookie_sources = []
        if cookies_file.exists():
            cookie_sources.append('file')
        if not IS_HF_SPACE:
            cookie_sources.extend(['chrome', 'edge', 'firefox', 'brave'])
        cookie_sources.append(None)

        title = 'Unknown'
        download_success = False
        last_error = ""

        for cookie_source in cookie_sources:
            source_name = "cookies.txt" if cookie_source == 'file' else (cookie_source or 'no cookies')
            print(f"Trying YouTube download with {source_name}...")

            base_cmd = build_cmd(cookie_source)

            # Fetch the title first (cheap) so failures can still name the video.
            title_cmd = base_cmd + ['--print', 'title', '--no-download', url]
            result = subprocess.run(title_cmd, capture_output=True, text=True, timeout=30)

            if result.returncode == 0 and result.stdout.strip():
                title = result.stdout.strip()

            # yt-dlp chooses the real extension, so hand it a %(ext)s template.
            audio_cmd = base_cmd + [
                '-f', 'bestaudio/best',
                '-x', '--audio-format', 'wav',
                '-o', str(audio_path).replace('.wav', '.%(ext)s'),
                url
            ]

            print(f"Downloading: {title}")
            result = subprocess.run(audio_cmd, capture_output=True, text=True, timeout=300)

            if result.returncode == 0:
                download_success = True
                break

            last_error = result.stderr if result.stderr else "Unknown error"
            # Only auth / bot-detection failures are worth retrying with
            # another cookie source; anything else fails immediately.
            if 'Sign in' not in last_error and 'bot' not in last_error.lower():
                break

        if not download_success:
            if IS_HF_SPACE:
                return None, "YouTube requires authentication on HuggingFace. Please upload the audio file directly."
            return None, f"{last_error}\n\nTry uploading the audio file directly instead."

        # The post-processor normally yields audio.wav; otherwise locate
        # whatever wav file yt-dlp produced in the output directory.
        if not audio_path.exists():
            for f in output_dir.glob('audio.*'):
                if f.suffix == '.wav':
                    audio_path = f
                    break

        # Fix: previously a missing output file was still returned as a
        # "successful" dead path; report it as an error instead.
        if not audio_path.exists():
            return None, "Download reported success but no audio file was produced."

        return str(audio_path), title

    except subprocess.TimeoutExpired:
        return None, "Download timed out. Try a shorter video."
    except Exception as e:
        return None, str(e)
| |
|
@spaces.GPU(duration=120)
def _separate_stems_gpu(audio_path: str, progress=None, two_stem: bool = False) -> dict:
    """Separate audio into stems using Demucs (GPU accelerated) - internal.

    Args:
        audio_path: Path to the input audio file.
        progress: Optional callable(fraction, message) for UI updates.
        two_stem: True -> vocals/instrumental split; False -> 6-stem htdemucs_6s.

    Returns:
        {'stems': {stem_name: file_path}, 'stems_dir': song_dir} on success,
        or {'error': message} on any failure.
    """
    try:
        import subprocess
        mode = "2stem" if two_stem else "6stem"
        stems_dir = OUTPUT_DIR / f"stems_{mode}_{datetime.now().strftime('%H%M%S')}"
        stems_dir.mkdir(exist_ok=True)

        if progress:
            if two_stem:
                progress(0.3, "Running Demucs 2-stem separation (vocals/instrumental)...")
            else:
                progress(0.3, "Running Demucs 6-stem separation (drums, bass, guitar, keys, other, vocals)...")

        # Demucs is invoked as a CLI module; stems are encoded as 320kbps mp3.
        # NOTE(review): runs 'python' from PATH rather than sys.executable —
        # confirm both resolve to the same interpreter in the deployment image.
        if two_stem:
            cmd = ['python', '-m', 'demucs', '--two-stems=vocals', '-o', str(stems_dir), '--mp3', '--mp3-bitrate=320', audio_path]
        else:
            cmd = ['python', '-m', 'demucs', '-n', 'htdemucs_6s', '-o', str(stems_dir), '--mp3', '--mp3-bitrate=320', audio_path]

        subprocess.run(cmd, capture_output=True, check=True)

        # Demucs writes <out>/<model_name>/<track_name>/<stem>.<ext>; descend
        # two directory levels (IndexError if empty is caught by except below).
        model_dir = list(stems_dir.glob('*'))[0]
        song_dir = list(model_dir.glob('*'))[0]

        stems = {}
        for stem_file in song_dir.glob('*.mp3'):
            stem_name = stem_file.stem
            stems[stem_name] = str(stem_file)

        # Fallback: pick up wav stems when mp3 encoding was unavailable.
        for stem_file in song_dir.glob('*.wav'):
            stem_name = stem_file.stem
            if stem_name not in stems:
                stems[stem_name] = str(stem_file)

        return {'stems': stems, 'stems_dir': str(song_dir)}
    except Exception as e:
        return {'error': str(e)}
| |
|
def separate_stems(audio_path: str, progress=None, two_stem: bool = False) -> dict:
    """Separate stems - wrapper that converts GPU/quota failures into
    an {'error': message} result instead of raising."""
    try:
        result = _separate_stems_gpu(audio_path, progress, two_stem)
    except Exception as exc:
        return {'error': handle_gpu_error(exc)}
    return result
| |
|
def detect_key_and_tempo(audio_path: str) -> dict:
    """Detect musical key, tempo and duration of an audio file.

    Only the first 60 seconds are loaded for key/tempo analysis, but the
    reported duration covers the entire file.

    Args:
        audio_path: Path to the audio file.

    Returns:
        dict with 'key' (e.g. "C major"), 'bpm' (int) and 'duration'
        (seconds); falls back to {'key': 'Unknown', 'bpm': 120} when librosa
        is missing or analysis fails (with 'error' set in the latter case).
    """
    if not HAS_LIBROSA:
        return {'key': 'Unknown', 'bpm': 120}

    try:
        # 60-second cap keeps analysis fast on long tracks.
        y, sr = librosa.load(audio_path, duration=60)

        # Tempo: beat_track may return a scalar or a 1-element array
        # depending on the librosa version.
        tempo, _ = librosa.beat.beat_track(y=y, sr=sr)
        bpm = float(tempo) if hasattr(tempo, '__float__') else float(tempo[0])

        # Key: the strongest average chroma bin is taken as the tonic.
        chroma = librosa.feature.chroma_cqt(y=y, sr=sr)
        chroma_avg = np.mean(chroma, axis=1)
        key_idx = np.argmax(chroma_avg)
        key_names = ['C', 'C#', 'D', 'D#', 'E', 'F', 'F#', 'G', 'G#', 'A', 'A#', 'B']
        key = key_names[key_idx]

        # Krumhansl-Kessler key profiles: correlate the averaged chroma with
        # the major/minor templates rotated to the detected tonic.
        major_profile = np.array([6.35, 2.23, 3.48, 2.33, 4.38, 4.09, 2.52, 5.19, 2.39, 3.66, 2.29, 2.88])
        minor_profile = np.array([6.33, 2.68, 3.52, 5.38, 2.60, 3.53, 2.54, 4.75, 3.98, 2.69, 3.34, 3.17])

        major_corr = np.corrcoef(chroma_avg, np.roll(major_profile, key_idx))[0, 1]
        minor_corr = np.corrcoef(chroma_avg, np.roll(minor_profile, key_idx))[0, 1]

        mode = "major" if major_corr > minor_corr else "minor"

        # Fix: duration used to be measured on the truncated 60s analysis
        # segment, capping every long track at 60. Measure the full file
        # instead (keyword name differs across librosa versions).
        try:
            duration = librosa.get_duration(path=audio_path)
        except TypeError:
            duration = librosa.get_duration(filename=audio_path)

        return {
            'key': f"{key} {mode}",
            'bpm': round(bpm),
            'duration': duration
        }
    except Exception as e:
        return {'key': 'Unknown', 'bpm': 120, 'error': str(e)}
| |
|
| | |
| | |
| | |
| |
|
def process_song(audio, yt_url, name, lyrics, do_stems, do_chords, do_daw, user_email, progress=gr.Progress()):
    """Process a song - stems, chords, analysis.

    Args:
        audio: Uploaded audio file path; takes priority over yt_url.
        yt_url: YouTube URL used when no file is uploaded.
        name: Optional song name (defaults to the file stem / video title).
        lyrics: Optional lyrics text used to align the chord chart.
        do_stems: Run Demucs stem separation.
        do_chords: Run chord detection and build a chord chart.
        do_daw: Accepted for API compatibility; not referenced in this function.
        user_email: Catalog owner; falsy means the "demo" user.
        progress: Gradio progress reporter.

    Returns:
        (log_text, audio_path_or_None, status_display_html)
    """
    progress(0.1, "Starting processing...")

    # Resolve the input source: uploaded file wins over YouTube URL.
    audio_path = None
    song_name = name

    if audio:
        audio_path = audio
        if not song_name:
            song_name = Path(audio).stem
    elif yt_url:
        progress(0.15, "Downloading from YouTube...")
        audio_path, title = download_youtube(yt_url)
        if not audio_path:
            return f"Download failed: {title}", None, get_status_display(user_email)
        if not song_name:
            song_name = title
    else:
        return "Please provide audio file or YouTube URL", None, get_status_display(user_email)

    log = [f"Processing: {song_name}"]

    # Key/tempo/duration analysis feeds the chord chart and catalog entry.
    progress(0.2, "Analyzing audio...")
    analysis = detect_key_and_tempo(audio_path)
    log.append(f"Key: {analysis.get('key', 'Unknown')} | BPM: {analysis.get('bpm', 120)}")

    # Stem separation. separate_stems returns either
    # {'stems': {...}, 'stems_dir': ...} or {'error': message}.
    stems = {}
    if do_stems:
        progress(0.3, "Separating stems...")
        try:
            result = separate_stems(audio_path, progress)
            if 'error' in result:
                error_msg = result['error']
                if is_quota_error(error_msg):
                    log.append(QUOTA_ERROR_MSG)
                else:
                    log.append(f"Stem separation error: {error_msg}")
            else:
                # Fix: unwrap the actual stem mapping. Previously the wrapper
                # dict itself was logged and saved, so the catalog received
                # {'stems': ..., 'stems_dir': ...} instead of the stem files
                # (create_song_package already unwraps it this way).
                stems = result.get('stems', {})
                log.append(f"Stems: {', '.join(stems.keys())}")
        except Exception as e:
            log.append(handle_gpu_error(e))

    # Chord detection / chart generation.
    chords_text = None
    chords_list = []
    if do_chords:
        progress(0.7, "Detecting chords...")
        if HAS_CHORD_DETECTION:
            try:
                chords_list = extract_chords(audio_path, min_duration=1.0)
                if chords_list:
                    duration = analysis.get('duration', 180)
                    chords_text = format_chord_chart(
                        chords=chords_list,
                        lyrics=lyrics,
                        duration=duration,
                        key=analysis.get('key', 'C'),
                        bpm=analysis.get('bpm', 120),
                        song_name=song_name
                    )
                    if lyrics:
                        log.append(f"Chord chart: {len(chords_list)} chords aligned to lyrics")
                    else:
                        log.append(f"Chord chart: {len(chords_list)} changes detected (add lyrics for aligned chart)")
                else:
                    chords_text = f"# {song_name}\nKey: {analysis.get('key', 'C')}\nBPM: {analysis.get('bpm', 120)}\n\n(No chord changes detected)"
                    log.append("Chord detection: no changes found")
            except Exception as e:
                chords_text = f"# {song_name}\nChord detection error: {str(e)}"
                log.append(f"Chord detection error: {e}")
        else:
            chords_text = f"# {song_name}\nKey: {analysis.get('key', 'C')}\nBPM: {analysis.get('bpm', 120)}\n\n(Chord detection not available)"
            log.append("Chord detection not available")

    # Persist everything to the per-user song catalog.
    if HAS_CATALOG:
        progress(0.9, "Saving to catalog...")
        catalog = SongCatalog(user_email or "demo")
        song_id = catalog.add_song(
            name=song_name,
            original_path=audio_path,
            stems=stems if stems else None,
            chords=chords_text,
            lyrics=lyrics,
            key=analysis.get('key'),
            bpm=analysis.get('bpm'),
            duration=analysis.get('duration'),
            source='youtube' if yt_url else 'upload'
        )
        log.append(f"Saved to catalog: {song_id}")

    progress(1.0, "Complete!")
    return "\n".join(log), audio_path, get_status_display(user_email)
| |
|
| | |
| | |
| | |
| |
|
def create_song_package(audio, yt_url, name, lyrics, user_email, progress=gr.Progress()):
    """
    Create a complete song package with:
    - Pass 1 stems (vocals, instrumental)
    - Pass 2 stems (drums, bass, guitar, keys, other, vocals)
    - Mastered original (Warm preset)
    - Chord chart
    - Reaper project file
    All bundled in a zip named after the song.

    Args:
        audio: Uploaded audio file path; takes priority over yt_url.
        yt_url: YouTube URL used when no file is uploaded.
        name: Optional song name (defaults to the file stem / video title).
        lyrics: Optional lyrics text, aligned to the chord chart and bundled.
        user_email: Catalog/status owner; falsy means the "demo" user.
        progress: Gradio progress reporter.

    Returns:
        (log_text, zip_path_or_None, status_display_html)
    """
    progress(0.05, "Starting song package creation...")

    # Resolve the input source: uploaded file wins over YouTube URL.
    audio_path = None
    song_name = name

    if audio:
        audio_path = audio
        if not song_name:
            song_name = Path(audio).stem
    elif yt_url:
        progress(0.08, "Downloading from YouTube...")
        audio_path, title = download_youtube(yt_url)
        if not audio_path:
            return f"Download failed: {title}", None, get_status_display(user_email)
        if not song_name:
            song_name = title
    else:
        return "Please provide audio file or YouTube URL", None, get_status_display(user_email)

    # Filesystem-safe name used for the zip and every bundled file.
    safe_name = "".join(c for c in song_name if c.isalnum() or c in (' ', '-', '_')).strip()
    safe_name = safe_name.replace(' ', '_')

    log = [f"Creating package for: {song_name}"]

    # Staging directory for intermediate artifacts; removed after zipping.
    package_dir = OUTPUT_DIR / f"package_{safe_name}_{datetime.now().strftime('%H%M%S')}"
    package_dir.mkdir(parents=True, exist_ok=True)

    # Key/tempo analysis feeds the chord chart and the Reaper project.
    progress(0.10, "Analyzing audio...")
    analysis = detect_key_and_tempo(audio_path)
    tempo = analysis.get('bpm', 120)
    key = analysis.get('key', 'C')
    log.append(f"Key: {key} | BPM: {tempo}")

    # Pass 1: coarse vocals/instrumental split.
    stems_pass1 = {}
    stems_pass1_dir = None

    progress(0.15, "Pass 1: Separating vocals/instrumental (GPU)...")
    try:
        result = separate_stems(audio_path, progress, two_stem=True)
        if 'error' in result:
            error_msg = result['error']
            if is_quota_error(str(error_msg)):
                log.append(QUOTA_ERROR_MSG)
            else:
                log.append(f"Pass 1 error: {error_msg}")
        else:
            stems_pass1 = result.get('stems', {})
            stems_pass1_dir = result.get('stems_dir')
            log.append(f"Pass 1 stems: {', '.join(stems_pass1.keys())}")
    except Exception as e:
        log.append(f"Pass 1 error: {handle_gpu_error(e)}")

    # Pass 2: detailed 6-stem split.
    stems_pass2 = {}
    stems_pass2_dir = None

    progress(0.35, "Pass 2: Separating detailed stems (GPU)...")
    try:
        result = separate_stems(audio_path, progress, two_stem=False)
        if 'error' in result:
            error_msg = result['error']
            if is_quota_error(str(error_msg)):
                log.append(QUOTA_ERROR_MSG)
            else:
                log.append(f"Pass 2 error: {error_msg}")
        else:
            stems_pass2 = result.get('stems', {})
            stems_pass2_dir = result.get('stems_dir')
            log.append(f"Pass 2 stems: {', '.join(stems_pass2.keys())}")
    except Exception as e:
        log.append(f"Pass 2 error: {handle_gpu_error(e)}")

    # Master the original with the Warm preset.
    mastered_path = None
    progress(0.60, "Mastering original audio (Warm preset)...")
    try:
        mastered_output = package_dir / f"{safe_name}_mastered.wav"
        mastered_path, master_analysis = master_audio(
            audio_path,
            str(mastered_output),
            preset='Warm'
        )
        if mastered_path:
            log.append(f"Mastered: {Path(mastered_path).name}")
            if master_analysis and 'output' in master_analysis:
                lufs = master_analysis['output'].get('lufs', 'N/A')
                log.append(f" LUFS: {lufs:.1f}" if isinstance(lufs, float) else f" LUFS: {lufs}")
    except Exception as e:
        log.append(f"Mastering error: {str(e)}")

    # Chord chart (optionally aligned to the provided lyrics).
    chords_text = None
    chords_list = []
    chords_file = None

    progress(0.70, "Detecting chords...")
    try:
        if HAS_CHORD_DETECTION:
            chords_list = extract_chords(audio_path, min_duration=1.0)
            if chords_list:
                duration = analysis.get('duration', 180)
                chords_text = format_chord_chart(
                    chords=chords_list,
                    lyrics=lyrics,
                    duration=duration,
                    key=key,
                    bpm=tempo,
                    song_name=song_name
                )

                chords_file = package_dir / f"{safe_name}_chords.txt"
                with open(chords_file, 'w') as f:
                    f.write(chords_text)

                if lyrics:
                    log.append(f"Chord chart: {len(chords_list)} chords aligned to lyrics")
                else:
                    log.append(f"Chord chart: {len(chords_list)} changes")
            else:
                log.append("No chord changes detected")
        else:
            log.append("Chord detection not available")
    except Exception as e:
        log.append(f"Chord detection error: {str(e)}")

    # Reaper project referencing the richest stem set available.
    reaper_file = None
    progress(0.80, "Creating Reaper project...")
    try:
        if HAS_REAPER_MODULE:
            stems_for_reaper = stems_pass2_dir if stems_pass2 else stems_pass1_dir
            if stems_for_reaper and Path(stems_for_reaper).exists():
                rpp_content = create_reaper_project(
                    song_name=song_name,
                    stems_dir=stems_for_reaper,
                    tempo=tempo,
                    key=key,
                    chords=chords_list,
                    audio_file=audio_path
                )

                reaper_file = package_dir / f"{safe_name}.rpp"
                with open(reaper_file, 'w') as f:
                    f.write(rpp_content)
                log.append(f"Reaper project: {reaper_file.name}")
            else:
                log.append("Reaper: No stems available for project")
        else:
            log.append("Reaper module not available")
    except Exception as e:
        log.append(f"Reaper project error: {str(e)}")

    # Bundle everything into <safe_name>.zip.
    progress(0.90, "Creating zip bundle...")
    zip_path = OUTPUT_DIR / f"{safe_name}.zip"

    try:
        with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED) as zf:
            files_added = []

            zf.write(audio_path, f"{safe_name}/original/{Path(audio_path).name}")
            files_added.append("original")

            if mastered_path and Path(mastered_path).exists():
                zf.write(mastered_path, f"{safe_name}/mastered/{Path(mastered_path).name}")
                files_added.append("mastered")

            # Fix: keep each stem's real extension in the archive — the
            # separator falls back to .wav files when mp3 encoding is
            # unavailable, and those were previously archived under a
            # misleading hard-coded .mp3 name.
            stems1_added = 0
            for stem_name, stem_path in stems_pass1.items():
                if Path(stem_path).exists():
                    zf.write(stem_path, f"{safe_name}/stems_basic/{stem_name}{Path(stem_path).suffix}")
                    stems1_added += 1
            if stems1_added > 0:
                files_added.append(f"stems_basic ({stems1_added})")

            stems2_added = 0
            for stem_name, stem_path in stems_pass2.items():
                if Path(stem_path).exists():
                    zf.write(stem_path, f"{safe_name}/stems_detailed/{stem_name}{Path(stem_path).suffix}")
                    stems2_added += 1
            if stems2_added > 0:
                files_added.append(f"stems_detailed ({stems2_added})")

            if chords_file and Path(chords_file).exists():
                zf.write(str(chords_file), f"{safe_name}/{chords_file.name}")
                files_added.append("chords")

            if reaper_file and Path(reaper_file).exists():
                zf.write(str(reaper_file), f"{safe_name}/{reaper_file.name}")
                files_added.append("reaper")

            if lyrics:
                lyrics_path = package_dir / f"{safe_name}_lyrics.txt"
                with open(lyrics_path, 'w') as f:
                    f.write(lyrics)
                zf.write(str(lyrics_path), f"{safe_name}/{lyrics_path.name}")
                files_added.append("lyrics")

        log.append(f"\nPackage created: {safe_name}.zip")
        log.append(f"Contents: {', '.join(files_added)}")
        log.append(f"Size: {zip_path.stat().st_size / (1024*1024):.1f} MB")

    except Exception as e:
        log.append(f"Zip creation error: {str(e)}")
        zip_path = None

    # Fix: best-effort cleanup of the staging dir without a bare except.
    shutil.rmtree(package_dir, ignore_errors=True)

    progress(1.0, "Package complete!")
    return "\n".join(log), str(zip_path) if zip_path else None, get_status_display(user_email)
| |
|
| | |
| | |
| | |
| |
|
@spaces.GPU(duration=300)
def _generate_ai_music_gpu(prompt, genre, mood, key, bpm, time_sig, duration, instruments, temperature, user_email, progress=gr.Progress()):
    """Generate AI music with full parameters (GPU accelerated) - internal.

    Delegates to vynl_generator.generate_song and, on success, records the
    result in the per-user generated-tracks catalog.

    Returns:
        (audio_path_or_None, status_message) as produced by generate_song.
    """
    if not HAS_AUDIOCRAFT:
        return None, "AudioCraft not installed.\n\nThis feature requires a GPU-enabled environment.\n\nFor local installation with GPU:\npip install audiocraft xformers\npip install torch torchaudio --index-url https://download.pytorch.org/whl/cu118"

    progress(0.1, "Preparing generation...")

    # Comma-separated instrument string -> list of trimmed names.
    inst_list = [i.strip() for i in instruments.split(',')] if instruments else []

    # "Auto" in the UI means let the generator choose the key.
    selected_key = None if key == "Auto" else key

    audio_path, status = generate_song(
        prompt=prompt,
        genre=genre,
        mood=mood,
        key=selected_key,
        bpm=int(bpm) if bpm else 120,
        time_sig=time_sig,
        duration=float(duration),
        instruments=inst_list,
        temperature=temperature,
        progress_callback=lambda p, m: progress(p, m)
    )

    if audio_path and HAS_CATALOG:
        # Persist the generated track with its generation parameters.
        gen_catalog = GeneratedCatalog(user_email or "demo")
        gen_catalog.add_generated(
            name=f"Generated_{datetime.now().strftime('%Y%m%d_%H%M%S')}",
            audio_path=audio_path,
            prompt=f"{prompt} | {genre} | {mood}",
            model="musicgen",
            duration=duration,
            key=selected_key,
            bpm=bpm,
            time_signature=time_sig
        )

    return audio_path, status
| |
|
def generate_ai_music(prompt, genre, mood, key, bpm, time_sig, duration, instruments, temperature, user_email, progress=gr.Progress()):
    """Generate AI music - wrapper that converts GPU/quota failures into a
    (None, message) result instead of raising."""
    try:
        result = _generate_ai_music_gpu(prompt, genre, mood, key, bpm, time_sig, duration, instruments, temperature, user_email, progress)
    except Exception as exc:
        return None, handle_gpu_error(exc)
    return result
| |
|
@spaces.GPU(duration=120)
def _apply_voice_clone_gpu(audio, voice_model, pitch_shift, user_email, progress=gr.Progress()):
    """Apply voice cloning/conversion (GPU accelerated) - internal.

    Preset voices (PRESET_VOICES) are implemented as plain pitch shifts via
    librosa; any other model name is forwarded to the RVC backend when it is
    installed.

    Returns:
        (output_audio_path_or_None, status_message)
    """
    if not audio:
        return None, "Please provide audio"

    progress(0.2, "Processing voice...")

    # Preset voices: combine the user's slider with the preset's base shift.
    if voice_model in PRESET_VOICES:
        preset = PRESET_VOICES[voice_model]
        total_pitch = int(pitch_shift) + preset.get('pitch_shift', 0)

        if HAS_LIBROSA:
            try:
                y, sr = librosa.load(audio, sr=None)
                y_shifted = librosa.effects.pitch_shift(y, sr=sr, n_steps=total_pitch)

                import soundfile as sf
                # Fix: tempfile.mktemp is deprecated and race-prone; mkstemp
                # creates the file securely and sf.write then overwrites it.
                fd, output_path = tempfile.mkstemp(suffix='.wav')
                os.close(fd)
                sf.write(output_path, y_shifted, sr)

                progress(1.0, "Done!")
                return output_path, f"Applied {preset.get('name', voice_model)} with {total_pitch} semitone shift"
            except Exception as e:
                return None, f"Error: {str(e)}"
        else:
            return None, "Librosa required for pitch shifting"

    # Custom/trained models go through the RVC backend when available.
    if HAS_RVC:
        try:
            output, status = clone_voice(
                source_audio=audio,
                target_voice=voice_model,
                pitch_shift=int(pitch_shift),
                progress_callback=lambda p, m: progress(p, m)
            )
            if is_quota_error(str(status)):
                return None, QUOTA_ERROR_MSG
            return output, status
        except Exception as e:
            return None, handle_gpu_error(e)

    return None, "Voice model not found"
| |
|
def apply_voice_clone(audio, voice_model, pitch_shift, user_email, progress=gr.Progress()):
    """Apply voice cloning - wrapper that converts GPU/quota failures into a
    (None, message) result instead of raising."""
    try:
        result = _apply_voice_clone_gpu(audio, voice_model, pitch_shift, user_email, progress)
    except Exception as exc:
        return None, handle_gpu_error(exc)
    return result
| |
|
def train_custom_voice(name, description, audio_files, epochs, user_email, progress=gr.Progress()):
    """Train custom voice model.

    Validates input, normalizes the uploaded files to plain paths, and
    delegates to the RVC backend's train_voice_model. Returns a status string.
    """
    if not audio_files:
        return "Please provide training audio files"

    if not HAS_RVC:
        return f"RVC not available.\n\n{RVC_INFO}\n\nFor full RVC support, use the local installation."

    progress(0.1, "Preparing training data...")

    # Gradio may hand us file objects (with .name) or plain path strings;
    # normalize both to path strings, ignoring anything else.
    file_paths = []
    for item in audio_files:
        if hasattr(item, 'name'):
            file_paths.append(item.name)
        elif isinstance(item, str):
            file_paths.append(item)

    model_id, status = train_voice_model(
        name=name,
        training_files=file_paths,
        description=description,
        epochs=int(epochs),
        user_email=user_email or "demo",
        progress_callback=lambda p, m: progress(p, m)
    )

    return status
| |
|
| | |
| | |
| | |
| |
|
def get_catalog_list(user_email):
    """Get catalog items for display"""
    # Catalog backend is optional; show a placeholder row when absent.
    if not HAS_CATALOG:
        return [["Catalog not available", "-", "-", "-", "-"]]

    # Build table rows (Name, Type, Key, BPM, Date) for at most 50 items;
    # the date column keeps only the YYYY-MM-DD prefix of the timestamp.
    rows = [
        [
            entry.get('name', 'Unknown'),
            entry.get('type', 'song'),
            entry.get('key', '-'),
            str(entry.get('bpm', '-')),
            entry.get('created', '')[:10],
        ]
        for entry in get_all_audio_for_user(user_email or "demo")[:50]
    ]

    return rows or [["No items in catalog", "-", "-", "-", "-"]]
| |
|
| | |
| | |
| | |
| |
|
def login_user(email, password):
    """Login user"""
    # Returns (message, status-bar HTML, current-user email for gr.State).
    if not (email and password):
        return "Enter email and password", get_status_display(""), ""

    ok, user = user_manager.login(email, password)
    if not ok:
        return "Invalid credentials", get_status_display(""), ""
    return f"Welcome back, {user['name']}!", get_status_display(email), email
| |
|
def register_user(email, password, name):
    """Register a new user account.

    Args:
        email: Account email address.
        password: Plain-text password forwarded to the user manager.
        name: Display name for the new account.

    Returns:
        Tuple of (message, status-bar HTML, current-user email). The email is
        only propagated into session state when registration succeeds.
    """
    # Guard against empty inputs before hitting the account backend,
    # mirroring the validation already performed in login_user.
    if not email or not password:
        return "Enter email and password", get_status_display(""), ""

    success, msg = user_manager.create_account(email, password, name)
    if success:
        return msg, get_status_display(email), email
    return msg, get_status_display(""), ""
| |
|
def activate_license(email, license_key):
    """Activate license"""
    # License activation requires an authenticated user.
    if not email:
        return "Please login first", get_status_display("")

    # Only the human-readable message is shown; the success flag is unused.
    _success, msg = user_manager.activate_license(email, license_key)
    return msg, get_status_display(email)
| |
|
| | |
| | |
| | |
| |
|
with gr.Blocks(css=CSS, title=f"VYNL v{VERSION}") as demo:

    # Per-session state: email of the logged-in user ("" means demo mode).
    current_user = gr.State("")

    # Header banner.
    gr.HTML(f"""
    <div class="vynl-header">
        <h1>VYNL</h1>
        <p>Complete Music Production Suite v{VERSION}</p>
    </div>
    """)

    # Status bar; auth and processing handlers replace this HTML at runtime.
    status_display = gr.HTML('<div class="status-bar">DEMO MODE | 3 free processes available</div>')

    # Account login / registration and license activation controls.
    with gr.Accordion("Account / License", open=False):
        with gr.Row():
            with gr.Column():
                login_email = gr.Textbox(label="Email")
                login_pass = gr.Textbox(label="Password", type="password")
                with gr.Row():
                    login_btn = gr.Button("Login", variant="primary", size="sm")
                    reg_btn = gr.Button("Register", size="sm")
                auth_msg = gr.Textbox(label="Status", interactive=False)

            with gr.Column():
                lic_key = gr.Textbox(label="License Key", placeholder="VYNL-XXXX-XXXX-XXXX-XXXX")
                lic_btn = gr.Button("Activate License")
                lic_msg = gr.Textbox(label="License Status", interactive=False)

    # Static license agreement text (display only, no interaction).
    with gr.Accordion("License Agreement", open=False):
        gr.Markdown("""
        ### VYNL Software License Agreement
        **Copyright (c) 2024-2026 Robert T. Lackey. All rights reserved.**

        By using this software, you agree to the following terms:
        - All output files (stems, charts, generated audio) are yours to use commercially
        - The software itself remains the property of Robert T. Lackey
        - You may NOT redistribute, sublicense, or reverse engineer this software

        **Stone and Lantern Music Group**
        Contact: rtlackey@icloud.com
        """)

    # Main feature tabs.
    with gr.Tabs():
|
| | |
        # --- PROCESS tab: analysis, stem separation, chord detection ---
        with gr.Tab("PROCESS"):
            gr.Markdown("### Analyze, Separate Stems, Detect Chords")

            with gr.Row():
                with gr.Column():
                    # Input: either an uploaded file or a YouTube URL.
                    proc_audio = gr.Audio(label="Upload Audio", type="filepath")
                    proc_yt = gr.Textbox(label="Or YouTube URL")
                    proc_name = gr.Textbox(label="Song Name (optional)")
                    proc_lyrics = gr.Textbox(label="Lyrics (optional)", lines=3)

                    # Which processing stages to run.
                    with gr.Row():
                        proc_stems = gr.Checkbox(label="Stems", value=True)
                        proc_chords = gr.Checkbox(label="Chords", value=True)
                        proc_daw = gr.Checkbox(label="DAW Project", value=False)

                    with gr.Row():
                        proc_btn = gr.Button("PROCESS", variant="primary")
                        package_btn = gr.Button("CREATE FULL PACKAGE", variant="secondary")

                    gr.Markdown("""
                    <small>**PROCESS**: Quick analysis with selected options<br>
                    **CREATE FULL PACKAGE**: Complete ZIP with all stems (2-pass), mastered audio, chords, Reaper project</small>
                    """)

                with gr.Column():
                    proc_log = gr.Textbox(label="Output Log", lines=15, interactive=False)
                    proc_output = gr.File(label="Download")
|
| | |
        # --- AI STUDIO tab: music generation, voice cloning, voice training ---
        with gr.Tab("AI STUDIO"):
            gr.Markdown("### AI Music Generation & Voice Cloning")

            with gr.Tabs():
                # Text-to-music generation controls.
                with gr.Tab("Generate Music"):
                    with gr.Row():
                        with gr.Column():
                            gen_prompt = gr.Textbox(label="Describe your music", lines=2,
                                placeholder="Upbeat electronic track with driving bass and atmospheric synths...")

                            with gr.Row():
                                gen_genre = gr.Dropdown(choices=[""] + GENRES, label="Genre", value="")
                                gen_mood = gr.Dropdown(choices=[""] + MOODS, label="Mood", value="")

                            with gr.Row():
                                gen_key = gr.Dropdown(choices=["Auto"] + KEYS, label="Key", value="Auto")
                                gen_bpm = gr.Slider(60, 200, value=120, step=1, label="BPM")

                            with gr.Row():
                                gen_time = gr.Dropdown(choices=TIME_SIGNATURES, label="Time Signature", value="4/4")
                                gen_duration = gr.Slider(10, 600, value=60, step=10,
                                    label="Duration (seconds) - up to 10 min")

                            gen_instruments = gr.Textbox(label="Instruments (comma-separated)",
                                placeholder="Piano, Drums, Bass, Synth")
                            gen_temp = gr.Slider(0.5, 1.5, value=1.0, step=0.1, label="Creativity (Temperature)")
                            gen_btn = gr.Button("GENERATE MUSIC", variant="primary")

                        with gr.Column():
                            gen_output = gr.Audio(label="Generated Audio")
                            gen_status = gr.Textbox(label="Status", lines=5, interactive=False)

                    gr.Markdown("""
                    **Note:** AI music generation requires GPU and AudioCraft.
                    For full features, use the local installation.
                    """)

                # Voice conversion using preset or trained voice models.
                with gr.Tab("Voice Cloning"):
                    with gr.Row():
                        with gr.Column():
                            voice_audio = gr.Audio(label="Source Audio (vocals to convert)", type="filepath")
                            voice_choices = list(PRESET_VOICES.keys())
                            voice_model = gr.Dropdown(choices=voice_choices, label="Voice Model/Preset",
                                value=voice_choices[0] if voice_choices else None)
                            voice_pitch = gr.Slider(-24, 24, value=0, step=1, label="Additional Pitch Shift (semitones)")
                            voice_btn = gr.Button("CONVERT VOICE", variant="primary")

                        with gr.Column():
                            voice_output = gr.Audio(label="Converted Output")
                            voice_status = gr.Textbox(label="Status", lines=3, interactive=False)

                    gr.Markdown("""
                    **Available Presets:**
                    - **Male Tenor** - Standard male voice
                    - **Male Bass** - Deep male voice (-5 semitones)
                    - **Female Alto** - Standard female voice (+12 semitones)
                    - **Female Soprano** - High female voice (+15 semitones)
                    """)

                # Custom RVC voice-model training.
                with gr.Tab("Train Custom Voice"):
                    with gr.Row():
                        with gr.Column():
                            train_name = gr.Textbox(label="Voice Model Name", placeholder="My Custom Voice")
                            train_desc = gr.Textbox(label="Description", placeholder="Describe the voice characteristics")
                            train_files = gr.File(label="Training Audio Files (upload multiple clean vocal recordings)",
                                file_count="multiple", file_types=["audio"])
                            train_epochs = gr.Slider(50, 500, value=100, step=50, label="Training Epochs")
                            train_btn = gr.Button("START TRAINING", variant="primary")

                        with gr.Column():
                            train_status = gr.Textbox(label="Training Status", lines=12, interactive=False)

                    gr.Markdown("""
                    **Training Tips:**
                    - Use 10-30 minutes of clean vocal recordings
                    - Avoid background music or noise
                    - Include variety (different pitches, vowels)
                    - More epochs = better quality (but takes longer)

                    **Note:** Full voice training requires local GPU installation.
                    """)
|
| | |
        # --- MASTER tab: AI mastering with optional reference track ---
        with gr.Tab("MASTER"):
            gr.Markdown("### AI Mastering")

            with gr.Row():
                with gr.Column():
                    master_input = gr.Audio(label="Input (Unmastered Mix)", type="filepath")
                    master_ref = gr.Audio(label="Reference Track (optional)", type="filepath")
                    master_lufs = gr.Slider(-20, -8, value=-14, step=0.5, label="Target LUFS")
                    master_preset = gr.Radio(
                        ["Balanced", "Warm", "Bright", "Punchy", "Reference Match"],
                        label="Mastering Preset", value="Balanced"
                    )
                    master_btn = gr.Button("MASTER", variant="primary")

                with gr.Column():
                    master_output = gr.Audio(label="Mastered Output")
                    master_status = gr.Textbox(label="Analysis Report", lines=10, interactive=False)

        # --- CATALOG tab: read-only table of the user's library ---
        with gr.Tab("CATALOG"):
            gr.Markdown("### Your Music Library - All processed and generated audio")

            cat_refresh = gr.Button("Refresh Catalog")
            cat_table = gr.Dataframe(
                headers=["Name", "Type", "Key", "BPM", "Date"],
                label="Your Music",
                interactive=False
            )
|
| | |
        # --- SESSIONS tab: setlist entry plus teleprompter preview ---
        with gr.Tab("SESSIONS"):
            gr.Markdown("### Setlist Management & Teleprompter")

            with gr.Row():
                with gr.Column():
                    sess_name = gr.Textbox(label="Session/Setlist Name")
                    sess_songs = gr.Textbox(label="Songs (one per line)", lines=12,
                        placeholder="Song 1\nSong 2\nSong 3...")
                    sess_save = gr.Button("Save Session")

                with gr.Column():
                    gr.Markdown("### Teleprompter Preview")
                    sess_display = gr.HTML("""
                    <div style="background:#0D0D0D;padding:30px;border:2px solid #FF6B4A;border-radius:12px;min-height:350px;font-family:monospace;">
                        <p style="color:#666;text-align:center;font-size:1.2em;">Load a session to display teleprompter</p>
                    </div>
                    """)

    # Footer with copyright / contact information.
    gr.HTML(f"""
    <div class="vynl-footer">
        <p><strong>VYNL v{VERSION} - Complete Music Production Suite</strong></p>
        <p>Copyright (c) 2024-2026 Robert T. Lackey. All rights reserved.</p>
        <p style="margin-top:8px;">Stone and Lantern Music Group | rtlackey@icloud.com</p>
        <p style="margin-top:4px;font-size:0.85em;color:#666;">
            This software is proprietary. Unauthorized distribution is prohibited.
        </p>
    </div>
    """)
| |
|
| | |
| |
|
| | |
    # --- Event wiring: auth handlers update the status bar and user state ---
    login_btn.click(login_user, [login_email, login_pass], [auth_msg, status_display, current_user])
    # NOTE(review): there is no dedicated "name" textbox, so the email field is
    # reused as the display name for registration — confirm this is intended.
    reg_btn.click(register_user, [login_email, login_pass, login_email], [auth_msg, status_display, current_user])
    lic_btn.click(activate_license, [current_user, lic_key], [lic_msg, status_display])

    # Quick processing with the user-selected stage checkboxes.
    proc_btn.click(
        process_song,
        [proc_audio, proc_yt, proc_name, proc_lyrics, proc_stems, proc_chords, proc_daw, current_user],
        [proc_log, proc_output, status_display]
    )

    # Full package build (stems + mastering + chords + DAW project ZIP).
    package_btn.click(
        create_song_package,
        [proc_audio, proc_yt, proc_name, proc_lyrics, current_user],
        [proc_log, proc_output, status_display]
    )

    # AI music generation.
    gen_btn.click(
        generate_ai_music,
        [gen_prompt, gen_genre, gen_mood, gen_key, gen_bpm, gen_time, gen_duration, gen_instruments, gen_temp, current_user],
        [gen_output, gen_status]
    )

    # Voice conversion.
    voice_btn.click(
        apply_voice_clone,
        [voice_audio, voice_model, voice_pitch, current_user],
        [voice_output, voice_status]
    )

    # Custom voice training.
    train_btn.click(
        train_custom_voice,
        [train_name, train_desc, train_files, train_epochs, current_user],
        [train_status]
    )
| |
|
| | |
| | def master_track(input_audio, ref_audio, lufs, preset, user): |
| | if not input_audio: |
| | return None, "Please provide audio to master" |
| | try: |
| | output, analysis = master_audio(input_audio, ref_audio, lufs, preset) |
| | return output, format_analysis(analysis) if analysis else "Mastering complete" |
| | except Exception as e: |
| | return None, f"Mastering error: {str(e)}" |
| |
|
    # Mastering handler.
    master_btn.click(
        master_track,
        [master_input, master_ref, master_lufs, master_preset, current_user],
        [master_output, master_status]
    )

    # Catalog table refresh.
    cat_refresh.click(get_catalog_list, [current_user], [cat_table])
| |
|
| | |
| | |
| | |
| |
|
if __name__ == "__main__":
    # Startup banner summarizing optional-feature availability; the HAS_*
    # flags are set at import time by the dependency probes earlier in the file.
    print(f"\n{'='*60}")
    print(f"VYNL v{VERSION} - HuggingFace Edition")
    print(f"Copyright (c) 2024-2026 Robert T. Lackey")
    print(f"{'='*60}")
    print(f"\nSystem Status:")
    print(f"  AudioCraft: {'Available' if HAS_AUDIOCRAFT else 'Not available'}")
    print(f"  RVC: {'Available' if HAS_RVC else 'Not available'}")
    print(f"  Librosa: {'Available' if HAS_LIBROSA else 'Not installed'}")
    print(f"  yt-dlp: {'Available' if HAS_YTDLP else 'Not installed'}")
    print(f"  Catalog: {'Available' if HAS_CATALOG else 'Not available'}")
    print(f"\nStarting server at http://localhost:7860")
    print(f"{'='*60}\n")

    # Bind to all interfaces on 7860 (the port HuggingFace Spaces expects).
    demo.launch(server_name="0.0.0.0", server_port=7860)
| |
|