diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..ca5e3f268b9f9dd4c03963d2ae1754d70e21fc8f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+venv/
+.idea/
\ No newline at end of file
diff --git a/README2.md b/README2.md
new file mode 100644
index 0000000000000000000000000000000000000000..b958b95f51977dbe0a1a7d20f48fff84fd74a6ec
--- /dev/null
+++ b/README2.md
@@ -0,0 +1,256 @@
+# Ultimate Vocal Remover GUI v5.6
+
+[![Release](https://img.shields.io/github/release/anjok07/ultimatevocalremovergui.svg)](https://github.com/anjok07/ultimatevocalremovergui/releases/latest)
+[![Downloads](https://img.shields.io/github/downloads/anjok07/ultimatevocalremovergui/total.svg)](https://github.com/anjok07/ultimatevocalremovergui/releases)
+
+## About
+
+This application uses state-of-the-art source separation models to remove vocals from audio files. UVR's core developers trained all of the models provided in this package (except for the Demucs v3 and v4 4-stem models).
+
+- **Core Developers**
+    - [Anjok07](https://github.com/anjok07)
+    - [aufr33](https://github.com/aufr33)
+
+- **Support the Project**
+    - [Donate](https://www.buymeacoffee.com/uvr5)
+
+## Installation
+
+These bundles contain the UVR interface, Python, PyTorch, and the other dependencies needed to run the application. No prerequisites are required.
+
+### Windows Installation
+
+- Please Note:
+    - This installer is intended for those running Windows 10 or higher.
+    - Application functionality for systems running Windows 7 or lower is not guaranteed.
+    - Application functionality for systems with Intel Pentium & Celeron CPUs is not guaranteed.
+    - You must install UVR to the main C:\ drive. Installing UVR to a secondary drive will cause instability.
+
+- Download the UVR installer for Windows via the link below:
+    - [Main Download Link](https://github.com/Anjok07/ultimatevocalremovergui/releases/download/v5.6/UVR_v5.6.0_setup.exe)
+    - [Main Download Link mirror](https://www.mediafire.com/file_premium/jiatpgp0ljou52p/UVR_v5.6.0_setup.exe/file)
+- If you use an **AMD Radeon or Intel Arc graphics card**, you can try the OpenCL version:
+    - [OpenCL Version - Main Download Link](https://github.com/Anjok07/ultimatevocalremovergui/releases/download/v5.6/UVR_v5.6.0_setup_opencl.exe)
+- Update package instructions for those who already have UVR installed:
+    - If you already have UVR installed, you can install this package over it, download the update straight from the application, or [click here for the patch](https://github.com/Anjok07/ultimatevocalremovergui/releases/download/v5.6/UVR_Patch_10_6_23_4_27.exe).
+
+### Manual Windows Installation
+
+- Download and extract the repository [here](https://github.com/Anjok07/ultimatevocalremovergui/archive/refs/heads/master.zip)
+- Download and install Python 3.9 [here](https://www.python.org/ftp/python/3.9.8/python-3.9.8-amd64.exe)
+    - Make sure to check "Add python.exe to PATH" during the install
+- Run the following command from the extracted repo directory:
+
+```
+python.exe -m pip install -r requirements.txt
+```
+
+If you have a compatible Nvidia GPU, also run the following command to install the CUDA build of PyTorch:
+
+```
+python.exe -m pip install --upgrade torch --extra-index-url https://download.pytorch.org/whl/cu117
+```
+
+If you do not have FFmpeg or Rubber Band installed and want to avoid installing them the long way, follow the instructions below.
+
+**FFmpeg Installation**
+
+- Download the precompiled build [here](https://www.gyan.dev/ffmpeg/builds/ffmpeg-release-essentials.zip)
+- From the archive, extract the following file to the UVR application directory (the version number in the folder name may differ):
+    - ```ffmpeg-5.1.2-essentials_build/bin/ffmpeg.exe```
+
+**Rubber Band Installation**
+
+In order to use the Time Stretch or Change Pitch tool, you'll need Rubber Band.
+
+- Download the precompiled build [here](https://breakfastquay.com/files/releases/rubberband-3.1.2-gpl-executable-windows.zip)
+- From the archive, extract the following files to the UVR application directory:
+    - ```rubberband-3.1.2-gpl-executable-windows/rubberband.exe```
+    - ```rubberband-3.1.2-gpl-executable-windows/sndfile.dll```
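+
+After completing the steps above, you can optionally sanity-check the manual install from the same directory. This check is an editor's suggestion rather than an official step; it only assumes that the packages installed cleanly and that `ffmpeg.exe` and `rubberband.exe` were placed as described:
+
+```
+python.exe -c "import torch; print(torch.__version__, torch.cuda.is_available())"
+ffmpeg.exe -version
+rubberband.exe
+```
+
+`torch.cuda.is_available()` should print `True` if the CUDA build of PyTorch was installed; the last command simply prints Rubber Band's usage text if the binary is in place.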
+
+### MacOS Installation
+
+- Please Note:
+    - The MacOS Sonoma mouse clicking issue has been fixed.
+    - MPS (GPU) acceleration for Mac M1 has been expanded to work with Demucs v4 and all MDX-Net models.
+    - This bundle is intended for those running macOS Big Sur and above.
+    - Application functionality for systems running macOS Catalina or lower is not guaranteed.
+    - Application functionality for older or budget Mac systems is not guaranteed.
+    - Once everything is installed, the application may take 5-10 minutes to start for the first time (depending on your MacBook).
+
+- Download the UVR dmg for MacOS via one of the links below:
+    - Mac M1 (arm64) users:
+        - [Main Download Link](https://github.com/Anjok07/ultimatevocalremovergui/releases/download/v5.6/Ultimate_Vocal_Remover_v5_6_MacOS_arm64.dmg)
+        - [Main Download Link mirror](https://www.mediafire.com/file_premium/u3rk54wsqadpy93/Ultimate_Vocal_Remover_v5_6_MacOS_arm64.dmg/file)
+
+    - Mac Intel (x86_64) users:
+        - [Main Download Link](https://github.com/Anjok07/ultimatevocalremovergui/releases/download/v5.6/Ultimate_Vocal_Remover_v5_6_MacOS_x86_64.dmg)
+        - [Main Download Link mirror](https://www.mediafire.com/file_premium/2gf1werx5ly5ylz/Ultimate_Vocal_Remover_v5_6_MacOS_x86_64.dmg/file)
+
+**MacOS Users: Having Trouble Opening UVR?**
+
+> Due to Apple's strict application security, you may need to follow these steps to open UVR.
+>
+> First, run the following command via Terminal.app to allow applications to run from all sources (it's recommended that you re-enable this once UVR opens properly):
+>
+> ```bash
+> sudo spctl --master-disable
+> ```
+>
+> Second, run the following command to bypass Notarization:
+>
+> ```bash
+> sudo xattr -rd com.apple.quarantine /Applications/Ultimate\ Vocal\ Remover.app
+> ```
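+>
+> Once UVR opens properly, Gatekeeper can be turned back on. The original note recommends re-enabling it but doesn't show the command; this is the standard counterpart to the first command above:
+>
+> ```bash
+> sudo spctl --master-enable
+> ```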
+ +
+### Manual MacOS Installation
+
+- Download and save this repository [here](https://github.com/Anjok07/ultimatevocalremovergui/archive/refs/heads/master.zip)
+- Download and install Python 3.10 [here](https://www.python.org/ftp/python/3.10.9/python-3.10.9-macos11.pkg)
+- From the saved directory, run the following command:
+
+```
+pip3 install -r requirements.txt
+```
+
+- If your Mac has an M1 (Apple silicon) chip, run the following command next; otherwise, skip this step:
+
+```
+cp /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/_soundfile_data/libsndfile_arm64.dylib /Library/Frameworks/Python.framework/Versions/3.10/lib/python3.10/site-packages/_soundfile_data/libsndfile.dylib
+```
+
+**FFmpeg Installation**
+
+- Once everything is done installing, download the correct FFmpeg binary for your system [here](http://www.osxexperts.net) and place it into the main application directory.
+
+**Rubber Band Installation**
+
+In order to use the Time Stretch or Change Pitch tool, you'll need Rubber Band.
+
+- Download the precompiled macOS build [here](https://breakfastquay.com/files/releases/rubberband-3.1.2-gpl-executable-macos.zip)
+- From the archive, extract the following file to the UVR/lib_v5 application directory:
+    - ```rubberband-3.1.2-gpl-executable-macos/rubberband```
+
+This process has been tested on a MacBook Pro 2021 (M1) and a MacBook Air 2017 and is confirmed to be working on both.
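+
+After the steps above, you can optionally verify the setup. This is an editor's suggestion, not part of the original instructions; it assumes the commands above completed and, on Apple silicon, that the libsndfile copy step was performed:
+
+```
+python3 -c "import soundfile; print(soundfile.__libsndfile_version__)"
+python3 -c "import torch; print(torch.backends.mps.is_available())"
+./ffmpeg -version
+```
+
+The first line confirms that the soundfile package can find libsndfile, the second prints `True` if MPS (GPU) acceleration is available on Apple silicon, and the last confirms that the FFmpeg binary placed in the application directory runs.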
+ +### Linux Installation + +
+
+**These install instructions are for Debian & Arch based Linux systems.**
+
+- Download and save this repository [here](https://github.com/Anjok07/ultimatevocalremovergui/archive/refs/heads/master.zip)
+- From the saved directory, run the following commands in this order:
+
+**For Debian Based (Ubuntu, Mint, etc.):**
+```
+sudo apt update && sudo apt upgrade
+sudo apt-get update
+sudo apt install ffmpeg
+sudo apt install python3-pip
+sudo apt-get -y install python3-tk
+pip3 install -r requirements.txt
+python3 UVR.py
+```
+
+**For Arch Based (EndeavourOS):**
+```
+sudo pacman -Syu
+sudo pacman -Sy
+sudo pacman -S python-pip
+sudo pacman -S --noconfirm tk
+sudo pacman -S ffmpeg
+```
+
+If `pip` refuses to install packages system-wide and reports an "externally managed environment" error, you can bypass this protection and proceed with the installation by removing the marker file. Take caution; this modifies system files (a less invasive virtual-environment alternative is shown at the end of this section):
+
+```
+sudo rm /usr/lib/python3.11/EXTERNALLY-MANAGED
+```
+
+Then proceed with the following in order:
+
+```
+chmod +x install_packages.sh
+./install_packages.sh
+python UVR.py
+```
+
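+If you would rather not delete the `EXTERNALLY-MANAGED` marker, a Python virtual environment avoids the restriction without touching system files. This is an editor's suggestion rather than part of the original instructions (the repository's `.gitignore` already ignores a `venv/` folder); run it from the saved repository directory:
+
+```
+python3 -m venv venv
+source venv/bin/activate
+pip install -r requirements.txt
+python3 UVR.py
+```
+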
+
+### Other Application Notes
+
+- The Nvidia GTX 1060 6GB is the minimum requirement for GPU conversions.
+- Nvidia GPUs with at least 8GB of VRAM are recommended.
+- AMD Radeon GPU support is limited at this time.
+    - There is currently a working branch for AMD GPU users [here](https://github.com/Anjok07/ultimatevocalremovergui/tree/v5.6-amd-gpu)
+- This application is only compatible with 64-bit platforms.
+- This application relies on the Rubber Band library for the Time-Stretch and Pitch-Shift options.
+- This application relies on FFmpeg to process non-WAV audio files.
+- The application will automatically remember your settings when closed.
+- Conversion times will significantly depend on your hardware.
+- These models are computationally intensive.
+
+### Performance
+
+Compared to previous releases:
+
+- Model load times are faster.
+- Importing/exporting audio files is faster.
+
+## Troubleshooting
+
+### Common Issues
+
+- If FFmpeg is not installed, the application will throw an error if you attempt to convert a non-WAV file.
+- Memory allocation errors can usually be resolved by lowering the "Segment" or "Window" sizes.
+
+#### MacOS Sonoma Left-click Bug
+
+There was a known issue on MacOS Sonoma where left-clicks weren't registering correctly within the app. This impacted all applications built with Tkinter on Sonoma and has since been resolved. If you are still experiencing issues, please download the latest version via the following [link](https://github.com/Anjok07/ultimatevocalremovergui/releases/tag/v5.6).
+
+This issue was tracked [here](https://github.com/Anjok07/ultimatevocalremovergui/issues/840).
+
+### Issue Reporting
+
+Please be as detailed as possible when posting a new issue.
+
+If possible, click the "Settings Button" to the left of the "Start Processing" button, then click the "Error Log" button for detailed error information that can be provided to us.
+
+## License
+
+The **Ultimate Vocal Remover GUI** code is [MIT-licensed](LICENSE).
+
+- **Please Note:** For all third-party application developers who wish to use our models, please honor the MIT license by providing credit to UVR and its developers.
+
+## Credits
+
+- [ZFTurbo](https://github.com/ZFTurbo) - Created & trained the weights for the new MDX23C models.
+- [DilanBoskan](https://github.com/DilanBoskan) - Your contributions at the start of this project were essential to the success of UVR. Thank you!
+- [Bas Curtiz](https://www.youtube.com/user/bascurtiz) - Designed the official UVR logo, icon, banner, and splash screen.
+- [tsurumeso](https://github.com/tsurumeso) - Developed the original VR Architecture code.
+- [Kuielab & Woosung Choi](https://github.com/kuielab) - Developed the original MDX-Net AI code.
+- [Adefossez & Demucs](https://github.com/facebookresearch/demucs) - Developed the original Demucs AI code.
+- [KimberleyJSN](https://github.com/KimberleyJensen) - Advised and aided the implementation of the training scripts for MDX-Net and Demucs. Thank you!
+- [Hv](https://github.com/NaJeongMo/Colab-for-MDX_B) - Helped implement chunks into the MDX-Net AI code. Thank you!
+
+## Contributing
+
+- For anyone interested in the ongoing development of **Ultimate Vocal Remover GUI**, please send us a pull request, and we will review it.
+- This project is 100% open-source and free for anyone to use and modify as they wish.
+- We only maintain the development and support for the **Ultimate Vocal Remover GUI** and the models provided.
+ +## References +- [1] Takahashi et al., "Multi-scale Multi-band DenseNets for Audio Source Separation", https://arxiv.org/pdf/1706.09588.pdf diff --git a/UVR.py b/UVR.py new file mode 100644 index 0000000000000000000000000000000000000000..3d32253b8a598af1b326d8803ac7335c57ec708b --- /dev/null +++ b/UVR.py @@ -0,0 +1,7268 @@ +# GUI modules +import time +#start_time = time.time() +import audioread +import gui_data.sv_ttk +import hashlib +import json +import librosa +import math +import natsort +import os +import pickle +import psutil +from pyglet import font as pyglet_font +import pyperclip +import base64 +import queue +import shutil +import subprocess +import soundfile as sf +import torch +import urllib.request +import webbrowser +import wget +import traceback +import matchering as match +import tkinter as tk +import tkinter.ttk as ttk +from tkinter.font import Font +from tkinter import filedialog +from tkinter import messagebox +from collections import Counter +from __version__ import VERSION, PATCH, PATCH_MAC, PATCH_LINUX +from cryptography.fernet import Fernet +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC +from datetime import datetime +from gui_data.constants import * +from gui_data.app_size_values import * +from gui_data.error_handling import error_text, error_dialouge +from gui_data.old_data_check import file_check, remove_unneeded_yamls, remove_temps +from gui_data.tkinterdnd2 import TkinterDnD, DND_FILES +from lib_v5.vr_network.model_param_init import ModelParameters +from kthread import KThread +from lib_v5 import spec_utils +from pathlib import Path +from separate import ( + SeperateDemucs, SeperateMDX, SeperateMDXC, SeperateVR, # Model-related + save_format, clear_gpu_cache, # Utility functions + cuda_available, mps_available, #directml_available, +) +from playsound import playsound +from typing import List +import onnx +import re +import sys +import yaml +from ml_collections import ConfigDict +from collections import Counter + +# if not is_macos: +# import torch_directml + +# is_choose_arch = cuda_available and directml_available +# is_opencl_only = not cuda_available and directml_available +# is_cuda_only = cuda_available and not directml_available + +is_gpu_available = cuda_available or mps_available# or directml_available + +# Change the current working directory to the directory +# this file sits in +if getattr(sys, 'frozen', False): + # If the application is run as a bundle, the PyInstaller bootloader + # extends the sys module by a flag frozen=True and sets the app + # path into variable _MEIPASS'. 
+ BASE_PATH = sys._MEIPASS +else: + BASE_PATH = os.path.dirname(os.path.abspath(__file__)) + +os.chdir(BASE_PATH) # Change the current working directory to the base path + +SPLASH_DOC = os.path.join(BASE_PATH, 'tmp', 'splash.txt') + +if os.path.isfile(SPLASH_DOC): + os.remove(SPLASH_DOC) + +def get_execution_time(function, name): + start = time.time() + function() + end = time.time() + time_difference = end - start + print(f'{name} Execution Time: ', time_difference) + +PREVIOUS_PATCH_WIN = 'UVR_Patch_10_6_23_4_27' + +is_dnd_compatible = True +banner_placement = -2 + +if OPERATING_SYSTEM=="Darwin": + OPEN_FILE_func = lambda input_string:subprocess.Popen(["open", input_string]) + dnd_path_check = MAC_DND_CHECK + banner_placement = -8 + current_patch = PATCH_MAC + is_windows = False + is_macos = True + right_click_button = '' + application_extension = ".dmg" +elif OPERATING_SYSTEM=="Linux": + OPEN_FILE_func = lambda input_string:subprocess.Popen(["xdg-open", input_string]) + dnd_path_check = LINUX_DND_CHECK + current_patch = PATCH_LINUX + is_windows = False + is_macos = False + right_click_button = '' + application_extension = ".zip" +elif OPERATING_SYSTEM=="Windows": + OPEN_FILE_func = lambda input_string:os.startfile(input_string) + dnd_path_check = WINDOWS_DND_CHECK + current_patch = PATCH + is_windows = True + is_macos = False + right_click_button = '' + application_extension = ".exe" + +def right_click_release_linux(window, top_win=None): + if OPERATING_SYSTEM=="Linux": + root.bind('', lambda e:window.destroy()) + if top_win: + top_win.bind('', lambda e:window.destroy()) + +if not is_windows: + import ssl + ssl._create_default_https_context = ssl._create_unverified_context +else: + from ctypes import windll, wintypes + +def close_process(q:queue.Queue): + def close_splash(): + name = "UVR_Launcher.exe" + for process in psutil.process_iter(attrs=["name"]): + process_name = process.info.get("name") + + if process_name == name: + try: + process.terminate() + q.put(f"{name} terminated.") # Push message to queue + break + except psutil.NoSuchProcess as e: + q.put(f"Error terminating {name}: {e}") # Push error to queue + + try: + with open(SPLASH_DOC, 'w') as f: + f.write('1') + except: + print('No splash screen.') + + thread = KThread(target=close_splash) + thread.start() + +def save_data(data): + """ + Saves given data as a .pkl (pickle) file + + Paramters: + data(dict): + Dictionary containing all the necessary data to save + """ + # Open data file, create it if it does not exist + with open('data.pkl', 'wb') as data_file: + pickle.dump(data, data_file) + +def load_data() -> dict: + """ + Loads saved pkl file and returns the stored data + + Returns(dict): + Dictionary containing all the saved data + """ + try: + with open('data.pkl', 'rb') as data_file: # Open data file + data = pickle.load(data_file) + + return data + except (ValueError, FileNotFoundError): + # Data File is corrupted or not found so recreate it + + save_data(data=DEFAULT_DATA) + + return load_data() + +def load_model_hash_data(dictionary): + '''Get the model hash dictionary''' + with open(dictionary, 'r') as d: + return json.load(d) + +def font_checker(font_file): + chosen_font_name = None + chosen_font_file = None + + try: + if os.path.isfile(font_file): + with open(font_file, 'r') as d: + chosen_font = json.load(d) + + chosen_font_name = chosen_font["font_name"] + if chosen_font["font_file"]: + chosen_font_file = os.path.join(OTHER_FONT_PATH, chosen_font["font_file"]) + chosen_font_file = chosen_font_file if 
os.path.isfile(chosen_font_file) else None + except Exception as e: + print(e) + + chosen_font = chosen_font_name, chosen_font_file + + return chosen_font + +debugger = [] + +#--Constants-- +#Models +MODELS_DIR = os.path.join(BASE_PATH, 'models') +VR_MODELS_DIR = os.path.join(MODELS_DIR, 'VR_Models') +MDX_MODELS_DIR = os.path.join(MODELS_DIR, 'MDX_Net_Models') +DEMUCS_MODELS_DIR = os.path.join(MODELS_DIR, 'Demucs_Models') +DEMUCS_NEWER_REPO_DIR = os.path.join(DEMUCS_MODELS_DIR, 'v3_v4_repo') +MDX_MIXER_PATH = os.path.join(BASE_PATH, 'lib_v5', 'mixer.ckpt') + +#Cache & Parameters +VR_HASH_DIR = os.path.join(VR_MODELS_DIR, 'model_data') +VR_HASH_JSON = os.path.join(VR_MODELS_DIR, 'model_data', 'model_data.json') +MDX_HASH_DIR = os.path.join(MDX_MODELS_DIR, 'model_data') +MDX_HASH_JSON = os.path.join(MDX_HASH_DIR, 'model_data.json') +MDX_C_CONFIG_PATH = os.path.join(MDX_HASH_DIR, 'mdx_c_configs') + +DEMUCS_MODEL_NAME_SELECT = os.path.join(DEMUCS_MODELS_DIR, 'model_data', 'model_name_mapper.json') +MDX_MODEL_NAME_SELECT = os.path.join(MDX_MODELS_DIR, 'model_data', 'model_name_mapper.json') +ENSEMBLE_CACHE_DIR = os.path.join(BASE_PATH, 'gui_data', 'saved_ensembles') +SETTINGS_CACHE_DIR = os.path.join(BASE_PATH, 'gui_data', 'saved_settings') +VR_PARAM_DIR = os.path.join(BASE_PATH, 'lib_v5', 'vr_network', 'modelparams') +SAMPLE_CLIP_PATH = os.path.join(BASE_PATH, 'temp_sample_clips') +ENSEMBLE_TEMP_PATH = os.path.join(BASE_PATH, 'ensemble_temps') +DOWNLOAD_MODEL_CACHE = os.path.join(BASE_PATH, 'gui_data', 'model_manual_download.json') + +#CR Text +CR_TEXT = os.path.join(BASE_PATH, 'gui_data', 'cr_text.txt') + +#Style +ICON_IMG_PATH = os.path.join(BASE_PATH, 'gui_data', 'img', 'GUI-Icon.ico') +if not is_windows: + MAIN_ICON_IMG_PATH = os.path.join(BASE_PATH, 'gui_data', 'img', 'GUI-Icon.png') + +OWN_FONT_PATH = os.path.join(BASE_PATH, 'gui_data', 'own_font.json') + +MAIN_FONT_NAME = 'Montserrat' +SEC_FONT_NAME = 'Century Gothic' +FONT_PATH = os.path.join(BASE_PATH, 'gui_data', 'fonts', 'Montserrat', 'Montserrat.ttf')# +SEC_FONT_PATH = os.path.join(BASE_PATH, 'gui_data', 'fonts', 'centurygothic', 'GOTHIC.ttf')# +OTHER_FONT_PATH = os.path.join(BASE_PATH, 'gui_data', 'fonts', 'other')# + +FONT_MAPPER = {MAIN_FONT_NAME:FONT_PATH, + SEC_FONT_NAME:SEC_FONT_PATH} + +#Other +COMPLETE_CHIME = os.path.join(BASE_PATH, 'gui_data', 'complete_chime.wav') +FAIL_CHIME = os.path.join(BASE_PATH, 'gui_data', 'fail_chime.wav') +CHANGE_LOG = os.path.join(BASE_PATH, 'gui_data', 'change_log.txt') + +DENOISER_MODEL_PATH = os.path.join(VR_MODELS_DIR, 'UVR-DeNoise-Lite.pth') +DEVERBER_MODEL_PATH = os.path.join(VR_MODELS_DIR, 'UVR-DeEcho-DeReverb.pth') + +MODEL_DATA_URLS = [VR_MODEL_DATA_LINK, MDX_MODEL_DATA_LINK, MDX_MODEL_NAME_DATA_LINK, DEMUCS_MODEL_NAME_DATA_LINK] +MODEL_DATA_FILES = [VR_HASH_JSON, MDX_HASH_JSON, MDX_MODEL_NAME_SELECT, DEMUCS_MODEL_NAME_SELECT] + +file_check(os.path.join(MODELS_DIR, 'Main_Models'), VR_MODELS_DIR) +file_check(os.path.join(DEMUCS_MODELS_DIR, 'v3_repo'), DEMUCS_NEWER_REPO_DIR) +remove_unneeded_yamls(DEMUCS_MODELS_DIR) + +remove_temps(ENSEMBLE_TEMP_PATH) +remove_temps(SAMPLE_CLIP_PATH) +remove_temps(os.path.join(BASE_PATH, 'img')) + +if not os.path.isdir(ENSEMBLE_TEMP_PATH): + os.mkdir(ENSEMBLE_TEMP_PATH) + +if not os.path.isdir(SAMPLE_CLIP_PATH): + os.mkdir(SAMPLE_CLIP_PATH) + +model_hash_table = {} +data = load_data() + +def drop(event, accept_mode: str = 'files'): + path = event.data + if accept_mode == 'folder': + path = path.replace('{', '').replace('}', '') + if not 
os.path.isdir(path): + messagebox.showerror(parent=root, + title=INVALID_FOLDER_ERROR_TEXT[0], + message=INVALID_FOLDER_ERROR_TEXT[1]) + return + root.export_path_var.set(path) + elif accept_mode in ['files', FILE_1, FILE_2, FILE_1_LB, FILE_2_LB]: + path = path.replace("{", "").replace("}", "") + for dnd_file in dnd_path_check: + path = path.replace(f" {dnd_file}", f";{dnd_file}") + path = path.split(';') + path[-1] = path[-1].replace(';', '') + + if accept_mode == 'files': + root.inputPaths = tuple(path) + root.process_input_selections() + root.update_inputPaths() + elif accept_mode in [FILE_1, FILE_2]: + if len(path) == 2: + root.select_audiofile(path[0]) + root.select_audiofile(path[1], is_primary=False) + root.DualBatch_inputPaths = [] + root.check_dual_paths() + elif len(path) == 1: + if accept_mode == FILE_1: + root.select_audiofile(path[0]) + else: + root.select_audiofile(path[0], is_primary=False) + + elif accept_mode in [FILE_1_LB, FILE_2_LB]: + return path + else: + return + +class ModelData(): + def __init__(self, model_name: str, + selected_process_method=ENSEMBLE_MODE, + is_secondary_model=False, + primary_model_primary_stem=None, + is_primary_model_primary_stem_only=False, + is_primary_model_secondary_stem_only=False, + is_pre_proc_model=False, + is_dry_check=False, + is_change_def=False, + is_get_hash_dir_only=False, + is_vocal_split_model=False): + + device_set = root.device_set_var.get() + self.DENOISER_MODEL = DENOISER_MODEL_PATH + self.DEVERBER_MODEL = DEVERBER_MODEL_PATH + self.is_deverb_vocals = root.is_deverb_vocals_var.get() if os.path.isfile(DEVERBER_MODEL_PATH) else False + self.deverb_vocal_opt = DEVERB_MAPPER[root.deverb_vocal_opt_var.get()] + self.is_denoise_model = True if root.denoise_option_var.get() == DENOISE_M and os.path.isfile(DENOISER_MODEL_PATH) else False + self.is_gpu_conversion = 0 if root.is_gpu_conversion_var.get() else -1 + self.is_normalization = root.is_normalization_var.get()# + self.is_use_opencl = False#True if is_opencl_only else root.is_use_opencl_var.get() + self.is_primary_stem_only = root.is_primary_stem_only_var.get() + self.is_secondary_stem_only = root.is_secondary_stem_only_var.get() + self.is_denoise = True if not root.denoise_option_var.get() == DENOISE_NONE else False + self.is_mdx_c_seg_def = root.is_mdx_c_seg_def_var.get()# + self.mdx_batch_size = 1 if root.mdx_batch_size_var.get() == DEF_OPT else int(root.mdx_batch_size_var.get()) + self.mdxnet_stem_select = root.mdxnet_stems_var.get() + self.overlap = float(root.overlap_var.get()) if not root.overlap_var.get() == DEFAULT else 0.25 + self.overlap_mdx = float(root.overlap_mdx_var.get()) if not root.overlap_mdx_var.get() == DEFAULT else root.overlap_mdx_var.get() + self.overlap_mdx23 = int(float(root.overlap_mdx23_var.get())) + self.semitone_shift = float(root.semitone_shift_var.get()) + self.is_pitch_change = False if self.semitone_shift == 0 else True + self.is_match_frequency_pitch = root.is_match_frequency_pitch_var.get() + self.is_mdx_ckpt = False + self.is_mdx_c = False + self.is_mdx_combine_stems = root.is_mdx23_combine_stems_var.get()# + self.mdx_c_configs = None + self.mdx_model_stems = [] + self.mdx_dim_f_set = None + self.mdx_dim_t_set = None + self.mdx_stem_count = 1 + self.compensate = None + self.mdx_n_fft_scale_set = None + self.wav_type_set = root.wav_type_set# + self.device_set = device_set.split(':')[-1].strip() if ':' in device_set else device_set + self.mp3_bit_set = root.mp3_bit_set_var.get() + self.save_format = root.save_format_var.get() + 
self.is_invert_spec = root.is_invert_spec_var.get()# + self.is_mixer_mode = False# + self.demucs_stems = root.demucs_stems_var.get() + self.is_demucs_combine_stems = root.is_demucs_combine_stems_var.get() + self.demucs_source_list = [] + self.demucs_stem_count = 0 + self.mixer_path = MDX_MIXER_PATH + self.model_name = model_name + self.process_method = selected_process_method + self.model_status = False if self.model_name == CHOOSE_MODEL or self.model_name == NO_MODEL else True + self.primary_stem = None + self.secondary_stem = None + self.primary_stem_native = None + self.is_ensemble_mode = False + self.ensemble_primary_stem = None + self.ensemble_secondary_stem = None + self.primary_model_primary_stem = primary_model_primary_stem + self.is_secondary_model = True if is_vocal_split_model else is_secondary_model + self.secondary_model = None + self.secondary_model_scale = None + self.demucs_4_stem_added_count = 0 + self.is_demucs_4_stem_secondaries = False + self.is_4_stem_ensemble = False + self.pre_proc_model = None + self.pre_proc_model_activated = False + self.is_pre_proc_model = is_pre_proc_model + self.is_dry_check = is_dry_check + self.model_samplerate = 44100 + self.model_capacity = 32, 128 + self.is_vr_51_model = False + self.is_demucs_pre_proc_model_inst_mix = False + self.manual_download_Button = None + self.secondary_model_4_stem = [] + self.secondary_model_4_stem_scale = [] + self.secondary_model_4_stem_names = [] + self.secondary_model_4_stem_model_names_list = [] + self.all_models = [] + self.secondary_model_other = None + self.secondary_model_scale_other = None + self.secondary_model_bass = None + self.secondary_model_scale_bass = None + self.secondary_model_drums = None + self.secondary_model_scale_drums = None + self.is_multi_stem_ensemble = False + self.is_karaoke = False + self.is_bv_model = False + self.bv_model_rebalance = 0 + self.is_sec_bv_rebalance = False + self.is_change_def = is_change_def + self.model_hash_dir = None + self.is_get_hash_dir_only = is_get_hash_dir_only + self.is_secondary_model_activated = False + self.vocal_split_model = None + self.is_vocal_split_model = is_vocal_split_model + self.is_vocal_split_model_activated = False + self.is_save_inst_vocal_splitter = root.is_save_inst_set_vocal_splitter_var.get() + self.is_inst_only_voc_splitter = root.check_only_selection_stem(INST_STEM_ONLY) + self.is_save_vocal_only = root.check_only_selection_stem(IS_SAVE_VOC_ONLY) + + if selected_process_method == ENSEMBLE_MODE: + self.process_method, _, self.model_name = model_name.partition(ENSEMBLE_PARTITION) + self.model_and_process_tag = model_name + self.ensemble_primary_stem, self.ensemble_secondary_stem = root.return_ensemble_stems() + + is_not_secondary_or_pre_proc = not is_secondary_model and not is_pre_proc_model + self.is_ensemble_mode = is_not_secondary_or_pre_proc + + if root.ensemble_main_stem_var.get() == FOUR_STEM_ENSEMBLE: + self.is_4_stem_ensemble = self.is_ensemble_mode + elif root.ensemble_main_stem_var.get() == MULTI_STEM_ENSEMBLE and root.chosen_process_method_var.get() == ENSEMBLE_MODE: + self.is_multi_stem_ensemble = True + + is_not_vocal_stem = self.ensemble_primary_stem != VOCAL_STEM + self.pre_proc_model_activated = root.is_demucs_pre_proc_model_activate_var.get() if is_not_vocal_stem else False + + if self.process_method == VR_ARCH_TYPE: + self.is_secondary_model_activated = root.vr_is_secondary_model_activate_var.get() if not is_secondary_model else False + self.aggression_setting = float(int(root.aggression_setting_var.get())/100) + 
self.is_tta = root.is_tta_var.get() + self.is_post_process = root.is_post_process_var.get() + self.window_size = int(root.window_size_var.get()) + self.batch_size = 1 if root.batch_size_var.get() == DEF_OPT else int(root.batch_size_var.get()) + self.crop_size = int(root.crop_size_var.get()) + self.is_high_end_process = 'mirroring' if root.is_high_end_process_var.get() else 'None' + self.post_process_threshold = float(root.post_process_threshold_var.get()) + self.model_capacity = 32, 128 + self.model_path = os.path.join(VR_MODELS_DIR, f"{self.model_name}.pth") + self.get_model_hash() + if self.model_hash: + self.model_hash_dir = os.path.join(VR_HASH_DIR, f"{self.model_hash}.json") + if is_change_def: + self.model_data = self.change_model_data() + else: + self.model_data = self.get_model_data(VR_HASH_DIR, root.vr_hash_MAPPER) if not self.model_hash == WOOD_INST_MODEL_HASH else WOOD_INST_PARAMS + if self.model_data: + vr_model_param = os.path.join(VR_PARAM_DIR, "{}.json".format(self.model_data["vr_model_param"])) + self.primary_stem = self.model_data["primary_stem"] + self.secondary_stem = secondary_stem(self.primary_stem) + self.vr_model_param = ModelParameters(vr_model_param) + self.model_samplerate = self.vr_model_param.param['sr'] + self.primary_stem_native = self.primary_stem + if "nout" in self.model_data.keys() and "nout_lstm" in self.model_data.keys(): + self.model_capacity = self.model_data["nout"], self.model_data["nout_lstm"] + self.is_vr_51_model = True + self.check_if_karaokee_model() + + else: + self.model_status = False + + if self.process_method == MDX_ARCH_TYPE: + self.is_secondary_model_activated = root.mdx_is_secondary_model_activate_var.get() if not is_secondary_model else False + self.margin = int(root.margin_var.get()) + self.chunks = 0 + self.mdx_segment_size = int(root.mdx_segment_size_var.get()) + self.get_mdx_model_path() + self.get_model_hash() + if self.model_hash: + self.model_hash_dir = os.path.join(MDX_HASH_DIR, f"{self.model_hash}.json") + if is_change_def: + self.model_data = self.change_model_data() + else: + self.model_data = self.get_model_data(MDX_HASH_DIR, root.mdx_hash_MAPPER) + if self.model_data: + + if "config_yaml" in self.model_data: + self.is_mdx_c = True + config_path = os.path.join(MDX_C_CONFIG_PATH, self.model_data["config_yaml"]) + if os.path.isfile(config_path): + with open(config_path) as f: + config = ConfigDict(yaml.load(f, Loader=yaml.FullLoader)) + + self.mdx_c_configs = config + + if self.mdx_c_configs.training.target_instrument: + # Use target_instrument as the primary stem and set 4-stem ensemble to False + target = self.mdx_c_configs.training.target_instrument + self.mdx_model_stems = [target] + self.primary_stem = target + else: + # If no specific target_instrument, use all instruments in the training config + self.mdx_model_stems = self.mdx_c_configs.training.instruments + self.mdx_stem_count = len(self.mdx_model_stems) + + # Set primary stem based on stem count + if self.mdx_stem_count == 2: + self.primary_stem = self.mdx_model_stems[0] + else: + self.primary_stem = self.mdxnet_stem_select + + # Update mdxnet_stem_select based on ensemble mode + if self.is_ensemble_mode: + self.mdxnet_stem_select = self.ensemble_primary_stem + else: + self.model_status = False + else: + self.compensate = self.model_data["compensate"] if root.compensate_var.get() == AUTO_SELECT else float(root.compensate_var.get()) + self.mdx_dim_f_set = self.model_data["mdx_dim_f_set"] + self.mdx_dim_t_set = self.model_data["mdx_dim_t_set"] + 
self.mdx_n_fft_scale_set = self.model_data["mdx_n_fft_scale_set"] + self.primary_stem = self.model_data["primary_stem"] + self.primary_stem_native = self.model_data["primary_stem"] + self.check_if_karaokee_model() + + self.secondary_stem = secondary_stem(self.primary_stem) + else: + self.model_status = False + + if self.process_method == DEMUCS_ARCH_TYPE: + self.is_secondary_model_activated = root.demucs_is_secondary_model_activate_var.get() if not is_secondary_model else False + if not self.is_ensemble_mode: + self.pre_proc_model_activated = root.is_demucs_pre_proc_model_activate_var.get() if not root.demucs_stems_var.get() in [VOCAL_STEM, INST_STEM] else False + self.margin_demucs = int(root.margin_demucs_var.get()) + self.chunks_demucs = 0 + self.shifts = int(root.shifts_var.get()) + self.is_split_mode = root.is_split_mode_var.get() + self.segment = root.segment_var.get() + self.is_chunk_demucs = root.is_chunk_demucs_var.get() + self.is_primary_stem_only = root.is_primary_stem_only_var.get() if self.is_ensemble_mode else root.is_primary_stem_only_Demucs_var.get() + self.is_secondary_stem_only = root.is_secondary_stem_only_var.get() if self.is_ensemble_mode else root.is_secondary_stem_only_Demucs_var.get() + self.get_demucs_model_data() + self.get_demucs_model_path() + + if self.model_status: + self.model_basename = os.path.splitext(os.path.basename(self.model_path))[0] + else: + self.model_basename = None + + self.pre_proc_model_activated = self.pre_proc_model_activated if not self.is_secondary_model else False + + self.is_primary_model_primary_stem_only = is_primary_model_primary_stem_only + self.is_primary_model_secondary_stem_only = is_primary_model_secondary_stem_only + + is_secondary_activated_and_status = self.is_secondary_model_activated and self.model_status + is_demucs = self.process_method == DEMUCS_ARCH_TYPE + is_all_stems = root.demucs_stems_var.get() == ALL_STEMS + is_valid_ensemble = not self.is_ensemble_mode and is_all_stems and is_demucs + is_multi_stem_ensemble_demucs = self.is_multi_stem_ensemble and is_demucs + + if is_secondary_activated_and_status: + if is_valid_ensemble or self.is_4_stem_ensemble or is_multi_stem_ensemble_demucs: + for key in DEMUCS_4_SOURCE_LIST: + self.secondary_model_data(key) + self.secondary_model_4_stem.append(self.secondary_model) + self.secondary_model_4_stem_scale.append(self.secondary_model_scale) + self.secondary_model_4_stem_names.append(key) + + self.demucs_4_stem_added_count = sum(i is not None for i in self.secondary_model_4_stem) + self.is_secondary_model_activated = any(i is not None for i in self.secondary_model_4_stem) + self.demucs_4_stem_added_count -= 1 if self.is_secondary_model_activated else 0 + + if self.is_secondary_model_activated: + self.secondary_model_4_stem_model_names_list = [i.model_basename if i is not None else None for i in self.secondary_model_4_stem] + self.is_demucs_4_stem_secondaries = True + else: + primary_stem = self.ensemble_primary_stem if self.is_ensemble_mode and is_demucs else self.primary_stem + self.secondary_model_data(primary_stem) + + if self.process_method == DEMUCS_ARCH_TYPE and not is_secondary_model: + if self.demucs_stem_count >= 3 and self.pre_proc_model_activated: + self.pre_proc_model = root.process_determine_demucs_pre_proc_model(self.primary_stem) + self.pre_proc_model_activated = True if self.pre_proc_model else False + self.is_demucs_pre_proc_model_inst_mix = root.is_demucs_pre_proc_model_inst_mix_var.get() if self.pre_proc_model else False + + if self.is_vocal_split_model and 
self.model_status: + self.is_secondary_model_activated = False + if self.is_bv_model: + primary = BV_VOCAL_STEM if self.primary_stem_native == VOCAL_STEM else LEAD_VOCAL_STEM + else: + primary = LEAD_VOCAL_STEM if self.primary_stem_native == VOCAL_STEM else BV_VOCAL_STEM + self.primary_stem, self.secondary_stem = primary, secondary_stem(primary) + + self.vocal_splitter_model_data() + + def vocal_splitter_model_data(self): + if not self.is_secondary_model and self.model_status: + self.vocal_split_model = root.process_determine_vocal_split_model() + self.is_vocal_split_model_activated = True if self.vocal_split_model else False + + if self.vocal_split_model: + if self.vocal_split_model.bv_model_rebalance: + self.is_sec_bv_rebalance = True + + def secondary_model_data(self, primary_stem): + secondary_model_data = root.process_determine_secondary_model(self.process_method, primary_stem, self.is_primary_stem_only, self.is_secondary_stem_only) + self.secondary_model = secondary_model_data[0] + self.secondary_model_scale = secondary_model_data[1] + self.is_secondary_model_activated = False if not self.secondary_model else True + if self.secondary_model: + self.is_secondary_model_activated = False if self.secondary_model.model_basename == self.model_basename else True + + #print("self.is_secondary_model_activated: ", self.is_secondary_model_activated) + + def check_if_karaokee_model(self): + if IS_KARAOKEE in self.model_data.keys(): + self.is_karaoke = self.model_data[IS_KARAOKEE] + if IS_BV_MODEL in self.model_data.keys(): + self.is_bv_model = self.model_data[IS_BV_MODEL]# + if IS_BV_MODEL_REBAL in self.model_data.keys() and self.is_bv_model: + self.bv_model_rebalance = self.model_data[IS_BV_MODEL_REBAL]# + + def get_mdx_model_path(self): + + if self.model_name.endswith(CKPT): + self.is_mdx_ckpt = True + + ext = '' if self.is_mdx_ckpt else ONNX + + for file_name, chosen_mdx_model in root.mdx_name_select_MAPPER.items(): + if self.model_name in chosen_mdx_model: + if file_name.endswith(CKPT): + ext = '' + self.model_path = os.path.join(MDX_MODELS_DIR, f"{file_name}{ext}") + break + else: + self.model_path = os.path.join(MDX_MODELS_DIR, f"{self.model_name}{ext}") + + self.mixer_path = os.path.join(MDX_MODELS_DIR, f"mixer_val.ckpt") + + def get_demucs_model_path(self): + + demucs_newer = self.demucs_version in {DEMUCS_V3, DEMUCS_V4} + demucs_model_dir = DEMUCS_NEWER_REPO_DIR if demucs_newer else DEMUCS_MODELS_DIR + + for file_name, chosen_model in root.demucs_name_select_MAPPER.items(): + if self.model_name == chosen_model: + self.model_path = os.path.join(demucs_model_dir, file_name) + break + else: + self.model_path = os.path.join(DEMUCS_NEWER_REPO_DIR, f'{self.model_name}.yaml') + + def get_demucs_model_data(self): + + self.demucs_version = DEMUCS_V4 + + for key, value in DEMUCS_VERSION_MAPPER.items(): + if value in self.model_name: + self.demucs_version = key + + if DEMUCS_UVR_MODEL in self.model_name: + self.demucs_source_list, self.demucs_source_map, self.demucs_stem_count = DEMUCS_2_SOURCE, DEMUCS_2_SOURCE_MAPPER, 2 + else: + self.demucs_source_list, self.demucs_source_map, self.demucs_stem_count = DEMUCS_4_SOURCE, DEMUCS_4_SOURCE_MAPPER, 4 + + if not self.is_ensemble_mode: + self.primary_stem = PRIMARY_STEM if self.demucs_stems == ALL_STEMS else self.demucs_stems + self.secondary_stem = secondary_stem(self.primary_stem) + + def get_model_data(self, model_hash_dir, hash_mapper:dict): + model_settings_json = os.path.join(model_hash_dir, f"{self.model_hash}.json") + + if 
os.path.isfile(model_settings_json): + with open(model_settings_json, 'r') as json_file: + return json.load(json_file) + else: + for hash, settings in hash_mapper.items(): + if self.model_hash in hash: + return settings + + return self.get_model_data_from_popup() + + def change_model_data(self): + if self.is_get_hash_dir_only: + return None + else: + return self.get_model_data_from_popup() + + def get_model_data_from_popup(self): + if self.is_dry_check: + return None + + if not self.is_change_def: + confirm = messagebox.askyesno( + title=UNRECOGNIZED_MODEL[0], + message=f'"{self.model_name}"{UNRECOGNIZED_MODEL[1]}', + parent=root + ) + if not confirm: + return None + + if self.process_method == VR_ARCH_TYPE: + root.pop_up_vr_param(self.model_hash) + return root.vr_model_params + elif self.process_method == MDX_ARCH_TYPE: + root.pop_up_mdx_model(self.model_hash, self.model_path) + return root.mdx_model_params + + def get_model_hash(self): + self.model_hash = None + + if not os.path.isfile(self.model_path): + self.model_status = False + self.model_hash is None + else: + if model_hash_table: + for (key, value) in model_hash_table.items(): + if self.model_path == key: + self.model_hash = value + break + + if not self.model_hash: + try: + with open(self.model_path, 'rb') as f: + f.seek(- 10000 * 1024, 2) + self.model_hash = hashlib.md5(f.read()).hexdigest() + except: + self.model_hash = hashlib.md5(open(self.model_path,'rb').read()).hexdigest() + + table_entry = {self.model_path: self.model_hash} + model_hash_table.update(table_entry) + + #print(self.model_name," - ", self.model_hash) + +class Ensembler(): + def __init__(self, is_manual_ensemble=False): + self.is_save_all_outputs_ensemble = root.is_save_all_outputs_ensemble_var.get() + chosen_ensemble_name = '{}'.format(root.chosen_ensemble_var.get().replace(" ", "_")) if not root.chosen_ensemble_var.get() == CHOOSE_ENSEMBLE_OPTION else 'Ensembled' + ensemble_algorithm = root.ensemble_type_var.get().partition("/") + ensemble_main_stem_pair = root.ensemble_main_stem_var.get().partition("/") + time_stamp = round(time.time()) + self.audio_tool = MANUAL_ENSEMBLE + self.main_export_path = Path(root.export_path_var.get()) + self.chosen_ensemble = f"_{chosen_ensemble_name}" if root.is_append_ensemble_name_var.get() else '' + ensemble_folder_name = self.main_export_path if self.is_save_all_outputs_ensemble else ENSEMBLE_TEMP_PATH + self.ensemble_folder_name = os.path.join(ensemble_folder_name, '{}_Outputs_{}'.format(chosen_ensemble_name, time_stamp)) + self.is_testing_audio = f"{time_stamp}_" if root.is_testing_audio_var.get() else '' + self.primary_algorithm = ensemble_algorithm[0] + self.secondary_algorithm = ensemble_algorithm[2] + self.ensemble_primary_stem = ensemble_main_stem_pair[0] + self.ensemble_secondary_stem = ensemble_main_stem_pair[2] + self.is_normalization = root.is_normalization_var.get() + self.is_wav_ensemble = root.is_wav_ensemble_var.get() + self.wav_type_set = root.wav_type_set + self.mp3_bit_set = root.mp3_bit_set_var.get() + self.save_format = root.save_format_var.get() + if not is_manual_ensemble: + os.mkdir(self.ensemble_folder_name) + + def ensemble_outputs(self, audio_file_base, export_path, stem, is_4_stem=False, is_inst_mix=False): + """Processes the given outputs and ensembles them with the chosen algorithm""" + + if is_4_stem: + algorithm = root.ensemble_type_var.get() + stem_tag = stem + else: + if is_inst_mix: + algorithm = self.secondary_algorithm + stem_tag = f"{self.ensemble_secondary_stem} {INST_STEM}" + else: + 
algorithm = self.primary_algorithm if stem == PRIMARY_STEM else self.secondary_algorithm + stem_tag = self.ensemble_primary_stem if stem == PRIMARY_STEM else self.ensemble_secondary_stem + + stem_outputs = self.get_files_to_ensemble(folder=export_path, prefix=audio_file_base, suffix=f"_({stem_tag}).wav") + audio_file_output = f"{self.is_testing_audio}{audio_file_base}{self.chosen_ensemble}_({stem_tag})" + stem_save_path = os.path.join('{}'.format(self.main_export_path),'{}.wav'.format(audio_file_output)) + + #print("get_files_to_ensemble: ", stem_outputs) + + if len(stem_outputs) > 1: + spec_utils.ensemble_inputs(stem_outputs, algorithm, self.is_normalization, self.wav_type_set, stem_save_path, is_wave=self.is_wav_ensemble) + save_format(stem_save_path, self.save_format, self.mp3_bit_set) + + if self.is_save_all_outputs_ensemble: + for i in stem_outputs: + save_format(i, self.save_format, self.mp3_bit_set) + else: + for i in stem_outputs: + try: + os.remove(i) + except Exception as e: + print(e) + + def ensemble_manual(self, audio_inputs, audio_file_base, is_bulk=False): + """Processes the given outputs and ensembles them with the chosen algorithm""" + + is_mv_sep = True + + if is_bulk: + number_list = list(set([os.path.basename(i).split("_")[0] for i in audio_inputs])) + for n in number_list: + current_list = [i for i in audio_inputs if os.path.basename(i).startswith(n)] + audio_file_base = os.path.basename(current_list[0]).split('.wav')[0] + stem_testing = "instrum" if "Instrumental" in audio_file_base else "vocals" + if is_mv_sep: + audio_file_base = audio_file_base.split("_") + audio_file_base = f"{audio_file_base[1]}_{audio_file_base[2]}_{stem_testing}" + self.ensemble_manual_process(current_list, audio_file_base, is_bulk) + else: + self.ensemble_manual_process(audio_inputs, audio_file_base, is_bulk) + + def ensemble_manual_process(self, audio_inputs, audio_file_base, is_bulk): + + algorithm = root.choose_algorithm_var.get() + algorithm_text = "" if is_bulk else f"_({root.choose_algorithm_var.get()})" + stem_save_path = os.path.join('{}'.format(self.main_export_path),'{}{}{}.wav'.format(self.is_testing_audio, audio_file_base, algorithm_text)) + spec_utils.ensemble_inputs(audio_inputs, algorithm, self.is_normalization, self.wav_type_set, stem_save_path, is_wave=self.is_wav_ensemble) + save_format(stem_save_path, self.save_format, self.mp3_bit_set) + + def get_files_to_ensemble(self, folder="", prefix="", suffix=""): + """Grab all the files to be ensembled""" + + return [os.path.join(folder, i) for i in os.listdir(folder) if i.startswith(prefix) and i.endswith(suffix)] + + def combine_audio(self, audio_inputs, audio_file_base): + save_format_ = lambda save_path:save_format(save_path, root.save_format_var.get(), root.mp3_bit_set_var.get()) + spec_utils.combine_audio(audio_inputs, + os.path.join(self.main_export_path, f"{self.is_testing_audio}{audio_file_base}"), + self.wav_type_set, + save_format=save_format_) + +class AudioTools(): + def __init__(self, audio_tool): + time_stamp = round(time.time()) + self.audio_tool = audio_tool + self.main_export_path = Path(root.export_path_var.get()) + self.wav_type_set = root.wav_type_set + self.is_normalization = root.is_normalization_var.get() + self.is_testing_audio = f"{time_stamp}_" if root.is_testing_audio_var.get() else '' + self.save_format = lambda save_path:save_format(save_path, root.save_format_var.get(), root.mp3_bit_set_var.get()) + self.align_window = TIME_WINDOW_MAPPER[root.time_window_var.get()] + self.align_intro_val = 
INTRO_MAPPER[root.intro_analysis_var.get()] + self.db_analysis_val = VOLUME_MAPPER[root.db_analysis_var.get()] + self.is_save_align = root.is_save_align_var.get()# + self.is_match_silence = root.is_match_silence_var.get()# + self.is_spec_match = root.is_spec_match_var.get() + + self.phase_option = root.phase_option_var.get()# + self.phase_shifts = PHASE_SHIFTS_OPT[root.phase_shifts_var.get()] + + def align_inputs(self, audio_inputs, audio_file_base, audio_file_2_base, command_Text, set_progress_bar): + audio_file_base = f"{self.is_testing_audio}{audio_file_base}" + audio_file_2_base = f"{self.is_testing_audio}{audio_file_2_base}" + + aligned_path = os.path.join('{}'.format(self.main_export_path),'{}_(Aligned).wav'.format(audio_file_2_base)) + inverted_path = os.path.join('{}'.format(self.main_export_path),'{}_(Inverted).wav'.format(audio_file_base)) + + spec_utils.align_audio(audio_inputs[0], + audio_inputs[1], + aligned_path, + inverted_path, + self.wav_type_set, + self.is_save_align, + command_Text, + self.save_format, + align_window=self.align_window, + align_intro_val=self.align_intro_val, + db_analysis=self.db_analysis_val, + set_progress_bar=set_progress_bar, + phase_option=self.phase_option, + phase_shifts=self.phase_shifts, + is_match_silence=self.is_match_silence, + is_spec_match=self.is_spec_match) + + def match_inputs(self, audio_inputs, audio_file_base, command_Text): + + target = audio_inputs[0] + reference = audio_inputs[1] + + command_Text(f"Processing... ") + + save_path = os.path.join('{}'.format(self.main_export_path),'{}_(Matched).wav'.format(f"{self.is_testing_audio}{audio_file_base}")) + + match.process( + target=target, + reference=reference, + results=[match.save_audiofile(save_path, wav_set=self.wav_type_set), + ], + ) + + self.save_format(save_path) + + def combine_audio(self, audio_inputs, audio_file_base): + spec_utils.combine_audio(audio_inputs, + os.path.join(self.main_export_path, f"{self.is_testing_audio}{audio_file_base}"), + self.wav_type_set, + save_format=self.save_format) + + def pitch_or_time_shift(self, audio_file, audio_file_base): + is_time_correction = True + rate = float(root.time_stretch_rate_var.get()) if self.audio_tool == TIME_STRETCH else float(root.pitch_rate_var.get()) + is_pitch = False if self.audio_tool == TIME_STRETCH else True + if is_pitch: + is_time_correction = True if root.is_time_correction_var.get() else False + file_text = TIME_TEXT if self.audio_tool == TIME_STRETCH else PITCH_TEXT + save_path = os.path.join(self.main_export_path, f"{self.is_testing_audio}{audio_file_base}{file_text}.wav") + spec_utils.augment_audio(save_path, audio_file, rate, self.is_normalization, self.wav_type_set, self.save_format, is_pitch=is_pitch, is_time_correction=is_time_correction) + +class ToolTip(object): + + def __init__(self, widget): + self.widget = widget + self.tooltip = None + + def showtip(self, text, is_message_box=False, is_success_message=None):# + self.hidetip() + def create_label_config(): + + font_size = FONT_SIZE_3 if is_message_box else FONT_SIZE_2 + + """Helper function to generate label configurations.""" + common_config = { + "text": text, + "relief": tk.SOLID, + "borderwidth": 1, + "font": (MAIN_FONT_NAME, f"{font_size}", "normal") + } + if is_message_box: + background_color = "#03692d" if is_success_message else "#8B0000" + return {**common_config, "background": background_color, "foreground": "#ffffff"} + else: + return {**common_config, "background": "#1C1C1C", "foreground": "#ffffff", + "highlightcolor": "#898b8e", "justify": 
tk.LEFT} + + if is_message_box: + temp_tooltip = tk.Toplevel(self.widget) + temp_tooltip.wm_overrideredirect(True) + temp_tooltip.withdraw() + label = tk.Label(temp_tooltip, **create_label_config()) + label.pack() + temp_tooltip.update() if is_windows else temp_tooltip.update_idletasks() + + x = self.widget.winfo_rootx() + (self.widget.winfo_width() // 2) - (temp_tooltip.winfo_reqwidth() // 2) + y = self.widget.winfo_rooty() + self.widget.winfo_height() + + temp_tooltip.destroy() + else: + x, y, _, _ = self.widget.bbox("insert") + x += self.widget.winfo_rootx() + 25 + y += self.widget.winfo_rooty() + 25 + + # Create the actual tooltip + self.tooltip = tk.Toplevel(self.widget) + self.tooltip.wm_overrideredirect(True) + self.tooltip.wm_geometry(f"+{x}+{y}") + + label_config = create_label_config() + if not is_message_box: + label_config['padx'] = 10 # horizontal padding + label_config['pady'] = 10 # vertical padding + label_config["wraplength"] = 750 + label = tk.Label(self.tooltip, **label_config) + + label.pack() + + if is_message_box: + self.tooltip.after(3000 if type(is_success_message) is bool else 2000, self.hidetip) + + def hidetip(self): + if self.tooltip: + self.tooltip.destroy() + self.tooltip = None + +class ListboxBatchFrame(tk.Frame): + def __init__(self, master=None, name="Listbox", command=None, image_sel=None, img_mapper=None): + super().__init__(master) + self.master = master + + self.path_list = [] # A list to keep track of the paths + self.basename_to_path = {} # A dict to map basenames to paths + + self.label = tk.Label(self, text=name, font=(MAIN_FONT_NAME, f"{FONT_SIZE_5}"), foreground=FG_COLOR) + self.label.pack(pady=(10, 8)) # add padding between label and listbox + + self.input_button = ttk.Button(self, text=SELECT_INPUTS, command=self.select_input) # create button for selecting files + self.input_button.pack(pady=(0, 10)) # add padding between button and next widget + + self.listbox = tk.Listbox(self, activestyle='dotbox', font=(MAIN_FONT_NAME, f"{FONT_SIZE_4}"), foreground='#cdd3ce', background='#101414', exportselection=0, width=70, height=15) + self.listbox.pack(fill="both", expand=True) + + self.button_frame = tk.Frame(self) + self.button_frame.pack() + + self.up_button = ttk.Button(self.button_frame, image=img_mapper["up"], command=self.move_up) + self.up_button.grid(row=0, column=0) + + self.down_button = ttk.Button(self.button_frame, image=img_mapper["down"], command=self.move_down) + self.down_button.grid(row=0, column=1) + + if command and image_sel: + self.move_button = ttk.Button(self.button_frame, image=image_sel, command=command) + self.move_button.grid(row=0, column=2) + + self.duplicate_button = ttk.Button(self.button_frame, image=img_mapper["copy"], command=self.duplicate_selected) + self.duplicate_button.grid(row=0, column=3) + + self.delete_button = ttk.Button(self.button_frame, image=img_mapper["clear"], command=self.delete_selected) + self.delete_button.grid(row=0, column=4) + + def delete_selected(self): + selected = self.listbox.curselection() + if selected: + basename = self.listbox.get(selected[0]).split(": ", 1)[1] # We get the actual basename here, without the index + path_to_delete = self.basename_to_path[basename] # store the path to delete + del self.basename_to_path[basename] # delete from the dict + self.path_list.remove(path_to_delete) # delete from the list + self.listbox.delete(selected) + self.update_displayed_index() + + def select_input(self, inputs=None): + files = inputs if inputs else 
root.show_file_dialog(dialoge_type=MULTIPLE_FILE) + for file in files: + if file not in self.path_list: # only add file if it's not already in the list + basename = os.path.basename(file) + self.listbox.insert(tk.END, basename) # insert basename to the listbox + self.path_list.append(file) # append the file path to the list + self.basename_to_path[basename] = file # add to the dict + self.update_displayed_index(is_acc_dupe=False) + + def duplicate_selected(self): + selected = self.listbox.curselection() + if selected: + basename = self.listbox.get(selected[0]).split(": ", 1)[1] # We get the actual basename here, without the index + path_to_duplicate = self.basename_to_path[basename] # store the path to duplicate + self.path_list.append(path_to_duplicate) # add the duplicated path to the list + self.update_displayed_index() # redraw listbox with the duplicated item + + def update_displayed_index(self, inputs=None, is_acc_dupe=True): + self.basename_to_path = {} # reset the dictionary + + if inputs: + self.path_list = inputs + + basename_count = Counter(self.path_list) # count occurrences of each path + + for i in range(len(self.path_list)): + basename = os.path.basename(self.path_list[i]) + + # If the path is not unique or we are adding a duplicate + if basename_count[self.path_list[i]] > 1 and is_acc_dupe: + j = 1 + new_basename = f"{basename} ({j})" + while new_basename in self.basename_to_path: + j += 1 + new_basename = f"{basename} ({j})" + basename = new_basename + + self.basename_to_path[basename] = self.path_list[i] # update the dict with the new order + self.listbox.delete(i) + self.listbox.insert(i, f"{i + 1}: {basename}") + + def move_up(self): + selected = self.listbox.curselection() + if selected and selected[0] > 0: + # Swap items in path_list + self.path_list[selected[0] - 1], self.path_list[selected[0]] = self.path_list[selected[0]], self.path_list[selected[0] - 1] + # Redraw listbox + self.update_displayed_index() + # Reselect item + self.listbox.select_set(selected[0] - 1) + + def move_down(self): + selected = self.listbox.curselection() + if selected and selected[0] < self.listbox.size() - 1: + # Swap items in path_list + self.path_list[selected[0] + 1], self.path_list[selected[0]] = self.path_list[selected[0]], self.path_list[selected[0] + 1] + # Redraw listbox + self.update_displayed_index() + # Reselect item + self.listbox.select_set(selected[0] + 1) + + def get_selected_path(self): + """Returns the path associated with the selected entry.""" + selected = self.listbox.curselection() + if selected: + basename = self.listbox.get(selected[0]).split(": ", 1)[1] # We get the actual basename here, without the index + path = self.basename_to_path[basename] # get the path associated with the basename + return path + return None + +class ComboBoxEditableMenu(ttk.Combobox): + def __init__(self, master=None, pattern=None, default=None, width=None, is_stay_disabled=False, **kw): + + if 'values' in kw: + kw['values'] = tuple(kw['values']) + (OPT_SEPARATOR, USER_INPUT) + else: + kw['values'] = (USER_INPUT) + + super().__init__(master, **kw) + + self.textvariable = kw.get('textvariable', tk.StringVar()) + self.pattern = pattern + self.test = 1 + self.tooltip = ToolTip(self) + self.is_user_input_var = tk.BooleanVar(value=False) + self.is_stay_disabled = is_stay_disabled + + if isinstance(default, (str, int)): + self.default = default + else: + self.default = default[0] + + self.menu_combobox_configure() + self.var_validation(is_start_up=True) + + if width: + self.configure(width=width) 
+ + def menu_combobox_configure(self): + self.bind('<<ComboboxSelected>>', self.check_input) + self.bind('', lambda e:self.focus()) + self.bind('', self.focusin) + self.bind('', lambda e: self.var_validation(is_focus_only=True)) + + if is_macos: + self.bind('', lambda e:self.button_released()) + + if not self.is_stay_disabled: + self.configure(state=READ_ONLY) + + def check_input(self, event=None): + if self.textvariable.get() == USER_INPUT: + self.textvariable.set('') + self.configure(state=tk.NORMAL) + self.focus() + self.selection_range(0, 0) + else: + self.var_validation() + + def var_validation(self, is_focus_only=False, is_start_up=False): + if is_focus_only and not self.is_stay_disabled: + self.configure(state=READ_ONLY) + + if re.fullmatch(self.pattern, self.textvariable.get()) is None: + if not is_start_up and not self.textvariable.get() in (OPT_SEPARATOR, USER_INPUT): + self.tooltip.showtip(INVALID_INPUT_E, True) + + self.textvariable.set(self.default) + + def button_released(self, e=None): + self.event_generate('') + self.event_generate('') + + def focusin(self, e): + self.selection_clear() + if is_macos: + self.event_generate('') + +class ComboBoxMenu(ttk.Combobox): + def __init__(self, master=None, dropdown_name=None, offset=185, is_download_menu=False, command=None, width=None, **kw): + super().__init__(master, **kw) + + # Configure the combobox using the menu_combobox_configure method + self.menu_combobox_configure(is_download_menu, width=width) + + # Check if both dropdown_name and 'values' are provided to update dropdown size + if dropdown_name and 'values' in kw: + self.update_dropdown_size(kw['values'], dropdown_name, offset) + + if command: + self.command(command) + + def menu_combobox_configure(self, is_download_menu=False, command=None, width=None): + self.bind('', self.focusin) + self.bind('', lambda e:"break") + + if is_macos: + self.bind('', lambda e:self.button_released()) + + if not is_download_menu: + self.configure(state=READ_ONLY) + + if command: + self.command(command) + + if width: + self.configure(width=width) + + def button_released(self, e=None): + self.event_generate('') + self.event_generate('') + + def command(self, command): + if not self.bind('<<ComboboxSelected>>'): + self.bind('<<ComboboxSelected>>', command) + + def focusin(self, e): + self.selection_clear() + if is_macos: + self.event_generate('') + + def update_dropdown_size(self, option_list, dropdown_name, offset=185, command=None): + dropdown_style = f"{dropdown_name}.TCombobox" + if option_list: + max_string = max(option_list, key=len) + font = Font(font=self.cget('font')) + width_in_pixels = font.measure(max_string) - offset + width_in_pixels = 0 if width_in_pixels < 0 else width_in_pixels + else: + width_in_pixels = 0 + + style = ttk.Style(self) + style.configure(dropdown_style, padding=(0, 0, 0, 0), postoffset=(0, 0, width_in_pixels, 0)) + self.configure(style=dropdown_style) + + if command: + self.command(command) + +class ThreadSafeConsole(tk.Text): + """ + Text Widget which is thread safe for tkinter + """ + + def __init__(self, master, **options): + tk.Text.__init__(self, master, **options) + self.queue = queue.Queue() + self.update_me() + + def write(self, line): + self.queue.put(line) + + def clear(self): + self.queue.put(None) + + def update_me(self): + self.configure(state=tk.NORMAL) + try: + while 1: + line = self.queue.get_nowait() + if line is None: + self.delete(1.0, tk.END) + else: + self.insert(tk.END, str(line)) + self.see(tk.END) + self.update_idletasks() + except queue.Empty: + pass + self.configure(state=tk.DISABLED) + 
self.after(100, self.update_me) + + def copy_text(self): + hightlighted_text = self.selection_get() + self.clipboard_clear() + self.clipboard_append(hightlighted_text) + + def select_all_text(self): + self.tag_add('sel', '1.0', 'end') + +class MainWindow(TkinterDnD.Tk if is_dnd_compatible else tk.Tk): + # --Constants-- + # Layout + + IMAGE_HEIGHT = IMAGE_HEIGHT + FILEPATHS_HEIGHT = FILEPATHS_HEIGHT + OPTIONS_HEIGHT = OPTIONS_HEIGHT + CONVERSIONBUTTON_HEIGHT = CONVERSIONBUTTON_HEIGHT + COMMAND_HEIGHT = COMMAND_HEIGHT + PROGRESS_HEIGHT = PROGRESS_HEIGHT + PADDING = PADDING + WIDTH = WIDTH + COL1_ROWS = 11 + COL2_ROWS = 11 + + def __init__(self): + #Run the __init__ method on the tk.Tk class + super().__init__() + + self.set_app_font() + + style = ttk.Style(self) + style.map('TCombobox', selectbackground=[('focus', '#0c0c0c')], selectforeground=[('focus', 'white')]) + style.configure('TCombobox', selectbackground='#0c0c0c') + #style.configure('TCheckbutton', indicatorsize=30) + + # Calculate window height + height = self.IMAGE_HEIGHT + self.FILEPATHS_HEIGHT + self.OPTIONS_HEIGHT + height += self.CONVERSIONBUTTON_HEIGHT + self.COMMAND_HEIGHT + self.PROGRESS_HEIGHT + height += self.PADDING * 5 # Padding + width = self.WIDTH + self.main_window_width = width + self.main_window_height = height + + # --Window Settings-- + self.withdraw() + self.title('Ultimate Vocal Remover') + # Set Geometry and Center Window + self.geometry('{width}x{height}+{xpad}+{ypad}'.format( + width=self.main_window_width, + height=height, + xpad=int(self.winfo_screenwidth()/2 - width/2), + ypad=int(self.winfo_screenheight()/2 - height/2 - 30))) + + self.iconbitmap(ICON_IMG_PATH) if is_windows else self.tk.call('wm', 'iconphoto', self._w, tk.PhotoImage(file=MAIN_ICON_IMG_PATH)) + self.protocol("WM_DELETE_WINDOW", self.save_values) + self.resizable(False, False) + + self.msg_queue = queue.Queue() + # Create a custom style that inherits from the original Combobox style. 
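# Illustrative sketch (not part of this patch): the ThreadSafeConsole defined above is the usual
# queue-plus-after() recipe for writing to a Tk widget from worker threads -- write() only
# enqueues, and update_me() drains the queue on the Tk main loop every 100 ms. Reduced to its
# essentials (names below are invented for the example):

import queue
import tkinter as tk

def attach_threadsafe_writer(text_widget, poll_ms=100):
    """Return a write(line) callable usable from any thread; the widget is only touched on the Tk main loop."""
    pending = queue.Queue()

    def drain():
        try:
            while True:
                text_widget.insert(tk.END, str(pending.get_nowait()))
        except queue.Empty:
            pass
        text_widget.after(poll_ms, drain)  # keep polling on the main loop

    drain()
    return pending.put

# Usage sketch:
#   write = attach_threadsafe_writer(command_text_widget)
#   threading.Thread(target=lambda: write("processing done\n"), daemon=True).start()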
+ + if not is_windows: + self.update() + + #Load Images + img = ImagePath(BASE_PATH) + self.logo_img = img.open_image(path=img.banner_path, size=(width, height)) + self.efile_img = img.efile_img + self.stop_img = img.stop_img + self.help_img = img.help_img + self.download_img = img.download_img + self.donate_img = img.donate_img + self.key_img = img.key_img + self.credits_img = img.credits_img + + self.right_img = img.right_img + self.left_img = img.left_img + self.img_mapper = { + "down":img.down_img, + "up":img.up_img, + "copy":img.copy_img, + "clear":img.clear_img + } + + #Placeholders + self.error_log_var = tk.StringVar(value='') + self.vr_secondary_model_names = [] + self.mdx_secondary_model_names = [] + self.demucs_secondary_model_names = [] + self.vr_primary_model_names = [] + self.mdx_primary_model_names = [] + self.demucs_primary_model_names = [] + + self.vr_cache_source_mapper = {} + self.mdx_cache_source_mapper = {} + self.demucs_cache_source_mapper = {} + + # -Tkinter Value Holders- + + try: + self.load_saved_vars(data) + except Exception as e: + self.error_log_var.set(error_text('Loading Saved Variables', e)) + self.load_saved_vars(DEFAULT_DATA) + + self.cached_sources_clear() + + self.method_mapper = { + VR_ARCH_PM: self.vr_model_var, + MDX_ARCH_TYPE: self.mdx_net_model_var, + DEMUCS_ARCH_TYPE: self.demucs_model_var} + + self.vr_secondary_model_vars = {'voc_inst_secondary_model': self.vr_voc_inst_secondary_model_var, + 'other_secondary_model': self.vr_other_secondary_model_var, + 'bass_secondary_model': self.vr_bass_secondary_model_var, + 'drums_secondary_model': self.vr_drums_secondary_model_var, + 'is_secondary_model_activate': self.vr_is_secondary_model_activate_var, + 'voc_inst_secondary_model_scale': self.vr_voc_inst_secondary_model_scale_var, + 'other_secondary_model_scale': self.vr_other_secondary_model_scale_var, + 'bass_secondary_model_scale': self.vr_bass_secondary_model_scale_var, + 'drums_secondary_model_scale': self.vr_drums_secondary_model_scale_var} + + self.demucs_secondary_model_vars = {'voc_inst_secondary_model': self.demucs_voc_inst_secondary_model_var, + 'other_secondary_model': self.demucs_other_secondary_model_var, + 'bass_secondary_model': self.demucs_bass_secondary_model_var, + 'drums_secondary_model': self.demucs_drums_secondary_model_var, + 'is_secondary_model_activate': self.demucs_is_secondary_model_activate_var, + 'voc_inst_secondary_model_scale': self.demucs_voc_inst_secondary_model_scale_var, + 'other_secondary_model_scale': self.demucs_other_secondary_model_scale_var, + 'bass_secondary_model_scale': self.demucs_bass_secondary_model_scale_var, + 'drums_secondary_model_scale': self.demucs_drums_secondary_model_scale_var} + + self.mdx_secondary_model_vars = {'voc_inst_secondary_model': self.mdx_voc_inst_secondary_model_var, + 'other_secondary_model': self.mdx_other_secondary_model_var, + 'bass_secondary_model': self.mdx_bass_secondary_model_var, + 'drums_secondary_model': self.mdx_drums_secondary_model_var, + 'is_secondary_model_activate': self.mdx_is_secondary_model_activate_var, + 'voc_inst_secondary_model_scale': self.mdx_voc_inst_secondary_model_scale_var, + 'other_secondary_model_scale': self.mdx_other_secondary_model_scale_var, + 'bass_secondary_model_scale': self.mdx_bass_secondary_model_scale_var, + 'drums_secondary_model_scale': self.mdx_drums_secondary_model_scale_var} + + #Main Application Vars + self.progress_bar_main_var = tk.IntVar(value=0) + self.inputPathsEntry_var = tk.StringVar(value='') + self.conversion_Button_Text_var = 
tk.StringVar(value=START_PROCESSING) + self.chosen_ensemble_var = tk.StringVar(value=CHOOSE_ENSEMBLE_OPTION) + self.ensemble_main_stem_var = tk.StringVar(value=CHOOSE_STEM_PAIR) + self.ensemble_type_var = tk.StringVar(value=MAX_MIN) + self.save_current_settings_var = tk.StringVar(value=SELECT_SAVED_SET) + self.demucs_stems_var = tk.StringVar(value=ALL_STEMS) + self.mdxnet_stems_var = tk.StringVar(value=ALL_STEMS) + self.is_primary_stem_only_Text_var = tk.StringVar(value='') + self.is_secondary_stem_only_Text_var = tk.StringVar(value='') + self.is_primary_stem_only_Demucs_Text_var = tk.StringVar(value='') + self.is_secondary_stem_only_Demucs_Text_var = tk.StringVar(value='') + self.scaling_var = tk.DoubleVar(value=1.0) + self.active_processing_thread = None + self.verification_thread = None + self.is_menu_settings_open = False + self.is_root_defined_var = tk.BooleanVar(value=False) + self.is_check_splash = False + + self.is_open_menu_advanced_vr_options = tk.BooleanVar(value=False) + self.is_open_menu_advanced_demucs_options = tk.BooleanVar(value=False) + self.is_open_menu_advanced_mdx_options = tk.BooleanVar(value=False) + self.is_open_menu_advanced_ensemble_options = tk.BooleanVar(value=False) + self.is_open_menu_view_inputs = tk.BooleanVar(value=False) + self.is_open_menu_help = tk.BooleanVar(value=False) + self.is_open_menu_error_log = tk.BooleanVar(value=False) + self.is_open_menu_advanced_align_options = tk.BooleanVar(value=False) + + self.menu_advanced_vr_options_close_window = None + self.menu_advanced_demucs_options_close_window = None + self.menu_advanced_mdx_options_close_window = None + self.menu_advanced_ensemble_options_close_window = None + self.menu_help_close_window = None + self.menu_error_log_close_window = None + self.menu_view_inputs_close_window = None + self.menu_advanced_align_options_close_window = None + + self.mdx_model_params = None + self.vr_model_params = None + self.current_text_box = None + self.wav_type_set = None + self.is_online_model_menu = None + self.progress_bar_var = tk.IntVar(value=0) + self.is_confirm_error_var = tk.BooleanVar(value=False) + self.clear_cache_torch = False + self.vr_hash_MAPPER = load_model_hash_data(VR_HASH_JSON) + self.mdx_hash_MAPPER = load_model_hash_data(MDX_HASH_JSON) + self.mdx_name_select_MAPPER = load_model_hash_data(MDX_MODEL_NAME_SELECT) + self.demucs_name_select_MAPPER = load_model_hash_data(DEMUCS_MODEL_NAME_SELECT) + self.is_gpu_available = is_gpu_available + self.is_process_stopped = False + self.inputs_from_dir = [] + self.iteration = 0 + self.true_model_count = 0 + self.vr_primary_source = None + self.vr_secondary_source = None + self.mdx_primary_source = None + self.mdx_secondary_source = None + self.demucs_primary_source = None + self.demucs_secondary_source = None + self.toplevels = [] + + #Download Center Vars + self.online_data = {} + self.bulletin_data = INFO_UNAVAILABLE_TEXT + self.is_online = False + self.lastest_version = '' + self.model_download_demucs_var = tk.StringVar(value='') + self.model_download_mdx_var = tk.StringVar(value='') + self.model_download_vr_var = tk.StringVar(value='') + self.selected_download_var = tk.StringVar(value=NO_MODEL) + self.select_download_var = tk.StringVar(value='') + self.download_progress_info_var = tk.StringVar(value='') + self.download_progress_percent_var = tk.StringVar(value='') + self.download_progress_bar_var = tk.IntVar(value=0) + self.download_stop_var = tk.StringVar(value='') + self.app_update_status_Text_var = tk.StringVar(value=LOADING_VERSION_INFO_TEXT) + 
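# Illustrative sketch (not part of this patch): like the rest of the state above, the download and
# update status here is held in tk.Variable objects (StringVar/IntVar/BooleanVar) rather than plain
# attributes, so the GUI can react whenever a value changes -- the REFRESH_VARS block further down
# relies on exactly this via trace_add('write', ...). The mechanism in isolation (names below are
# invented for the example):

import tkinter as tk

demo_root = tk.Tk()
status_var = tk.StringVar(master=demo_root, value='idle')

# Every write to the variable fires the callback.
status_var.trace_add('write', lambda *args: print('status is now:', status_var.get()))
status_var.set('downloading model...')   # -> "status is now: downloading model..."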
self.app_update_button_Text_var = tk.StringVar(value=CHECK_FOR_UPDATES_TEXT) + + self.user_code_validation_var = tk.StringVar(value='') + self.download_link_path_var = tk.StringVar(value='') + self.download_save_path_var = tk.StringVar(value='') + self.download_update_link_var = tk.StringVar(value='') + self.download_update_path_var = tk.StringVar(value='') + self.download_demucs_models_list = [] + self.download_demucs_newer_models = [] + self.refresh_list_Button = None + self.stop_download_Button_DISABLE = None + self.enable_tabs = None + self.is_download_thread_active = False + self.is_process_thread_active = False + self.is_active_processing_thread = False + self.active_download_thread = None + self.pre_proc_model_toggle = None + self.change_state_lambda = None + self.file_one_sub_var = tk.StringVar(value=FILE_ONE_MAIN_LABEL) + self.file_two_sub_var = tk.StringVar(value=FILE_TWO_MAIN_LABEL) + self.cuda_device_list = GPU_DEVICE_NUM_OPTS + self.opencl_list = GPU_DEVICE_NUM_OPTS + + #Model Update + self.last_found_ensembles = ENSEMBLE_OPTIONS + self.last_found_settings = ENSEMBLE_OPTIONS + self.last_found_models = () + self.model_data_table = () + self.ensemble_model_list = () + self.default_change_model_list = () + + # --Widgets-- + self.fill_main_frame() + self.bind_widgets() + + # --Update Widgets-- + self.update_available_models() + self.update_main_widget_states() + self.update_loop() + self.update_button_states() + self.download_validate_code() + self.delete_temps(is_start_up=True) + self.ensemble_listbox_Option.configure(state=tk.DISABLED) + self.command_Text.write(f'Ultimate Vocal Remover {VERSION} [{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}]') + self.update_checkbox_text = lambda:self.selection_action_process_method(self.chosen_process_method_var.get()) + self.check_dual_paths() + if not is_windows: + self.update_idletasks() + self.fill_gpu_list() + self.online_data_refresh(user_refresh=False, is_start_up=True) + + # Menu Functions + def main_window_LABEL_SET(self, master, text):return ttk.Label(master=master, text=text, background=BG_COLOR, font=self.font_set, foreground=FG_COLOR, anchor=tk.CENTER) + def main_window_LABEL_SUB_SET(self, master, text_var):return ttk.Label(master=master, textvariable=text_var, background=BG_COLOR, font=self.font_set, foreground=FG_COLOR, anchor=tk.CENTER) + def menu_title_LABEL_SET(self, frame, text, width=35):return ttk.Label(master=frame, text=text, font=(SEC_FONT_NAME, f"{FONT_SIZE_5}", "underline"), justify="center", foreground="#13849f", width=width, anchor=tk.CENTER) + def menu_sub_LABEL_SET(self, frame, text, font_size=FONT_SIZE_2):return ttk.Label(master=frame, text=text, font=(MAIN_FONT_NAME, f"{font_size}"), foreground=FG_COLOR, anchor=tk.CENTER) + def menu_FRAME_SET(self, frame, thickness=20):return tk.Frame(frame, highlightbackground=BG_COLOR, highlightcolor=BG_COLOR, highlightthicknes=thickness) + def check_is_menu_settings_open(self):self.menu_settings() if not self.is_menu_settings_open else None + def spacer_label(self, frame): return tk.Label(frame, text='', font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground='#868687', justify="left").grid() + + #Ensemble Listbox Functions + def ensemble_listbox_get_all_selected_models(self):return [self.ensemble_listbox_Option.get(i) for i in self.ensemble_listbox_Option.curselection()] + def ensemble_listbox_select_from_indexs(self, indexes):return [self.ensemble_listbox_Option.selection_set(i) for i in indexes] + def ensemble_listbox_clear_and_insert_new(self, model_ensemble_updated):return 
(self.ensemble_listbox_Option.delete(0, 'end'), [self.ensemble_listbox_Option.insert(tk.END, models) for models in model_ensemble_updated]) + def ensemble_listbox_get_indexes_for_files(self, updated, selected):return [updated.index(model) for model in selected if model in updated] + + def set_app_font(self): + chosen_font_name, chosen_font_file = font_checker(OWN_FONT_PATH) + + if chosen_font_name: + gui_data.sv_ttk.set_theme("dark", chosen_font_name, 10) + if chosen_font_file: + pyglet_font.add_file(chosen_font_file) + self.font_set = Font(family=chosen_font_name, size=FONT_SIZE_F2) + self.font_entry = Font(family=chosen_font_name, size=FONT_SIZE_F2) + else: + pyglet_font.add_file(FONT_MAPPER[MAIN_FONT_NAME]) + pyglet_font.add_file(FONT_MAPPER[SEC_FONT_NAME]) + gui_data.sv_ttk.set_theme("dark", MAIN_FONT_NAME, 10) + self.font_set = Font(family=SEC_FONT_NAME, size=FONT_SIZE_F2) + self.font_entry = Font(family=MAIN_FONT_NAME, size=FONT_SIZE_F2) + + def process_iteration(self): + self.iteration = self.iteration + 1 + + def assemble_model_data(self, model=None, arch_type=ENSEMBLE_MODE, is_dry_check=False, is_change_def=False, is_get_hash_dir_only=False): + + if arch_type == ENSEMBLE_STEM_CHECK: + + model_data = self.model_data_table + missing_models = [model.model_status for model in model_data if not model.model_status] + + if missing_models or not model_data: + model_data: List[ModelData] = [ModelData(model_name, is_dry_check=is_dry_check) for model_name in self.ensemble_model_list] + self.model_data_table = model_data + + if arch_type == KARAOKEE_CHECK: + model_list = [] + model_data: List[ModelData] = [ModelData(model_name, is_dry_check=is_dry_check) for model_name in self.default_change_model_list] + for model in model_data: + if model.model_status and model.is_karaoke or model.is_bv_model: + model_list.append(model.model_and_process_tag) + + return model_list + + if arch_type == ENSEMBLE_MODE: + model_data: List[ModelData] = [ModelData(model_name) for model_name in self.ensemble_listbox_get_all_selected_models()] + if arch_type == ENSEMBLE_CHECK: + model_data: List[ModelData] = [ModelData(model, is_change_def=is_change_def, is_get_hash_dir_only=is_get_hash_dir_only)] + if arch_type == VR_ARCH_TYPE or arch_type == VR_ARCH_PM: + model_data: List[ModelData] = [ModelData(model, VR_ARCH_TYPE)] + if arch_type == MDX_ARCH_TYPE: + model_data: List[ModelData] = [ModelData(model, MDX_ARCH_TYPE)] + if arch_type == DEMUCS_ARCH_TYPE: + model_data: List[ModelData] = [ModelData(model, DEMUCS_ARCH_TYPE)]# + + return model_data + + def clear_cache(self, network): + + if network == VR_ARCH_TYPE: + dir = VR_HASH_DIR + if network == MDX_ARCH_TYPE: + dir = MDX_HASH_DIR + + for filename in os.listdir(dir): + filepath = os.path.join(dir, filename) + if filename not in ['model_data.json', 'model_name_mapper.json', 'mdx_c_configs'] and not os.path.isdir(filepath): + os.remove(filepath) + + self.vr_model_var.set(CHOOSE_MODEL) + self.mdx_net_model_var.set(CHOOSE_MODEL) + self.model_data_table.clear() + self.chosen_ensemble_var.set(CHOOSE_ENSEMBLE_OPTION) + self.ensemble_main_stem_var.set(CHOOSE_STEM_PAIR) + self.ensemble_listbox_Option.configure(state=tk.DISABLED) + self.update_checkbox_text() + + def thread_check(self, thread_to_check): + '''Checks if thread is alive''' + + is_running = False + + if type(thread_to_check) is KThread: + if thread_to_check.is_alive(): + is_running = True + + return is_running + + # -Widget Methods-- + + def fill_main_frame(self): + """Creates root window widgets""" + + 
self.title_Label = tk.Label(master=self, image=self.logo_img, compound=tk.TOP) + self.title_Label.place(x=-2, y=banner_placement) + + self.fill_filePaths_Frame() + self.fill_options_Frame() + + self.conversion_Button = ttk.Button(master=self, textvariable=self.conversion_Button_Text_var, command=self.process_initialize) + self.conversion_Button.place(x=X_CONVERSION_BUTTON_1080P, y=BUTTON_Y_1080P, width=WIDTH_CONVERSION_BUTTON_1080P, height=HEIGHT_GENERIC_BUTTON_1080P, + relx=0, rely=0, relwidth=1, relheight=0) + + self.conversion_Button_enable = lambda:(self.conversion_Button_Text_var.set(START_PROCESSING), self.conversion_Button.configure(state=tk.NORMAL)) + self.conversion_Button_disable = lambda message:(self.conversion_Button_Text_var.set(message), self.conversion_Button.configure(state=tk.DISABLED)) + + self.stop_Button = ttk.Button(master=self, image=self.stop_img, command=self.confirm_stop_process) + self.stop_Button.place(x=X_STOP_BUTTON_1080P, y=BUTTON_Y_1080P, width=HEIGHT_GENERIC_BUTTON_1080P, height=HEIGHT_GENERIC_BUTTON_1080P, + relx=1, rely=0, relwidth=0, relheight=0) + self.help_hints(self.stop_Button, text=STOP_HELP) + + self.settings_Button = ttk.Button(master=self, image=self.help_img, command=self.check_is_menu_settings_open) + self.settings_Button.place(x=X_SETTINGS_BUTTON_1080P, y=BUTTON_Y_1080P, width=HEIGHT_GENERIC_BUTTON_1080P, height=HEIGHT_GENERIC_BUTTON_1080P, + relx=1, rely=0, relwidth=0, relheight=0) + self.help_hints(self.settings_Button, text=SETTINGS_HELP) + + self.progressbar = ttk.Progressbar(master=self, variable=self.progress_bar_main_var) + self.progressbar.place(x=X_PROGRESSBAR_1080P, y=Y_OFFSET_PROGRESS_BAR_1080P, width=WIDTH_PROGRESSBAR_1080P, height=HEIGHT_PROGRESSBAR_1080P, + relx=0, rely=0, relwidth=1, relheight=0) + + # Select Music Files Option + self.console_Frame = tk.Frame(master=self, highlightbackground='#101012', highlightcolor='#101012', highlightthicknes=2) + self.console_Frame.place(x=15, y=self.IMAGE_HEIGHT + self.FILEPATHS_HEIGHT + self.OPTIONS_HEIGHT + self.CONVERSIONBUTTON_HEIGHT + self.PADDING + 5 *3, width=-30, height=self.COMMAND_HEIGHT+7, + relx=0, rely=0, relwidth=1, relheight=0) + + + self.command_Text = ThreadSafeConsole(master=self.console_Frame, background='#0c0c0d',fg='#898b8e', highlightcolor="#0c0c0d", font=(MAIN_FONT_NAME, FONT_SIZE_4), borderwidth=0) + self.command_Text.pack(fill=tk.BOTH, expand=1) + self.command_Text.bind(right_click_button, lambda e:self.right_click_console(e)) + + def fill_filePaths_Frame(self): + """Fill Frame with neccessary widgets""" + + # Select Music Files Option + self.filePaths_Frame = ttk.Frame(master=self) + self.filePaths_Frame.place(x=FILEPATHS_FRAME_X, y=FILEPATHS_FRAME_Y, width=FILEPATHS_FRAME_WIDTH, height=self.FILEPATHS_HEIGHT, relx=0, rely=0, relwidth=1, relheight=0) + + self.filePaths_musicFile_Button = ttk.Button(master=self.filePaths_Frame, text=SELECT_INPUT_TEXT, command=self.input_select_filedialog) + self.filePaths_musicFile_Button.place(x=MUSICFILE_BUTTON_X, y=MUSICFILE_BUTTON_Y, width=MUSICFILE_BUTTON_WIDTH, height=MUSICFILE_BUTTON_HEIGHT, relx=0, rely=0, relwidth=0.3, relheight=0.5) + self.filePaths_musicFile_Entry = ttk.Entry(master=self.filePaths_Frame, textvariable=self.inputPathsEntry_var, font=self.font_entry, state=tk.DISABLED) + self.filePaths_musicFile_Entry.place(x=MUSICFILE_ENTRY_X, y=MUSICFILE_BUTTON_Y, width=MUSICFILE_ENTRY_WIDTH, height=MUSICFILE_ENTRY_HEIGHT, relx=0.3, rely=0, relwidth=0.7, relheight=0.5) + self.filePaths_musicFile_Open = 
ttk.Button(master=self.filePaths_Frame, image=self.efile_img, command=lambda:OPEN_FILE_func(os.path.dirname(self.inputPaths[0])) if self.inputPaths and os.path.isdir(os.path.dirname(self.inputPaths[0])) else self.error_dialoge(INVALID_INPUT)) + self.filePaths_musicFile_Open.place(x=OPEN_BUTTON_X, y=MUSICFILE_BUTTON_Y, width=OPEN_BUTTON_WIDTH, height=MUSICFILE_ENTRY_HEIGHT, relx=0.3, rely=0, relwidth=0.7, relheight=0.5) + + # Add any additional configurations or method calls here + self.filePaths_musicFile_Entry.configure(cursor="hand2") + self.help_hints(self.filePaths_musicFile_Button, text=INPUT_FOLDER_ENTRY_HELP) + self.help_hints(self.filePaths_musicFile_Entry, text=INPUT_FOLDER_ENTRY_HELP_2) + self.help_hints(self.filePaths_musicFile_Open, text=INPUT_FOLDER_BUTTON_HELP) + + # Save To Option + self.filePaths_saveTo_Button = ttk.Button(master=self.filePaths_Frame, text=SELECT_OUTPUT_TEXT, command=self.export_select_filedialog) + self.filePaths_saveTo_Button.place(x=SAVETO_BUTTON_X, y=SAVETO_BUTTON_Y, width=SAVETO_BUTTON_WIDTH, height=SAVETO_BUTTON_HEIGHT, relx=0, rely=0.5, relwidth=0.3, relheight=0.5) + self.filePaths_saveTo_Entry = ttk.Entry(master=self.filePaths_Frame, textvariable=self.export_path_var, font=self.font_entry, state=tk.DISABLED) + self.filePaths_saveTo_Entry.place(x=SAVETO_ENTRY_X, y=SAVETO_BUTTON_Y, width=SAVETO_ENTRY_WIDTH, height=SAVETO_ENTRY_HEIGHT, relx=0.3, rely=0.5, relwidth=0.7, relheight=0.5) + self.filePaths_saveTo_Open = ttk.Button(master=self.filePaths_Frame, image=self.efile_img, command=lambda:OPEN_FILE_func(Path(self.export_path_var.get())) if os.path.isdir(self.export_path_var.get()) else self.error_dialoge(INVALID_EXPORT)) + self.filePaths_saveTo_Open.place(x=OPEN_BUTTON_X, y=SAVETO_BUTTON_Y, width=OPEN_BUTTON_WIDTH, height=SAVETO_ENTRY_HEIGHT, relx=0.3, rely=0.5, relwidth=0.7, relheight=0.5) + self.help_hints(self.filePaths_saveTo_Button, text=OUTPUT_FOLDER_ENTRY_HELP) + self.help_hints(self.filePaths_saveTo_Open, text=OUTPUT_FOLDER_BUTTON_HELP) + + def fill_options_Frame(self): + """Fill Frame with neccessary widgets""" + + self.options_Frame = ttk.Frame(master=self) + self.options_Frame.place(x=OPTIONS_FRAME_X, y=OPTIONS_FRAME_Y, width=OPTIONS_FRAME_WIDTH, height=self.OPTIONS_HEIGHT, relx=0, rely=0, relwidth=1, relheight=0) + + # -Create Widgets- + + ## Save Format + self.wav_button = ttk.Radiobutton(master=self.options_Frame, text=WAV, variable=self.save_format_var, value=WAV) + self.wav_button.place(x=RADIOBUTTON_X_WAV, y=RADIOBUTTON_Y, width=RADIOBUTTON_WIDTH, height=RADIOBUTTON_HEIGHT, relx=0, rely=0/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.wav_button, text=f'{FORMAT_SETTING_HELP}{WAV}') + + self.flac_button = ttk.Radiobutton(master=self.options_Frame, text=FLAC, variable=self.save_format_var, value=FLAC) + self.flac_button.place(x=RADIOBUTTON_X_FLAC, y=RADIOBUTTON_Y, width=RADIOBUTTON_WIDTH, height=RADIOBUTTON_HEIGHT, relx=1/3, rely=0/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.flac_button, text=f'{FORMAT_SETTING_HELP}{FLAC}') + + self.mp3_button = ttk.Radiobutton(master=self.options_Frame, text=MP3, variable=self.save_format_var, value=MP3) + self.mp3_button.place(x=RADIOBUTTON_X_MP3, y=RADIOBUTTON_Y, width=RADIOBUTTON_WIDTH, height=RADIOBUTTON_HEIGHT, relx=2/3, rely=0/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.mp3_button, text=f'{FORMAT_SETTING_HELP}{MP3}') + + # Choose Conversion Method + self.chosen_process_method_Label = 
self.main_window_LABEL_SET(self.options_Frame, CHOOSE_PROC_METHOD_MAIN_LABEL) + self.chosen_process_method_Label.place(x=0, y=MAIN_ROW_Y[0], width=LEFT_ROW_WIDTH, height=LABEL_HEIGHT, relx=0, rely=2/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.chosen_process_method_Option = ComboBoxMenu(self.options_Frame, textvariable=self.chosen_process_method_var, values=PROCESS_METHODS, command=lambda e: self.selection_action_process_method(self.chosen_process_method_var.get(), from_widget=True, is_from_conv_menu=True)) + self.chosen_process_method_Option.place(x=0, y=MAIN_ROW_Y[1], width=LEFT_ROW_WIDTH, height=OPTION_HEIGHT, relx=0, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + #self.chosen_process_method_var.trace_add('write', lambda *args: self.update_main_widget_states()) + self.help_hints(self.chosen_process_method_Label, text=CHOSEN_PROCESS_METHOD_HELP) + + # Choose Settings Option + self.save_current_settings_Label = self.main_window_LABEL_SET(self.options_Frame, SELECT_SAVED_SETTINGS_MAIN_LABEL) + self.save_current_settings_Label_place = lambda:self.save_current_settings_Label.place(x=MAIN_ROW_2_X[0], y=LOW_MENU_Y[0], width=0, height=LABEL_HEIGHT, relx=2/3, rely=6/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.save_current_settings_Option = ComboBoxMenu(self.options_Frame, textvariable=self.save_current_settings_var, command=lambda e:self.selection_action_saved_settings(self.save_current_settings_var.get())) + self.save_current_settings_Option_place = lambda:self.save_current_settings_Option.place(x=MAIN_ROW_2_X[1], y=LOW_MENU_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=2/3, rely=7/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.help_hints(self.save_current_settings_Label, text=SAVE_CURRENT_SETTINGS_HELP) + + ### MDX-NET ### + + # Choose MDX-Net Model + self.mdx_net_model_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_MDX_MODEL_MAIN_LABEL) + self.mdx_net_model_Label_place = lambda:self.mdx_net_model_Label.place(x=0, y=LOW_MENU_Y[0], width=LEFT_ROW_WIDTH, height=LABEL_HEIGHT, relx=0, rely=6/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.mdx_net_model_Option = ComboBoxMenu(self.options_Frame, textvariable=self.mdx_net_model_var, command=lambda event: self.selection_action(event, self.mdx_net_model_var, is_mdx_net=True)) + self.mdx_net_model_Option_place = lambda:self.mdx_net_model_Option.place(x=0, y=LOW_MENU_Y[1], width=LEFT_ROW_WIDTH, height=OPTION_HEIGHT, relx=0, rely=7/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + #self.mdx_net_model_var.trace_add('write', lambda *args: self.update_main_widget_states_mdx()) + self.help_hints(self.mdx_net_model_Label, text=CHOOSE_MODEL_HELP) + + # MDX-Overlap + self.overlap_mdx_Label = self.main_window_LABEL_SET(self.options_Frame, 'OVERLAP') + self.overlap_mdx_Label_place = lambda:self.overlap_mdx_Label.place(x=MAIN_ROW_2_X[0], y=MAIN_ROW_2_Y[0], width=0, height=LABEL_HEIGHT, relx=2/3, rely=2/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.overlap_mdx_Option = ComboBoxEditableMenu(self.options_Frame, values=MDX_OVERLAP, width=MENU_COMBOBOX_WIDTH, textvariable=self.overlap_mdx_var, pattern=REG_OVERLAP, default=MDX_OVERLAP) + self.overlap_mdx_Option_place = lambda:self.overlap_mdx_Option.place(x=MAIN_ROW_2_X[1], y=MAIN_ROW_2_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=2/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + + # MDX23-Overlap + self.overlap_mdx23_Option = 
ComboBoxEditableMenu(self.options_Frame, values=MDX23_OVERLAP, width=MENU_COMBOBOX_WIDTH, textvariable=self.overlap_mdx23_var, pattern=REG_OVERLAP23, default="8") + self.overlap_mdx23_Option_place = lambda:self.overlap_mdx23_Option.place(x=MAIN_ROW_2_X[1], y=MAIN_ROW_2_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=2/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.overlap_mdx_Label, text=MDX_OVERLAP_HELP) + + # Choose MDX-Net Stems + self.mdxnet_stems_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_STEMS_MAIN_LABEL) + self.mdxnet_stems_Label_place = lambda:self.mdxnet_stems_Label.place(x=MAIN_ROW_X[0], y=MAIN_ROW_Y[0], width=0, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + + self.mdxnet_stems_Option = ComboBoxMenu(self.options_Frame, textvariable=self.mdxnet_stems_var) + self.mdxnet_stems_Option_place = lambda:self.mdxnet_stems_Option.place(x=MAIN_ROW_X[1], y=MAIN_ROW_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.mdxnet_stems_Label, text=DEMUCS_STEMS_HELP) + + # MDX-Segment Size + self.mdx_segment_size_Label = self.main_window_LABEL_SET(self.options_Frame, SEGMENT_MDX_MAIN_LABEL) + self.mdx_segment_size_Label_place = lambda:self.mdx_segment_size_Label.place(x=MAIN_ROW_X[0], y=MAIN_ROW_Y[0], width=0, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.mdx_segment_size_Option = ComboBoxEditableMenu(self.options_Frame, values=MDX_SEGMENTS, width=MENU_COMBOBOX_WIDTH, textvariable=self.mdx_segment_size_var, pattern=REG_MDX_SEG, default="256")# + self.mdx_segment_size_Option_place = lambda:self.mdx_segment_size_Option.place(x=MAIN_ROW_X[1], y=MAIN_ROW_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.mdx_segment_size_Label, text=MDX_SEGMENT_SIZE_HELP) + + ### VR ARCH ### + + # Choose VR Model + self.vr_model_Label = self.main_window_LABEL_SET(self.options_Frame, SELECT_VR_MODEL_MAIN_LABEL) + self.vr_model_Label_place = lambda:self.vr_model_Label.place(x=0, y=LOW_MENU_Y[0], width=LEFT_ROW_WIDTH, height=LABEL_HEIGHT, relx=0, rely=6/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.vr_model_Option = ComboBoxMenu(self.options_Frame, textvariable=self.vr_model_var, command=lambda event: self.selection_action(event, self.vr_model_var)) + self.vr_model_Option_place = lambda:self.vr_model_Option.place(x=0, y=LOW_MENU_Y[1], width=LEFT_ROW_WIDTH, height=OPTION_HEIGHT, relx=0, rely=7/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.help_hints(self.vr_model_Label, text=CHOOSE_MODEL_HELP) + + # Aggression Setting + self.aggression_setting_Label = self.main_window_LABEL_SET(self.options_Frame, AGGRESSION_SETTING_MAIN_LABEL) + self.aggression_setting_Label_place = lambda:self.aggression_setting_Label.place(x=MAIN_ROW_2_X[0], y=MAIN_ROW_2_Y[0], width=0, height=LABEL_HEIGHT, relx=2/3, rely=2/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.aggression_setting_Option = ComboBoxEditableMenu(self.options_Frame, values=VR_AGGRESSION, textvariable=self.aggression_setting_var, pattern=REG_AGGRESSION, default=VR_AGGRESSION[5])# + self.aggression_setting_Option_place = lambda:self.aggression_setting_Option.place(x=MAIN_ROW_2_X[1], y=MAIN_ROW_2_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=2/3, 
rely=3/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.aggression_setting_Label, text=AGGRESSION_SETTING_HELP) + + # Window Size + self.window_size_Label = self.main_window_LABEL_SET(self.options_Frame, WINDOW_SIZE_MAIN_LABEL) + self.window_size_Label_place = lambda:self.window_size_Label.place(x=MAIN_ROW_X[0], y=MAIN_ROW_Y[0], width=0, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.window_size_Option = ComboBoxEditableMenu(self.options_Frame, values=VR_WINDOW, textvariable=self.window_size_var, pattern=REG_WINDOW, default=VR_WINDOW[1])# + self.window_size_Option_place = lambda:self.window_size_Option.place(x=MAIN_ROW_X[1], y=MAIN_ROW_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.window_size_Label, text=WINDOW_SIZE_HELP) + + ### DEMUCS ### + + # Choose Demucs Models + self.demucs_model_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_DEMUCS_MODEL_MAIN_LABEL) + self.demucs_model_Label_place = lambda:self.demucs_model_Label.place(x=0, y=LOW_MENU_Y[0], width=LEFT_ROW_WIDTH, height=LABEL_HEIGHT, relx=0, rely=6/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.demucs_model_Option = ComboBoxMenu(self.options_Frame, textvariable=self.demucs_model_var, command=lambda event: self.selection_action(event, self.demucs_model_var)) + self.demucs_model_Option_place = lambda:self.demucs_model_Option.place(x=0, y=LOW_MENU_Y[1], width=LEFT_ROW_WIDTH, height=OPTION_HEIGHT, relx=0, rely=7/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.help_hints(self.demucs_model_Label, text=CHOOSE_MODEL_HELP) + + # Choose Demucs Stems + self.demucs_stems_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_STEMS_MAIN_LABEL) + self.demucs_stems_Label_place = lambda:self.demucs_stems_Label.place(x=MAIN_ROW_X[0], y=MAIN_ROW_Y[0], width=0, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.demucs_stems_Option = ComboBoxMenu(self.options_Frame, textvariable=self.demucs_stems_var) + self.demucs_stems_Option_place = lambda:self.demucs_stems_Option.place(x=MAIN_ROW_X[1], y=MAIN_ROW_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.demucs_stems_Label, text=DEMUCS_STEMS_HELP) + + # Demucs-Segment + self.segment_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_SEGMENT_MAIN_LABEL) + self.segment_Label_place = lambda:self.segment_Label.place(x=MAIN_ROW_2_X[0], y=MAIN_ROW_2_Y[0], width=0, height=LABEL_HEIGHT, relx=2/3, rely=2/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.segment_Option = ComboBoxEditableMenu(self.options_Frame, values=DEMUCS_SEGMENTS, textvariable=self.segment_var, pattern=REG_SEGMENTS, default=DEMUCS_SEGMENTS)# + self.segment_Option_place = lambda:self.segment_Option.place(x=MAIN_ROW_2_X[1], y=MAIN_ROW_2_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=2/3, rely=3/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.segment_Label, text=SEGMENT_HELP) + + # Stem A + self.is_primary_stem_only_Demucs_Option = ttk.Checkbutton(master=self.options_Frame, textvariable=self.is_primary_stem_only_Demucs_Text_var, variable=self.is_primary_stem_only_Demucs_var, command=lambda:self.is_primary_stem_only_Demucs_Option_toggle()) + self.is_primary_stem_only_Demucs_Option_place = 
lambda:self.is_primary_stem_only_Demucs_Option.place(x=CHECK_BOX_X, y=CHECK_BOX_Y, width=CHECK_BOX_WIDTH, height=CHECK_BOX_HEIGHT, relx=1/3, rely=6/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.is_primary_stem_only_Demucs_Option_toggle = lambda:self.is_secondary_stem_only_Demucs_var.set(False) if self.is_primary_stem_only_Demucs_var.get() else self.is_secondary_stem_only_Demucs_Option.configure(state=tk.NORMAL) + self.help_hints(self.is_primary_stem_only_Demucs_Option, text=SAVE_STEM_ONLY_HELP) + + # Stem B + self.is_secondary_stem_only_Demucs_Option = ttk.Checkbutton(master=self.options_Frame, textvariable=self.is_secondary_stem_only_Demucs_Text_var, variable=self.is_secondary_stem_only_Demucs_var, command=lambda:self.is_secondary_stem_only_Demucs_Option_toggle()) + self.is_secondary_stem_only_Demucs_Option_place = lambda:self.is_secondary_stem_only_Demucs_Option.place(x=CHECK_BOX_X, y=CHECK_BOX_Y, width=CHECK_BOX_WIDTH, height=CHECK_BOX_HEIGHT, relx=1/3, rely=7/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.is_secondary_stem_only_Demucs_Option_toggle = lambda:self.is_primary_stem_only_Demucs_var.set(False) if self.is_secondary_stem_only_Demucs_var.get() else self.is_primary_stem_only_Demucs_Option.configure(state=tk.NORMAL) + self.is_stem_only_Demucs_Options_Enable = lambda:(self.is_primary_stem_only_Demucs_Option.configure(state=tk.NORMAL), self.is_secondary_stem_only_Demucs_Option.configure(state=tk.NORMAL)) + self.help_hints(self.is_secondary_stem_only_Demucs_Option, text=SAVE_STEM_ONLY_HELP) + + ### ENSEMBLE MODE ### + + # Ensemble Mode + self.chosen_ensemble_Label = self.main_window_LABEL_SET(self.options_Frame, ENSEMBLE_OPTIONS_MAIN_LABEL) + self.chosen_ensemble_Label_place = lambda:self.chosen_ensemble_Label.place(x=0, y=LOW_MENU_Y[0], width=LEFT_ROW_WIDTH, height=LABEL_HEIGHT, relx=0, rely=6/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.chosen_ensemble_Option = ComboBoxMenu(self.options_Frame, textvariable=self.chosen_ensemble_var, command=lambda e:self.selection_action_chosen_ensemble(self.chosen_ensemble_var.get())) + self.chosen_ensemble_Option_place = lambda:self.chosen_ensemble_Option.place(x=0, y=LOW_MENU_Y[1], width=LEFT_ROW_WIDTH, height=OPTION_HEIGHT, relx=0, rely=7/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.help_hints(self.chosen_ensemble_Label, text=CHOSEN_ENSEMBLE_HELP) + + # Ensemble Main Stems + self.ensemble_main_stem_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_MAIN_PAIR_MAIN_LABEL) + self.ensemble_main_stem_Label_place = lambda:self.ensemble_main_stem_Label.place(x=MAIN_ROW_X[0], y=MAIN_ROW_Y[0], width=0, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.ensemble_main_stem_Option = ComboBoxMenu(self.options_Frame, textvariable=self.ensemble_main_stem_var, values=ENSEMBLE_MAIN_STEM, command=lambda e: self.selection_action_ensemble_stems(self.ensemble_main_stem_var.get())) + self.ensemble_main_stem_Option_place = lambda:self.ensemble_main_stem_Option.place(x=MAIN_ROW_X[1], y=MAIN_ROW_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.help_hints(self.ensemble_main_stem_Label, text=ENSEMBLE_MAIN_STEM_HELP) + + # Ensemble Algorithm + self.ensemble_type_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_ENSEMBLE_ALGORITHM_MAIN_LABEL) + self.ensemble_type_Label_place = lambda:self.ensemble_type_Label.place(x=MAIN_ROW_2_X[0], 
y=MAIN_ROW_2_Y[0], width=0, height=LABEL_HEIGHT, relx=2/3, rely=2/11, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.ensemble_type_Option = ComboBoxMenu(self.options_Frame, textvariable=self.ensemble_type_var, values=ENSEMBLE_TYPE) + self.ensemble_type_Option_place = lambda:self.ensemble_type_Option.place(x=MAIN_ROW_2_X[1], y=MAIN_ROW_2_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT,relx=2/3, rely=3/11, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.help_hints(self.ensemble_type_Label, text=ENSEMBLE_TYPE_HELP) + + # Select Music Files Option + + # Ensemble Save Ensemble Outputs + self.ensemble_listbox_Label = self.main_window_LABEL_SET(self.options_Frame, AVAILABLE_MODELS_MAIN_LABEL) + self.ensemble_listbox_Label_place = lambda:self.ensemble_listbox_Label.place(x=MAIN_ROW_2_X[0], y=MAIN_ROW_2_Y[1], width=0, height=LABEL_HEIGHT, relx=2/3, rely=5/11, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.ensemble_listbox_Frame = tk.Frame(self.options_Frame, highlightbackground='#04332c', highlightcolor='#04332c', highlightthicknes=1) + self.ensemble_listbox_Option = tk.Listbox(self.ensemble_listbox_Frame, selectmode=tk.MULTIPLE, activestyle='dotbox', font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), background='#070708', exportselection=0, relief=tk.SOLID, borderwidth=0) + self.ensemble_listbox_scroll = ttk.Scrollbar(self.options_Frame, orient=tk.VERTICAL) + self.ensemble_listbox_Option.config(yscrollcommand=self.ensemble_listbox_scroll.set) + self.ensemble_listbox_scroll.configure(command=self.ensemble_listbox_Option.yview) + self.ensemble_listbox_Option_place = lambda: (self.ensemble_listbox_Frame.place(x=ENSEMBLE_LISTBOX_FRAME_X, y=ENSEMBLE_LISTBOX_FRAME_Y, width=ENSEMBLE_LISTBOX_FRAME_WIDTH, height=ENSEMBLE_LISTBOX_FRAME_HEIGHT, relx=2/3, rely=6/11, relwidth=1/3, relheight=1/self.COL1_ROWS), + self.ensemble_listbox_scroll.place(x=ENSEMBLE_LISTBOX_SCROLL_X, y=ENSEMBLE_LISTBOX_SCROLL_Y, width=ENSEMBLE_LISTBOX_SCROLL_WIDTH, height=ENSEMBLE_LISTBOX_SCROLL_HEIGHT, relx=2/3, rely=6/11, relwidth=1/10, relheight=1/self.COL1_ROWS)) + self.ensemble_listbox_Option_pack = lambda:self.ensemble_listbox_Option.pack(fill=tk.BOTH, expand=1) + self.help_hints(self.ensemble_listbox_Label, text=ENSEMBLE_LISTBOX_HELP) + + ### AUDIO TOOLS ### + + # Chosen Audio Tool + self.chosen_audio_tool_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_AUDIO_TOOLS_MAIN_LABEL) + self.chosen_audio_tool_Label_place = lambda:self.chosen_audio_tool_Label.place(x=0, y=LOW_MENU_Y[0], width=LEFT_ROW_WIDTH, height=LABEL_HEIGHT, relx=0, rely=6/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.chosen_audio_tool_Option = ComboBoxMenu(self.options_Frame, textvariable=self.chosen_audio_tool_var, values=AUDIO_TOOL_OPTIONS, command=lambda e: self.update_main_widget_states()) + self.chosen_audio_tool_Option_place = lambda:self.chosen_audio_tool_Option.place(x=0, y=LOW_MENU_Y[1], width=LEFT_ROW_WIDTH, height=OPTION_HEIGHT, relx=0, rely=7/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL1_ROWS) + self.help_hints(self.chosen_audio_tool_Label, text=AUDIO_TOOLS_HELP) + + # Choose Agorithim + self.choose_algorithm_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_MANUAL_ALGORITHM_MAIN_LABEL) + self.choose_algorithm_Label_place = lambda:self.choose_algorithm_Label.place(x=MAIN_ROW_X[0], y=MAIN_ROW_Y[0], width=0, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.choose_algorithm_Option = ComboBoxMenu(self.options_Frame, textvariable=self.choose_algorithm_var, 
values=MANUAL_ENSEMBLE_OPTIONS) + self.choose_algorithm_Option_place = lambda:self.choose_algorithm_Option.place(x=MAIN_ROW_X[1], y=MAIN_ROW_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + #self.help_hints(self.mdx_segment_size_Label, text=MDX_SEGMENT_SIZE_HELP) + + + # Time Stretch + self.time_stretch_rate_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_RATE_MAIN_LABEL) + self.time_stretch_rate_Label_place = lambda:self.time_stretch_rate_Label.place(x=MAIN_ROW_X[0], y=MAIN_ROW_Y[0], width=0, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.time_stretch_rate_Option = ComboBoxEditableMenu(self.options_Frame, values=TIME_PITCH, textvariable=self.time_stretch_rate_var, pattern=REG_TIME, default=TIME_PITCH)# + self.time_stretch_rate_Option_place = lambda:self.time_stretch_rate_Option.place(x=MAIN_ROW_X[1], y=MAIN_ROW_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + #self.help_hints(self.mdx_segment_size_Label, text=MDX_SEGMENT_SIZE_HELP) + + # Pitch Rate + self.pitch_rate_Label = self.main_window_LABEL_SET(self.options_Frame, CHOOSE_SEMITONES_MAIN_LABEL) + self.pitch_rate_Label_place = lambda:self.pitch_rate_Label.place(x=MAIN_ROW_X[0], y=MAIN_ROW_Y[0], width=0, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.pitch_rate_Option = ComboBoxEditableMenu(self.options_Frame, values=TIME_PITCH, textvariable=self.pitch_rate_var, pattern=REG_PITCH, default=TIME_PITCH)# + self.pitch_rate_Option_place = lambda:self.pitch_rate_Option.place(x=MAIN_ROW_X[1], y=MAIN_ROW_Y[1], width=MAIN_ROW_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + + # Is Time Correction + self.is_time_correction_Option = ttk.Checkbutton(master=self.options_Frame, text=TIME_CORRECTION_TEXT, variable=self.is_time_correction_var) + self.is_time_correction_Option_place = lambda:self.is_time_correction_Option.place(x=CHECK_BOX_X, y=CHECK_BOX_Y, width=CHECK_BOX_WIDTH, height=CHECK_BOX_HEIGHT, relx=1/3, rely=5/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.is_time_correction_Option, text=IS_TIME_CORRECTION_HELP) + + # Is Wav Ensemble + self.is_wav_ensemble_Option = ttk.Checkbutton(master=self.options_Frame, text=ENSEMBLE_WAVFORMS_TEXT, variable=self.is_wav_ensemble_var) + self.is_wav_ensemble_Option_place = lambda:self.is_wav_ensemble_Option.place(x=CHECK_BOX_X, y=CHECK_BOX_Y, width=CHECK_BOX_WIDTH, height=CHECK_BOX_HEIGHT, relx=1/3, rely=5/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.is_wav_ensemble_Option, text=IS_WAV_ENSEMBLE_HELP) + + ## ALIGN TOOL ## + + # Track 1 + self.fileOne_Label = self.main_window_LABEL_SUB_SET(self.options_Frame, self.file_one_sub_var) + self.fileOne_Label_place = lambda: self.fileOne_Label.place(x=FILEONE_LABEL_X, y=LABEL_Y, width=FILEONE_LABEL_WIDTH, height=LABEL_HEIGHT, relx=1/3, rely=2/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + + self.fileOne_Entry = ttk.Entry(master=self.options_Frame, textvariable=self.fileOneEntry_var, font=self.font_entry, state=tk.DISABLED) + self.fileOne_Entry_place = lambda: self.fileOne_Entry.place(x=SUB_ENT_ROW_X, y=ENTRY_Y, width=ENTRY_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + 
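# Illustrative sketch (not part of this patch): throughout fill_options_Frame each widget is built
# once and paired with a *_place lambda; the GUI can then show just the controls that belong to the
# selected process method by calling those lambdas, and hide the others again (e.g. with
# place_forget()). The bare pattern looks like this (names below are invented for the example):

import tkinter as tk
from tkinter import ttk

def build_toggleable_label(parent):
    label = ttk.Label(parent, text='Window Size')
    show = lambda: label.place(x=10, y=10, width=120, height=25)  # placement deferred until needed
    hide = label.place_forget
    return show, hide

# Usage sketch:
#   show, hide = build_toggleable_label(options_frame)
#   show()   # reveal the control when its architecture is selected
#   hide()   # remove it when another process method is chosen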
self.help_hints(self.fileOne_Entry, text=INPUT_SEC_FIELDS_HELP) + self.fileOne_Entry.configure(cursor="hand2") + + self.fileOne_Open = ttk.Button(master=self.options_Frame, image=self.efile_img, command=lambda:OPEN_FILE_func(os.path.dirname(self.fileOneEntry_Full_var.get()))) + self.fileOne_Open_place = lambda:self.fileOne_Open.place(x=ENTRY_OPEN_BUTT_X_OFF, y=ENTRY_Y, width=ENTRY_OPEN_BUTT_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=3/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS)#OPEN_FILE_func(Path(self.export_path_var.get())) if os.path.isdir(self.export_path_var.get()) else self.error_dialoge(INVALID_EXPORT)) + self.help_hints(self.fileOne_Open, text=INPUT_FOLDER_BUTTON_HELP) + + # Track 2 + self.fileTwo_Label = self.main_window_LABEL_SUB_SET(self.options_Frame, self.file_two_sub_var) + self.fileTwo_Label_place = lambda: self.fileTwo_Label.place(x=FILETWO_LABEL_X, y=LABEL_Y, width=FILETWO_LABEL_WIDTH, height=LABEL_HEIGHT, relx=1/3, rely=4.5/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + + self.fileTwo_Entry = ttk.Entry(master=self.options_Frame, textvariable=self.fileTwoEntry_var, font=self.font_entry, state=tk.DISABLED) + self.fileTwo_Entry_place = lambda:self.fileTwo_Entry.place(x=SUB_ENT_ROW_X, y=ENTRY_Y, width=ENTRY_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=5.5/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.fileTwo_Entry, text=INPUT_SEC_FIELDS_HELP) + self.fileTwo_Entry.configure(cursor="hand2") + + self.fileTwo_Open = ttk.Button(master=self.options_Frame, image=self.efile_img, command=lambda:OPEN_FILE_func(os.path.dirname(self.fileTwoEntry_Full_var.get()))) + self.fileTwo_Open_place = lambda:self.fileTwo_Open.place(x=ENTRY_OPEN_BUTT_X_OFF, y=ENTRY_Y, width=ENTRY_OPEN_BUTT_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=5.5/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.fileTwo_Open, text=INPUT_FOLDER_BUTTON_HELP) + + # Time Window + self.time_window_Label = self.main_window_LABEL_SET(self.options_Frame, TIME_WINDOW_MAIN_LABEL) + self.time_window_Label_place = lambda: self.time_window_Label.place(x=TIME_WINDOW_LABEL_X, y=LABEL_Y, width=TIME_WINDOW_LABEL_WIDTH, height=LABEL_HEIGHT, relx=1/3, rely=7.37/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.time_window_Option = ComboBoxMenu(self.options_Frame, textvariable=self.time_window_var, values=tuple(TIME_WINDOW_MAPPER.keys())) + self.time_window_Option_place = lambda: self.time_window_Option.place(x=SUB_ENT_ROW_X, y=ENTRY_Y, width=OPTION_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=8.37/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.time_window_Label, text=TIME_WINDOW_ALIGN_HELP) + + # Align Shifts + self.intro_analysis_Label = self.main_window_LABEL_SET(self.options_Frame, INTRO_ANALYSIS_MAIN_LABEL) + self.intro_analysis_Label_place = lambda: self.intro_analysis_Label.place(x=INTRO_ANALYSIS_LABEL_X, y=LABEL_Y, width=INTRO_ANALYSIS_LABEL_WIDTH, height=LABEL_HEIGHT, relx=2/3, rely=7.37/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.intro_analysis_Option = ComboBoxMenu(self.options_Frame, textvariable=self.intro_analysis_var, values=tuple(INTRO_MAPPER.keys())) + self.intro_analysis_Option_place = lambda: self.intro_analysis_Option.place(x=INTRO_ANALYSIS_OPTION_X, y=ENTRY_Y, width=OPTION_WIDTH, height=OPTION_HEIGHT, relx=2/3, rely=8.37/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.intro_analysis_Label, text=INTRO_ANALYSIS_ALIGN_HELP) + + # Volume 
Adjustment + self.db_analysis_Label = self.main_window_LABEL_SET(self.options_Frame, VOLUME_ADJUSTMENT_MAIN_LABEL) + self.db_analysis_Label_place = lambda: self.db_analysis_Label.place(x=DB_ANALYSIS_LABEL_X, y=LABEL_Y, width=DB_ANALYSIS_LABEL_WIDTH, height=LABEL_HEIGHT, relx=2/3, rely=7.37/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.db_analysis_Option = ComboBoxMenu(self.options_Frame, textvariable=self.db_analysis_var, values=tuple(VOLUME_MAPPER.keys())) + self.db_analysis_Option_place = lambda: self.db_analysis_Option.place(x=DB_ANALYSIS_OPTION_X, y=ENTRY_Y, width=OPTION_WIDTH, height=OPTION_HEIGHT, relx=2/3, rely=8.37/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.db_analysis_Label, text=VOLUME_ANALYSIS_ALIGN_HELP) + + # Wav-Type + self.wav_type_set_Label = self.main_window_LABEL_SET(self.options_Frame, WAVE_TYPE_TEXT) + self.wav_type_set_Label_place = lambda: self.wav_type_set_Label.place(x=WAV_TYPE_SET_LABEL_X, y=LABEL_Y, width=WAV_TYPE_SET_LABEL_WIDTH, height=LABEL_HEIGHT, relx=1/3, rely=7.37/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.wav_type_set_Option = ComboBoxMenu(self.options_Frame, textvariable=self.wav_type_set_var, values=WAV_TYPE) + self.wav_type_set_Option_place = lambda: self.wav_type_set_Option.place(x=SUB_ENT_ROW_X, y=ENTRY_Y, width=OPTION_WIDTH, height=OPTION_HEIGHT, relx=1/3, rely=8.37/self.COL1_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + + ### SHARED SETTINGS ### + + # GPU Selection + self.is_gpu_conversion_Option = ttk.Checkbutton(master=self.options_Frame, text=GPU_CONVERSION_MAIN_LABEL, variable=self.is_gpu_conversion_var) + self.is_gpu_conversion_Option_place = lambda:self.is_gpu_conversion_Option.place(x=CHECK_BOX_X, y=CHECK_BOX_Y, width=CHECK_BOX_WIDTH, height=CHECK_BOX_HEIGHT, relx=1/3, rely=5/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.is_gpu_conversion_Disable = lambda:(self.is_gpu_conversion_Option.configure(state=tk.DISABLED), self.is_gpu_conversion_var.set(False)) + self.is_gpu_conversion_Enable = lambda:self.is_gpu_conversion_Option.configure(state=tk.NORMAL) + self.help_hints(self.is_gpu_conversion_Option, text=IS_GPU_CONVERSION_HELP) + + # Vocal Only + self.is_primary_stem_only_Option = ttk.Checkbutton(master=self.options_Frame, textvariable=self.is_primary_stem_only_Text_var, variable=self.is_primary_stem_only_var, command=lambda:self.is_primary_stem_only_Option_toggle()) + self.is_primary_stem_only_Option_place = lambda:self.is_primary_stem_only_Option.place(x=CHECK_BOX_X, y=CHECK_BOX_Y, width=CHECK_BOX_WIDTH, height=CHECK_BOX_HEIGHT, relx=1/3, rely=6/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.is_primary_stem_only_Option_toggle = lambda:self.is_secondary_stem_only_var.set(False) if self.is_primary_stem_only_var.get() else self.is_secondary_stem_only_Option.configure(state=tk.NORMAL) + self.help_hints(self.is_primary_stem_only_Option, text=SAVE_STEM_ONLY_HELP) + + # Instrumental Only + self.is_secondary_stem_only_Option = ttk.Checkbutton(master=self.options_Frame, textvariable=self.is_secondary_stem_only_Text_var, variable=self.is_secondary_stem_only_var, command=lambda:self.is_secondary_stem_only_Option_toggle()) + self.is_secondary_stem_only_Option_place = lambda:self.is_secondary_stem_only_Option.place(x=CHECK_BOX_X, y=CHECK_BOX_Y, width=CHECK_BOX_WIDTH, height=CHECK_BOX_HEIGHT, relx=1/3, rely=7/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.is_secondary_stem_only_Option_toggle = 
lambda:self.is_primary_stem_only_var.set(False) if self.is_secondary_stem_only_var.get() else self.is_primary_stem_only_Option.configure(state=tk.NORMAL) + self.is_stem_only_Options_Enable = lambda:(self.is_primary_stem_only_Option.configure(state=tk.NORMAL), self.is_secondary_stem_only_Option.configure(state=tk.NORMAL)) + self.help_hints(self.is_secondary_stem_only_Option, text=SAVE_STEM_ONLY_HELP) + + # Sample Mode + self.model_sample_mode_Option = ttk.Checkbutton(master=self.options_Frame, textvariable=self.model_sample_mode_duration_checkbox_var, variable=self.model_sample_mode_var)#f'Sample ({self.model_sample_mode_duration_var.get()} Seconds)' + self.model_sample_mode_Option_place = lambda rely=8:self.model_sample_mode_Option.place(x=CHECK_BOX_X, y=CHECK_BOX_Y, width=CHECK_BOX_WIDTH, height=CHECK_BOX_HEIGHT, relx=1/3, rely=rely/self.COL2_ROWS, relwidth=1/3, relheight=1/self.COL2_ROWS) + self.help_hints(self.model_sample_mode_Option, text=MODEL_SAMPLE_MODE_HELP) + + self.GUI_LIST = (self.vr_model_Label, + self.vr_model_Option, + self.aggression_setting_Label, + self.aggression_setting_Option, + self.window_size_Label, + self.window_size_Option, + self.demucs_model_Label, + self.demucs_model_Option, + self.demucs_stems_Label, + self.demucs_stems_Option, + self.segment_Label, + self.segment_Option, + self.mdx_net_model_Label, + self.mdx_net_model_Option, + self.overlap_mdx_Label, + self.overlap_mdx_Option, + self.overlap_mdx23_Option, + self.mdxnet_stems_Label, + self.mdxnet_stems_Option, + self.mdx_segment_size_Label, + self.mdx_segment_size_Option, + self.chosen_ensemble_Label, + self.chosen_ensemble_Option, + self.save_current_settings_Label, + self.save_current_settings_Option, + self.ensemble_main_stem_Label, + self.ensemble_main_stem_Option, + self.ensemble_type_Label, + self.ensemble_type_Option, + self.ensemble_listbox_Label, + self.ensemble_listbox_Frame, + self.ensemble_listbox_Option, + self.ensemble_listbox_scroll, + self.chosen_audio_tool_Label, + self.chosen_audio_tool_Option, + self.choose_algorithm_Label, + self.choose_algorithm_Option, + self.time_stretch_rate_Label, + self.time_stretch_rate_Option, + self.wav_type_set_Label, + self.wav_type_set_Option, + self.pitch_rate_Label, + self.pitch_rate_Option, + self.fileOne_Label, + self.fileOne_Entry, + self.fileOne_Open, + self.fileTwo_Label, + self.fileTwo_Entry, + self.fileTwo_Open, + self.intro_analysis_Label, + self.intro_analysis_Option, + self.time_window_Label, + self.time_window_Option, + self.db_analysis_Label, + self.db_analysis_Option, + self.is_gpu_conversion_Option, + self.is_primary_stem_only_Option, + self.is_secondary_stem_only_Option, + self.is_primary_stem_only_Demucs_Option, + self.is_secondary_stem_only_Demucs_Option, + self.model_sample_mode_Option, + self.is_time_correction_Option, + self.is_wav_ensemble_Option) + + REFRESH_VARS = (self.mdx_net_model_var, + self.vr_model_var, + self.demucs_model_var, + # self.demucs_stems_var, + # self.mdxnet_stems_var, + self.is_chunk_demucs_var, + self.is_chunk_mdxnet_var, + # self.is_primary_stem_only_Demucs_var, + # self.is_secondary_stem_only_Demucs_var, + # self.is_primary_stem_only_var, + # self.is_secondary_stem_only_var, + self.model_download_demucs_var, + self.model_download_mdx_var, + self.model_download_vr_var, + self.select_download_var, + # self.is_primary_stem_only_Demucs_Text_var, + # self.is_secondary_stem_only_Demucs_Text_var, + self.chosen_process_method_var, + self.ensemble_main_stem_var) + + # Change States + for var in REFRESH_VARS: + 
var.trace_add('write', lambda *args: self.update_button_states()) + + def combo_box_selection_clear(self, frame:tk.Frame): + for option in frame.winfo_children(): + if type(option) is ttk.Combobox or type(option) is ComboBoxEditableMenu: + option.selection_clear() + + def focus_out_widgets(self, all_widgets, frame): + for option in all_widgets: + if not type(option) is ComboBoxEditableMenu: + option.bind('', lambda e:(option.focus(), self.combo_box_selection_clear(frame))) + + def bind_widgets(self): + """Bind widgets to the drag & drop mechanic""" + + self.chosen_audio_tool_align = tk.BooleanVar(value=True) + other_items = [self.options_Frame, self.filePaths_Frame, self.title_Label, self.progressbar, self.conversion_Button, self.settings_Button, self.stop_Button, self.command_Text] + all_widgets = self.options_Frame.winfo_children() + self.filePaths_Frame.winfo_children() + other_items + self.focus_out_widgets(all_widgets, self.options_Frame) + + if is_dnd_compatible: + self.filePaths_saveTo_Button.drop_target_register(DND_FILES) + self.filePaths_saveTo_Entry.drop_target_register(DND_FILES) + self.drop_target_register(DND_FILES) + self.dnd_bind('<>', lambda e: drop(e, accept_mode='files')) + self.filePaths_saveTo_Button.dnd_bind('<>', lambda e: drop(e, accept_mode='folder')) + self.filePaths_saveTo_Entry.dnd_bind('<>', lambda e: drop(e, accept_mode='folder')) + + self.fileOne_Entry.drop_target_register(DND_FILES) + self.fileTwo_Entry.drop_target_register(DND_FILES) + self.fileOne_Entry.dnd_bind('<>', lambda e: drop(e, accept_mode=FILE_1)) + self.fileTwo_Entry.dnd_bind('<>', lambda e: drop(e, accept_mode=FILE_2)) + + self.ensemble_listbox_Option.bind('<>', lambda e: self.chosen_ensemble_var.set(CHOOSE_ENSEMBLE_OPTION)) + self.options_Frame.bind(right_click_button, lambda e:(self.right_click_menu_popup(e, main_menu=True), self.options_Frame.focus())) + self.filePaths_musicFile_Entry.bind(right_click_button, lambda e:(self.input_right_click_menu(e), self.filePaths_musicFile_Entry.focus())) + self.filePaths_musicFile_Entry.bind('', lambda e:(self.check_is_menu_open(INPUTS_MENU), self.filePaths_musicFile_Entry.focus())) + + self.fileOne_Entry.bind('', lambda e:self.menu_batch_dual()) + self.fileTwo_Entry.bind('', lambda e:self.menu_batch_dual()) + self.fileOne_Entry.bind(right_click_button, lambda e:self.input_dual_right_click_menu(e, is_primary=True)) + self.fileTwo_Entry.bind(right_click_button, lambda e:self.input_dual_right_click_menu(e, is_primary=False)) + if not is_macos: + self.bind("", self.adjust_toplevel_positions) + + def auto_save(self): + try: + self.save_values(app_close=False, is_auto_save=True) + except Exception as e: + print(e) + + #--Input/Export Methods-- + + def linux_filebox_fix(self, is_on=True): + fg_color_set = '#575757' if is_on else "#F6F6F7" + style = ttk.Style(self) + style.configure('TButton', foreground='#F6F6F7') + style.configure('TCheckbutton', foreground='#F6F6F7') + style.configure('TCombobox', foreground='#F6F6F7') + style.configure('TEntry', foreground='#F6F6F7') + style.configure('TLabel', foreground='#F6F6F7') + style.configure('TMenubutton', foreground='#F6F6F7') + style.configure('TRadiobutton', foreground='#F6F6F7') + gui_data.sv_ttk.set_theme("dark", MAIN_FONT_NAME, 10, fg_color_set=fg_color_set) + + def show_file_dialog(self, text='Select Audio files', dialoge_type=None): + parent_win = root + is_linux = not is_windows and not is_macos + + if is_linux: + self.linux_filebox_fix() + top = tk.Toplevel(root) + top.withdraw() + 
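# The Linux branch here works around the native file dialog being hard to read with the dark
# sv_ttk theme: `linux_filebox_fix()` temporarily tweaks the ttk/sv_ttk foreground colours
# (restored with `linux_filebox_fix(False)` after the dialog closes), and a hidden,
# non-closable Toplevel is created to act as the dialog's parent before being destroyed once
# the dialog returns. A minimal sketch of the hidden-parent idea, with illustrative names
# only (not UVR's API):
#
#     import tkinter as tk
#     from tkinter import filedialog
#
#     def ask_files_with_hidden_parent(root):
#         top = tk.Toplevel(root)
#         top.withdraw()                                  # keep the helper window invisible
#         top.protocol("WM_DELETE_WINDOW", lambda: None)  # ignore close requests while the dialog is open
#         try:
#             return filedialog.askopenfilenames(parent=top, title='Select Audio files')
#         finally:
#             top.destroy()                               # always remove the helper window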
top.protocol("WM_DELETE_WINDOW", lambda: None) + parent_win = top + + if dialoge_type == MULTIPLE_FILE: + filenames = filedialog.askopenfilenames(parent=parent_win, + title=text) + elif dialoge_type == MAIN_MULTIPLE_FILE: + filenames = filedialog.askopenfilenames(parent=parent_win, + title=text, + initialfile='', + initialdir=self.lastDir) + elif dialoge_type == SINGLE_FILE: + filenames = filedialog.askopenfilename(parent=parent_win, + title=text) + elif dialoge_type == CHOOSE_EXPORT_FIR: + filenames = filedialog.askdirectory( + parent=parent_win, + title=f'Select Folder',) + + if is_linux: + print("Is Linux") + self.linux_filebox_fix(False) + top.destroy() + + return filenames + + def input_select_filedialog(self): + """Make user select music files""" + + if self.lastDir is not None: + if not os.path.isdir(self.lastDir): + self.lastDir = None + + paths = self.show_file_dialog(dialoge_type=MAIN_MULTIPLE_FILE) + + if paths: # Path selected + self.inputPaths = paths + + self.process_input_selections() + self.update_inputPaths() + + def export_select_filedialog(self): + """Make user select a folder to export the converted files in""" + + export_path = None + + path = self.show_file_dialog(dialoge_type=CHOOSE_EXPORT_FIR) + + if path: # Path selected + self.export_path_var.set(path) + export_path = self.export_path_var.get() + + return export_path + + def update_inputPaths(self): + """Update the music file entry""" + + if self.inputPaths: + if len(self.inputPaths) == 1: + text = self.inputPaths[0] + else: + count = len(self.inputPaths) - 1 + file_text = 'file' if len(self.inputPaths) == 2 else 'files' + text = f"{self.inputPaths[0]}, +{count} {file_text}" + else: + # Empty Selection + text = '' + + self.inputPathsEntry_var.set(text) + + def select_audiofile(self, path=None, is_primary=True): + """Make user select music files""" + + vars = { + True: (self.fileOneEntry_Full_var, self.fileOneEntry_var, self.fileTwoEntry_Full_var, self.fileTwoEntry_var), + False: (self.fileTwoEntry_Full_var, self.fileTwoEntry_var, self.fileOneEntry_Full_var, self.fileOneEntry_var) + } + + file_path_var, file_basename_var, file_path_2_var, file_basename_2_var = vars[is_primary] + + if not path: + path = self.show_file_dialog(text='Select Audio file', dialoge_type=SINGLE_FILE) + + if path: # Path selected + file_path_var.set(path) + file_basename_var.set(os.path.basename(path)) + + if BATCH_MODE_DUAL in file_path_2_var.get(): + file_path_2_var.set("") + file_basename_2_var.set("") + + self.DualBatch_inputPaths = [] + self.check_dual_paths() + + #--Utility Methods-- + + def restart(self): + """Restart the application after asking for confirmation""" + + confirm = messagebox.askyesno(parent=root, + title=CONFIRM_RESTART_TEXT[0], + message=CONFIRM_RESTART_TEXT[1]) + + if confirm: + self.save_values(app_close=True, is_restart=True) + + def delete_temps(self, is_start_up=False): + """Deletes temp files""" + + DIRECTORIES = (BASE_PATH, VR_MODELS_DIR, MDX_MODELS_DIR, DEMUCS_MODELS_DIR, DEMUCS_NEWER_REPO_DIR) + EXTENSIONS = (('.aes', '.txt', '.tmp')) + + try: + if os.path.isfile(f"{current_patch}{application_extension}"): + os.remove(f"{current_patch}{application_extension}") + + if not is_start_up: + if os.path.isfile(SPLASH_DOC): + os.remove(SPLASH_DOC) + + for dir in DIRECTORIES: + for temp_file in os.listdir(dir): + if temp_file.endswith(EXTENSIONS): + if os.path.isfile(os.path.join(dir, temp_file)): + os.remove(os.path.join(dir, temp_file)) + except Exception as e: + 
self.error_log_var.set(error_text(TEMP_FILE_DELETION_TEXT, e))
+
+    def get_files_from_dir(self, directory, ext, is_mdxnet=False):
+        """Gets files from the specified directory that end with the specified extension"""
+
+        return tuple(
+            x if is_mdxnet and x.endswith(CKPT) else os.path.splitext(x)[0]
+            for x in os.listdir(directory)
+            if x.endswith(ext)
+        )
+
+    def return_ensemble_stems(self, is_primary=False):
+        """Grabs and returns the chosen ensemble stems."""
+
+        ensemble_stem = self.ensemble_main_stem_var.get().partition("/")
+
+        if is_primary:
+            return ensemble_stem[0]
+        else:
+            return ensemble_stem[0], ensemble_stem[2]
+
+    def message_box(self, message):
+        """Template for confirmation box"""
+
+        confirm = messagebox.askyesno(title=message[0],
+                                      message=message[1],
+                                      parent=root)
+
+        return confirm
+
+    def error_dialoge(self, message):
+        """Template for messagebox that informs user of error"""
+
+        messagebox.showerror(master=self,
+                             title=message[0],
+                             message=message[1],
+                             parent=root)
+
+    def model_list(self, primary_stem: str, secondary_stem: str, is_4_stem_check=False, is_multi_stem=False, is_dry_check=False, is_no_demucs=False, is_check_vocal_split=False):
+
+        stem_check = self.assemble_model_data(arch_type=ENSEMBLE_STEM_CHECK, is_dry_check=is_dry_check)
+
+        def matches_stem(model: ModelData):
+            primary_match = model.primary_stem in {primary_stem, secondary_stem}
+            mdx_stem_match = primary_stem in model.mdx_model_stems and model.mdx_stem_count <= 2
+            return primary_match or mdx_stem_match if is_no_demucs else primary_match or primary_stem in model.mdx_model_stems
+
+        result = []
+
+        for model in stem_check:
+            if is_multi_stem:
+                result.append(model.model_and_process_tag)
+            elif is_4_stem_check and (model.demucs_stem_count == 4 or model.mdx_stem_count == 4):
+                result.append(model.model_and_process_tag)
+            elif matches_stem(model) or (not is_no_demucs and primary_stem.lower() in model.demucs_source_list):
+                if is_check_vocal_split:
+                    model_name = None if model.is_karaoke or not model.vocal_split_model else model.model_basename
+                else:
+                    model_name = model.model_and_process_tag
+
+                result.append(model_name)
+
+        return result
+
+    def help_hints(self, widget, text):
+        toolTip = ToolTip(widget)
+        def enter(event):
+            if self.help_hints_var.get():
+                toolTip.showtip(text)
+        def leave(event):
+            toolTip.hidetip()
+        widget.bind('<Enter>', enter)
+        widget.bind('<Leave>', leave)
+        widget.bind(right_click_button, lambda e:copy_help_hint(e))
+
+        def copy_help_hint(event):
+            if self.help_hints_var.get():
+                right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0)
+                right_click_menu.add_command(label='Copy Help Hint Text', command=right_click_menu_copy_hint)
+
+                try:
+                    right_click_menu.tk_popup(event.x_root,event.y_root)
+                    right_click_release_linux(right_click_menu)
+                finally:
+                    right_click_menu.grab_release()
+            else:
+                if widget.winfo_toplevel() == root:
+                    self.right_click_menu_popup(event, main_menu=True)
+
+        def right_click_menu_copy_hint():
+            pyperclip.copy(text)
+
+    def check_is_menu_open(self, menu):
+        try:
+            menu_mapping = {
+                VR_OPTION: (self.is_open_menu_advanced_vr_options, self.menu_advanced_vr_options, self.menu_advanced_vr_options_close_window),
+                DEMUCS_OPTION: (self.is_open_menu_advanced_demucs_options, self.menu_advanced_demucs_options, self.menu_advanced_demucs_options_close_window),
+                MDX_OPTION: (self.is_open_menu_advanced_mdx_options, self.menu_advanced_mdx_options, self.menu_advanced_mdx_options_close_window),
+                ENSEMBLE_OPTION: (self.is_open_menu_advanced_ensemble_options, 
self.menu_advanced_ensemble_options, self.menu_advanced_ensemble_options_close_window), + HELP_OPTION: (self.is_open_menu_help, self.menu_help, self.menu_help_close_window), + ERROR_OPTION: (self.is_open_menu_error_log, self.menu_error_log, self.menu_error_log_close_window), + INPUTS_MENU: (self.is_open_menu_view_inputs, self.menu_view_inputs, self.menu_view_inputs_close_window), + ALIGNMENT_TOOL: (self.is_open_menu_advanced_align_options, self.menu_advanced_align_options, self.menu_advanced_align_options_close_window) + } + + is_open, open_method, close_method = menu_mapping.get(menu, (None, None, None)) + if is_open and is_open.get(): + close_method() + open_method() + except Exception as e: + self.error_log_var.set("{}".format(error_text(menu, e))) + + def input_right_click_menu(self, event): + + right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0) + right_click_menu.add_command(label='See All Inputs', command=lambda:self.check_is_menu_open(INPUTS_MENU)) + + try: + right_click_menu.tk_popup(event.x_root,event.y_root) + right_click_release_linux(right_click_menu) + finally: + right_click_menu.grab_release() + + def input_dual_right_click_menu(self, event, is_primary:bool): + input_path = self.fileOneEntry_Full_var.get() if is_primary else self.fileTwoEntry_Full_var.get() + right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0) + right_click_menu.add_command(label=CHOOSE_INPUT_TEXT, command=lambda:self.select_audiofile(is_primary=is_primary)) + if input_path and os.path.isdir(os.path.dirname(input_path)): + right_click_menu.add_command(label=OPEN_INPUT_DIR_TEXT, command=lambda:OPEN_FILE_func(os.path.dirname(input_path))) + right_click_menu.add_command(label=BATCH_PROCESS_MENU_TEXT, command=self.menu_batch_dual) + + try: + right_click_menu.tk_popup(event.x_root,event.y_root) + right_click_release_linux(right_click_menu) + finally: + right_click_menu.grab_release() + + def cached_sources_clear(self): + + self.vr_cache_source_mapper = {} + self.mdx_cache_source_mapper = {} + self.demucs_cache_source_mapper = {} + + def cached_source_callback(self, process_method, model_name=None): + + model, sources = None, None + + if process_method == VR_ARCH_TYPE: + mapper = self.vr_cache_source_mapper + if process_method == MDX_ARCH_TYPE: + mapper = self.mdx_cache_source_mapper + if process_method == DEMUCS_ARCH_TYPE: + mapper = self.demucs_cache_source_mapper + + for key, value in mapper.items(): + if model_name in key: + model = key + sources = value + + return model, sources + + def cached_model_source_holder(self, process_method, sources, model_name=None): + + if process_method == VR_ARCH_TYPE: + self.vr_cache_source_mapper = {**self.vr_cache_source_mapper, **{model_name: sources}} + if process_method == MDX_ARCH_TYPE: + self.mdx_cache_source_mapper = {**self.mdx_cache_source_mapper, **{model_name: sources}} + if process_method == DEMUCS_ARCH_TYPE: + self.demucs_cache_source_mapper = {**self.demucs_cache_source_mapper, **{model_name: sources}} + + def cached_source_model_list_check(self, model_list: List[ModelData]): + + model: ModelData + primary_model_names = lambda process_method:[model.model_basename if model.process_method == process_method else None for model in model_list] + secondary_model_names = lambda process_method:[model.secondary_model.model_basename if model.is_secondary_model_activated and model.process_method == process_method else None for model in model_list] + + self.vr_primary_model_names = primary_model_names(VR_ARCH_TYPE) + 
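# Note on the pattern above: `primary_model_names` / `secondary_model_names` build
# per-architecture name lists that stay positionally aligned with `model_list` by inserting
# None for non-matching entries (e.g. for an illustrative [vr_a, mdx_b, vr_c] the VR list
# would be ['vr_a', None, 'vr_c']); the individual lists are then concatenated into
# `self.all_models` below.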
self.mdx_primary_model_names = primary_model_names(MDX_ARCH_TYPE) + self.demucs_primary_model_names = primary_model_names(DEMUCS_ARCH_TYPE) + self.vr_secondary_model_names = secondary_model_names(VR_ARCH_TYPE) + self.mdx_secondary_model_names = secondary_model_names(MDX_ARCH_TYPE) + self.demucs_secondary_model_names = [model.secondary_model.model_basename if model.is_secondary_model_activated and model.process_method == DEMUCS_ARCH_TYPE and not model.secondary_model is None else None for model in model_list] + self.demucs_pre_proc_model_name = [model.pre_proc_model.model_basename if model.pre_proc_model else None for model in model_list]#list(dict.fromkeys()) + + for model in model_list: + if model.process_method == DEMUCS_ARCH_TYPE and model.is_demucs_4_stem_secondaries: + if not model.is_4_stem_ensemble: + self.demucs_secondary_model_names = model.secondary_model_4_stem_model_names_list + break + else: + for i in model.secondary_model_4_stem_model_names_list: + self.demucs_secondary_model_names.append(i) + + self.all_models = self.vr_primary_model_names + self.mdx_primary_model_names + self.demucs_primary_model_names + self.vr_secondary_model_names + self.mdx_secondary_model_names + self.demucs_secondary_model_names + self.demucs_pre_proc_model_name + + def verify_audio(self, audio_file, is_process=True, sample_path=None): + is_good = False + error_data = '' + + if not type(audio_file) is tuple: + audio_file = [audio_file] + + for i in audio_file: + if os.path.isfile(i): + try: + librosa.load(i, duration=3, mono=False, sr=44100) if not type(sample_path) is str else self.create_sample(i, sample_path) + is_good = True + except Exception as e: + error_name = f'{type(e).__name__}' + traceback_text = ''.join(traceback.format_tb(e.__traceback__)) + message = f'{error_name}: "{e}"\n{traceback_text}"' + if is_process: + audio_base_name = os.path.basename(i) + self.error_log_var.set(f'{ERROR_LOADING_FILE_TEXT[0]}:\n\n\"{audio_base_name}\"\n\n{ERROR_LOADING_FILE_TEXT[1]}:\n\n{message}') + else: + error_data = AUDIO_VERIFICATION_CHECK(i, message) + + if is_process: + return is_good + else: + return is_good, error_data + + def create_sample(self, audio_file, sample_path=SAMPLE_CLIP_PATH): + try: + with audioread.audio_open(audio_file) as f: + track_length = int(f.duration) + except Exception as e: + print('Audioread failed to get duration. 
Trying Librosa...') + y, sr = librosa.load(audio_file, mono=False, sr=44100) + track_length = int(librosa.get_duration(y=y, sr=sr)) + + clip_duration = int(self.model_sample_mode_duration_var.get()) + + if track_length >= clip_duration: + offset_cut = track_length//3 + off_cut = offset_cut + track_length + if not off_cut >= clip_duration: + offset_cut = 0 + name_apped = f'{clip_duration}_second_' + else: + offset_cut, clip_duration = 0, track_length + name_apped = '' + + sample = librosa.load(audio_file, offset=offset_cut, duration=clip_duration, mono=False, sr=44100)[0].T + audio_sample = os.path.join(sample_path, f'{os.path.splitext(os.path.basename(audio_file))[0]}_{name_apped}sample.wav') + sf.write(audio_sample, sample, 44100) + + return audio_sample + + #--Right Click Menu Pop-Ups-- + + def right_click_select_settings_sub(self, parent_menu, process_method): + saved_settings_sub_menu = tk.Menu(parent_menu, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=False) + settings_options = self.last_found_settings + tuple(SAVE_SET_OPTIONS) + + for settings_options in settings_options: + settings_options = settings_options.replace("_", " ") + saved_settings_sub_menu.add_command(label=settings_options, command=lambda o=settings_options:self.selection_action_saved_settings(o, process_method=process_method)) + + saved_settings_sub_menu.insert_separator(len(self.last_found_settings)) + + return saved_settings_sub_menu + + def right_click_menu_popup(self, event, text_box=False, main_menu=False): + + def add_text_edit_options(menu): + """Add options related to text editing.""" + menu.add_command(label='Copy', command=self.right_click_menu_copy) + menu.add_command(label='Paste', command=lambda: self.right_click_menu_paste(text_box=text_box)) + menu.add_command(label='Delete', command=lambda: self.right_click_menu_delete(text_box=text_box)) + + def add_advanced_settings_options(menu, settings_mapper, var_mapper): + """Add advanced settings options to the menu.""" + current_method = self.chosen_process_method_var.get() + + if current_method in settings_mapper and (var_mapper[current_method] or (current_method == DEMUCS_ARCH_TYPE and self.is_demucs_pre_proc_model_activate_var.get())): + menu.add_cascade(label='Select Saved Settings', menu=saved_settings_sub_load_for_menu) + menu.add_separator() + for method, option in settings_mapper.items(): + if method != ENSEMBLE_MODE or current_method == ENSEMBLE_MODE: + menu.add_command(label=f'Advanced {method} Settings', command=option) + elif current_method in settings_mapper: + menu.add_command(label=f'Advanced {current_method} Settings', command=settings_mapper[current_method]) + + # Create the right-click menu + right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0) + + # Mappings + settings_mapper = { + ENSEMBLE_MODE: lambda: self.check_is_menu_open(ENSEMBLE_OPTION), + VR_ARCH_PM: lambda: self.check_is_menu_open(VR_OPTION), + MDX_ARCH_TYPE: lambda: self.check_is_menu_open(MDX_OPTION), + DEMUCS_ARCH_TYPE: lambda: self.check_is_menu_open(DEMUCS_OPTION) + } + + var_mapper = { + ENSEMBLE_MODE: True, + VR_ARCH_PM: self.vr_is_secondary_model_activate_var.get(), + MDX_ARCH_TYPE: self.mdx_is_secondary_model_activate_var.get(), + DEMUCS_ARCH_TYPE: self.demucs_is_secondary_model_activate_var.get() + } + + # Submenu for saved settings + saved_settings_sub_load_for_menu = tk.Menu(right_click_menu, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=False) + for label, arch_type in [(VR_ARCH_SETTING_LOAD, VR_ARCH_PM), (MDX_SETTING_LOAD, MDX_ARCH_TYPE), 
(DEMUCS_SETTING_LOAD, DEMUCS_ARCH_TYPE), (ALL_ARCH_SETTING_LOAD, None)]: + submenu = self.right_click_select_settings_sub(saved_settings_sub_load_for_menu, arch_type) + saved_settings_sub_load_for_menu.add_cascade(label=label, menu=submenu) + + if not main_menu: + add_text_edit_options(right_click_menu) + else: + if self.chosen_process_method_var.get() == AUDIO_TOOLS and self.chosen_audio_tool_var.get() == ALIGN_INPUTS: + right_click_menu.add_command(label='Advanced Align Tool Settings', command=lambda: self.check_is_menu_open(ALIGNMENT_TOOL)) + else: + add_advanced_settings_options(right_click_menu, settings_mapper, var_mapper) + + # Additional Settings and Help Hints + if not self.is_menu_settings_open: + right_click_menu.add_command(label='Additional Settings', command=lambda: self.menu_settings(select_tab_2=True)) + + help_hints_label = 'Enable' if not self.help_hints_var.get() else 'Disable' + right_click_menu.add_command(label=f'{help_hints_label} Help Hints', command=lambda: self.help_hints_var.set(not self.help_hints_var.get())) + + if self.error_log_var.get(): + right_click_menu.add_command(label='Error Log', command=lambda: self.check_is_menu_open(ERROR_OPTION)) + + try: + right_click_menu.tk_popup(event.x_root, event.y_root) + right_click_release_linux(right_click_menu) + finally: + right_click_menu.grab_release() + + def right_click_menu_copy(self): + hightlighted_text = self.current_text_box.selection_get() + self.clipboard_clear() + self.clipboard_append(hightlighted_text) + + def right_click_menu_paste(self, text_box=False): + clipboard = self.clipboard_get() + self.right_click_menu_delete(text_box=True) if text_box else self.right_click_menu_delete() + self.current_text_box.insert(self.current_text_box.index(tk.INSERT), clipboard) + + def right_click_menu_delete(self, text_box=False): + if text_box: + try: + s0 = self.current_text_box.index("sel.first") + s1 = self.current_text_box.index("sel.last") + self.current_text_box.tag_configure('highlight') + self.current_text_box.tag_add("highlight", s0, s1) + start_indexes = self.current_text_box.tag_ranges("highlight")[0::2] + end_indexes = self.current_text_box.tag_ranges("highlight")[1::2] + + for start, end in zip(start_indexes, end_indexes): + self.current_text_box.tag_remove("highlight", start, end) + + for start, end in zip(start_indexes, end_indexes): + self.current_text_box.delete(start, end) + except Exception as e: + print('RIGHT-CLICK-DELETE ERROR: \n', e) + else: + self.current_text_box.delete(0, tk.END) + + def right_click_console(self, event): + right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0) + right_click_menu.add_command(label='Copy', command=self.command_Text.copy_text) + right_click_menu.add_command(label='Select All', command=self.command_Text.select_all_text) + + try: + right_click_menu.tk_popup(event.x_root,event.y_root) + right_click_release_linux(right_click_menu) + finally: + right_click_menu.grab_release() + + #--Secondary Window Methods-- + + def vocal_splitter_Button_opt(self, top_window, frame, pady, width=15): + vocal_splitter_Button = ttk.Button(frame, text=VOCAL_SPLITTER_OPTIONS_TEXT, command=lambda:self.pop_up_set_vocal_splitter(top_window), width=width)# + vocal_splitter_Button.grid(pady=pady) + + def adjust_toplevel_positions(self, event): + # Copy the list to avoid modifying while iterating + for toplevel in self.toplevels.copy(): + # Check if the toplevel window is still alive + if not toplevel.winfo_exists(): + self.toplevels.remove(toplevel) + else: + 
menu_offset_x = (root.winfo_width() - toplevel.winfo_width()) // 2 + menu_offset_y = (root.winfo_height() - toplevel.winfo_height()) // 2 + toplevel.geometry("+%d+%d" % (root.winfo_x() + menu_offset_x, root.winfo_y() + menu_offset_y)) + + def menu_placement(self, window: tk.Toplevel, title, pop_up=False, is_help_hints=False, close_function=None, frame_list=None, top_window=None): + """Prepares and centers each secondary window relative to the main window""" + + top_window = top_window if top_window else root + window.withdraw() + window.resizable(False, False) + window.wm_transient(top_window) + window.title(title) + window.iconbitmap(ICON_IMG_PATH) if is_windows else self.tk.call('wm', 'iconphoto', window._w, tk.PhotoImage(file=MAIN_ICON_IMG_PATH)) + + root_location_x = root.winfo_x() + root_location_y = root.winfo_y() + root_x = root.winfo_width() + root_y = root.winfo_height() + window.update() if is_windows else window.update_idletasks() + sub_menu_x = window.winfo_reqwidth() + sub_menu_y = window.winfo_reqheight() + menu_offset_x = (root_x - sub_menu_x) // 2 + menu_offset_y = (root_y - sub_menu_y) // 2 + window.geometry("+%d+%d" %(root_location_x+menu_offset_x, root_location_y+menu_offset_y)) + + window.deiconify() + window.configure(bg=BG_COLOR) + + if not is_macos: + self.toplevels.append(window) + + def right_click_menu(event): + help_hints_label = 'Enable' if self.help_hints_var.get() == False else 'Disable' + help_hints_bool = True if self.help_hints_var.get() == False else False + right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0) + if is_help_hints: + right_click_menu.add_command(label=f'{help_hints_label} Help Hints', command=lambda:self.help_hints_var.set(help_hints_bool)) + right_click_menu.add_command(label='Exit Window', command=close_function) + + try: + right_click_menu.tk_popup(event.x_root,event.y_root) + right_click_release_linux(right_click_menu, window) + finally: + right_click_menu.grab_release() + + if close_function: + window.bind(right_click_button, lambda e:right_click_menu(e)) + + if frame_list: + for frame in frame_list: + #self.adjust_widget_widths(frame) + self.focus_out_widgets(frame.winfo_children() + [frame], frame) + + if pop_up: + window.attributes('-topmost', 'true') if OPERATING_SYSTEM == "Linux" else None + window.grab_set() + root.wait_window(window) + + def adjust_widget_widths(self, frame): + + def resize_widget(widgets): + max_width = max(wid.winfo_width() for wid in widgets) + for wid in widgets: + if isinstance(wid, (tk.Button, ttk.Combobox)): + # For widgets where width represents characters, not pixels + wid.configure(width=int(max_width / wid.winfo_pixels('1c'))) + else: + # For widgets where width represents pixels + wid.configure(width=max_width) + + resize_widget([widget for widget in frame.winfo_children() if isinstance(widget, tk.Button)]) + resize_widget([widget for widget in frame.winfo_children() if isinstance(widget, ttk.Combobox)]) + + def menu_move_tab(notebook: ttk.Notebook, tab_text, new_position): + # Get the tab ID + tab_id = None + for tab in notebook.tabs(): + if notebook.tab(tab, "text") == tab_text: + tab_id = tab + break + + if tab_id is None: + print(f"No tab named '{tab_text}'") + return + + # remove the tab + notebook.forget(tab_id) + + # add it back in new position + notebook.insert(new_position, tab_id) + + def menu_tab_control(self, toplevel, ai_network_vars, is_demucs=False, is_mdxnet=False): + """Prepares the tabs setup for some windows""" + + tabControl = ttk.Notebook(toplevel) + + 
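# The notebook below defers building its heavier tabs: the "Secondary Model" tab (and, for
# Demucs, the pre-process model tab) is only populated the first time it is selected, via the
# notebook's tab-changed callback, with a temporary "Updating model lists..." label shown
# while the content loads. A minimal sketch of that lazy-tab idea, assuming nothing about
# UVR's own helpers (names here are illustrative):
#
#     from tkinter import ttk
#
#     def add_lazy_tab(notebook: ttk.Notebook, title, build_content):
#         frame = ttk.Frame(notebook)
#         notebook.add(frame, text=title)
#         state = {"loaded": False}
#
#         def on_tab_changed(_event):
#             if notebook.select() == str(frame) and not state["loaded"]:
#                 state["loaded"] = True
#                 build_content(frame)   # build the expensive widgets only on first visit
#
#         notebook.bind("<<NotebookTabChanged>>", on_tab_changed, add="+")
#         return frame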
tab1 = ttk.Frame(tabControl) + tab2 = ttk.Frame(tabControl) + + tabControl.add(tab1, text=SETTINGS_GUIDE_TEXT) + tabControl.add(tab2, text=SECONDARY_MODEL_TEXT) + + tab1.grid_rowconfigure(0, weight=1) + tab1.grid_columnconfigure(0, weight=1) + + tab2.grid_rowconfigure(0, weight=1) + tab2.grid_columnconfigure(0, weight=1) + + if is_demucs or is_mdxnet: + tab3 = ttk.Frame(tabControl) + tabControl.add(tab3, text=PREPROCESS_MODEL_CHOOSE_TEXT if is_demucs else MDX23C_ONLY_OPTIONS_TEXT) + tab3.grid_rowconfigure(0, weight=1) + tab3.grid_columnconfigure(0, weight=1) + + tabControl.pack(expand=1, fill=tk.BOTH) + + self.tab2_loaded = False + self.tab3_loaded = False + + def on_tab_selected(event): + # Check if it's tab2 (by tab id or tab title) and if it hasn't been loaded before + load_screen = False + if event.widget.tab('current', option='text') == 'Secondary Model' and not self.tab2_loaded: + tab = tab2 + self.tab2_loaded = True + tab_load = lambda:self.menu_secondary_model(tab, ai_network_vars) + load_screen = True + elif event.widget.tab('current', option='text') == PREPROCESS_MODEL_CHOOSE_TEXT and not self.tab3_loaded: + tab = tab3 + self.tab3_loaded = True + tab_load = lambda:self.menu_preproc_model(tab) + load_screen = True + + if load_screen: + # Step 1: Add "Loading..." label + loading_label = ttk.Label(tab, text="Updating model lists...", font=Font(family=MAIN_FONT_NAME, size=14)) + loading_label.place(relx=0.5, rely=0.5, anchor=tk.CENTER) # Assuming you want to center it + + # Step 2: Update the UI to show the label + tab.update_idletasks() + + # Load the content + tab_load() + + # Step 3: Remove or update the "Loading..." label + loading_label.destroy() # Remove the label. Or you can update its text if desired. + + #self.on_tab_changed(tabControl) + + tabControl.bind("<>", on_tab_selected) + + if is_demucs or is_mdxnet: + return tab1, tab3 + else: + return tab1 + + def menu_view_inputs(self): + + menu_view_inputs_top = tk.Toplevel(root) + + self.is_open_menu_view_inputs.set(True) + self.menu_view_inputs_close_window = lambda:close_window() + menu_view_inputs_top.protocol("WM_DELETE_WINDOW", self.menu_view_inputs_close_window) + + input_length_var = tk.StringVar(value='') + input_info_text_var = tk.StringVar(value='') + is_widen_box_var = tk.BooleanVar(value=False) + is_play_file_var = tk.BooleanVar(value=False) + varification_text_var = tk.StringVar(value=VERIFY_INPUTS_TEXT) + + reset_list = lambda:(input_files_listbox_Option.delete(0, 'end'), [input_files_listbox_Option.insert(tk.END, inputs) for inputs in self.inputPaths]) + audio_input_total = lambda:input_length_var.set(f'{AUDIO_INPUT_TOTAL_TEXT}: {len(self.inputPaths)}') + audio_input_total() + + def list_diff(list1, list2): return list(set(list1).symmetric_difference(set(list2))) + + def list_to_string(list1): return '\n'.join(''.join(sub) for sub in list1) + + def close_window(): + self.verification_thread.kill() if self.thread_check(self.verification_thread) else None + self.is_open_menu_view_inputs.set(False) + menu_view_inputs_top.destroy() + + def drag_n_drop(e): + input_info_text_var.set('') + drop(e, accept_mode='files') + reset_list() + audio_input_total() + + def selected_files(is_remove=False): + if not self.thread_check(self.active_processing_thread): + items_list = [input_files_listbox_Option.get(i) for i in input_files_listbox_Option.curselection()] + inputPaths = list(self.inputPaths)# if is_remove else items_list + if is_remove: + [inputPaths.remove(i) for i in items_list if items_list] + else: + 
[inputPaths.remove(i) for i in self.inputPaths if i not in items_list] + removed_files = list_diff(self.inputPaths, inputPaths) + [input_files_listbox_Option.delete(input_files_listbox_Option.get(0, tk.END).index(i)) for i in removed_files] + starting_len = len(self.inputPaths) + self.inputPaths = tuple(inputPaths) + self.update_inputPaths() + audio_input_total() + input_info_text_var.set(f'{starting_len - len(self.inputPaths)} input(s) removed.') + else: + input_info_text_var.set('You cannot remove inputs during an active process.') + + def box_size(): + input_info_text_var.set('') + input_files_listbox_Option.config(width=230, height=25) if is_widen_box_var.get() else input_files_listbox_Option.config(width=110, height=17) + self.menu_placement(menu_view_inputs_top, 'Selected Inputs', pop_up=True) + + def input_options(is_select_inputs=True): + input_info_text_var.set('') + if is_select_inputs: + self.input_select_filedialog() + else: + self.inputPaths = () + reset_list() + self.update_inputPaths() + audio_input_total() + + def pop_open_file_path(is_play_file=False): + if self.inputPaths: + track_selected = self.inputPaths[input_files_listbox_Option.index(tk.ACTIVE)] + if os.path.isfile(track_selected): + OPEN_FILE_func(track_selected if is_play_file else os.path.dirname(track_selected)) + + def get_export_dir(): + if os.path.isdir(self.export_path_var.get()): + export_dir = self.export_path_var.get() + else: + export_dir = self.export_select_filedialog() + + return export_dir + + def verify_audio(is_create_samples=False): + inputPaths = list(self.inputPaths) + iterated_list = self.inputPaths if not is_create_samples else [input_files_listbox_Option.get(i) for i in input_files_listbox_Option.curselection()] + removed_files = [] + export_dir = None + total_audio_count, current_file = len(iterated_list), 0 + if iterated_list: + for i in iterated_list: + current_file += 1 + input_info_text_var.set(f'{SAMPLE_BEGIN if is_create_samples else VERIFY_BEGIN}{current_file}/{total_audio_count}') + if is_create_samples: + export_dir = get_export_dir() + if not export_dir: + input_info_text_var.set(f'No export directory selected.') + return + is_good, error_data = self.verify_audio(i, is_process=False, sample_path=export_dir) + if not is_good: + inputPaths.remove(i) + removed_files.append(error_data)#sample = self.create_sample(i) + + varification_text_var.set(VERIFY_INPUTS_TEXT) + input_files_listbox_Option.configure(state=tk.NORMAL) + + if removed_files: + input_info_text_var.set(f'{len(removed_files)} {BROKEN_OR_INCOM_TEXT}') + error_text = '' + for i in removed_files: + error_text += i + removed_files = list_diff(self.inputPaths, inputPaths) + [input_files_listbox_Option.delete(input_files_listbox_Option.get(0, tk.END).index(i)) for i in removed_files] + self.error_log_var.set(REMOVED_FILES(list_to_string(removed_files), error_text)) + self.inputPaths = tuple(inputPaths) + self.update_inputPaths() + else: + input_info_text_var.set(f'No errors found!') + + audio_input_total() + else: + input_info_text_var.set(f'{NO_FILES_TEXT} {SELECTED_VER if is_create_samples else DETECTED_VER}') + varification_text_var.set(VERIFY_INPUTS_TEXT) + input_files_listbox_Option.configure(state=tk.NORMAL) + return + + audio_input_total() + + def verify_audio_start_thread(is_create_samples=False): + + if not self.thread_check(self.active_processing_thread): + if not self.thread_check(self.verification_thread): + varification_text_var.set('Stop Progress') + input_files_listbox_Option.configure(state=tk.DISABLED) + 
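# The thread handling above makes the "Verify Inputs" button double as a stop button:
# verification runs on a killable KThread so a long check can be aborted, the listbox is
# disabled (and the button text switched to "Stop Progress") while it runs, and a new
# verification is refused while a separation/conversion thread is still active.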
self.verification_thread = KThread(target=lambda:verify_audio(is_create_samples=is_create_samples)) + self.verification_thread.start() + else: + input_files_listbox_Option.configure(state=tk.NORMAL) + varification_text_var.set(VERIFY_INPUTS_TEXT) + input_info_text_var.set('Process Stopped') + self.verification_thread.kill() + else: + input_info_text_var.set('You cannot verify inputs during an active process.') + + def right_click_menu(event): + right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0) + right_click_menu.add_command(label='Remove Selected Items Only', command=lambda:selected_files(is_remove=True)) + right_click_menu.add_command(label='Keep Selected Items Only', command=lambda:selected_files(is_remove=False)) + right_click_menu.add_command(label='Clear All Input(s)', command=lambda:input_options(is_select_inputs=False)) + right_click_menu.add_separator() + right_click_menu_sub = tk.Menu(right_click_menu, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=False) + right_click_menu.add_command(label='Verify and Create Samples of Selected Inputs', command=lambda:verify_audio_start_thread(is_create_samples=True)) + right_click_menu.add_cascade(label='Preferred Double Click Action', menu=right_click_menu_sub) + if is_play_file_var.get(): + right_click_menu_sub.add_command(label='Enable: Open Audio File Directory', command=lambda:(input_files_listbox_Option.bind('', lambda e:pop_open_file_path()), is_play_file_var.set(False))) + else: + right_click_menu_sub.add_command(label='Enable: Open Audio File', command=lambda:(input_files_listbox_Option.bind('', lambda e:pop_open_file_path(is_play_file=True)), is_play_file_var.set(True))) + + try: + right_click_menu.tk_popup(event.x_root,event.y_root) + right_click_release_linux(right_click_menu, menu_view_inputs_top) + finally: + right_click_menu.grab_release() + + menu_view_inputs_Frame = self.menu_FRAME_SET(menu_view_inputs_top) + menu_view_inputs_Frame.grid(row=0) + + self.main_window_LABEL_SET(menu_view_inputs_Frame, SELECTED_INPUTS).grid(row=0,column=0,padx=0,pady=MENU_PADDING_1) + tk.Label(menu_view_inputs_Frame, textvariable=input_length_var, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground=FG_COLOR).grid(row=1, column=0, padx=0, pady=MENU_PADDING_1) + if not OPERATING_SYSTEM == "Linux": + ttk.Button(menu_view_inputs_Frame, text=SELECT_INPUTS, command=lambda:input_options()).grid(row=2,column=0,padx=0,pady=MENU_PADDING_2) + input_files_listbox_Option = tk.Listbox(menu_view_inputs_Frame, selectmode=tk.EXTENDED, activestyle='dotbox', font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), background='#101414', exportselection=0, width=110, height=17, relief=tk.SOLID, borderwidth=0) + input_files_listbox_vertical_scroll = ttk.Scrollbar(menu_view_inputs_Frame, orient=tk.VERTICAL) + input_files_listbox_Option.config(yscrollcommand=input_files_listbox_vertical_scroll.set) + input_files_listbox_vertical_scroll.configure(command=input_files_listbox_Option.yview) + input_files_listbox_Option.grid(row=4, sticky=tk.W) + input_files_listbox_vertical_scroll.grid(row=4, column=1, sticky=tk.NS) + + tk.Label(menu_view_inputs_Frame, textvariable=input_info_text_var, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground=FG_COLOR).grid(row=5, column=0, padx=0, pady=0) + ttk.Checkbutton(menu_view_inputs_Frame, text=WIDEN_BOX, variable=is_widen_box_var, command=lambda:box_size()).grid(row=6,column=0,padx=0,pady=0) + verify_audio_Button = ttk.Button(menu_view_inputs_Frame, textvariable=varification_text_var, command=lambda:verify_audio_start_thread()) + 
verify_audio_Button.grid(row=7,column=0,padx=0,pady=MENU_PADDING_1) + ttk.Button(menu_view_inputs_Frame, text=CLOSE_WINDOW, command=lambda:menu_view_inputs_top.destroy()).grid(row=8,column=0,padx=0,pady=MENU_PADDING_1) + + if is_dnd_compatible: + menu_view_inputs_top.drop_target_register(DND_FILES) + menu_view_inputs_top.dnd_bind('<>', lambda e: drag_n_drop(e)) + input_files_listbox_Option.bind(right_click_button, lambda e:right_click_menu(e)) + input_files_listbox_Option.bind('', lambda e:pop_open_file_path()) + input_files_listbox_Option.bind('', lambda e:selected_files(is_remove=True)) + input_files_listbox_Option.bind('', lambda e:selected_files(is_remove=False)) + + reset_list() + + self.menu_placement(menu_view_inputs_top, 'Selected Inputs', pop_up=True) + + def menu_batch_dual(self): + menu_batch_dual_top = tk.Toplevel(root) + + def drag_n_drop(event, accept_mode): + listbox = left_frame if accept_mode == FILE_1_LB else right_frame + paths = drop(event, accept_mode) + for item in paths: + if item not in listbox.path_list: # only add file if it's not already in the list + basename = os.path.basename(item) + listbox.listbox.insert(tk.END, basename) # insert basename to the listbox + listbox.path_list.append(item) # append the file path to the list + listbox.update_displayed_index() + + def move_entry(is_primary=True): + if is_primary: + selected_frame, other_frame = left_frame, right_frame + else: + selected_frame, other_frame = right_frame, left_frame + + selected = selected_frame.listbox.curselection() + + if selected: + basename = selected_frame.listbox.get(selected[0]).split(': ', 1)[1] # remove displayed index + + if basename in other_frame.basename_to_path: + return + + path = selected_frame.basename_to_path[basename] # Get the actual path + + selected_frame.listbox.delete(selected) + other_frame.listbox.insert(tk.END, basename) + + selected_frame.path_list.remove(path) + del selected_frame.basename_to_path[basename] + + other_frame.path_list.append(path) + other_frame.basename_to_path[basename] = path + + selected_frame.update_displayed_index() + other_frame.update_displayed_index() + + def open_selected_path(lb, is_play_file=False): + selected_frame = left_frame if lb == FILE_1_LB else right_frame + selected_path = selected_frame.get_selected_path() + + if selected_path: + if os.path.isfile(selected_path): + OPEN_FILE_func(selected_path if is_play_file else os.path.dirname(selected_path)) + + def clear_all_data(lb): + selected_frame = left_frame if lb == FILE_1_LB else right_frame + selected_frame.listbox.delete(0, "end") + selected_frame.path_list.clear() + selected_frame.basename_to_path.clear() + + def clear_all(event, lb): + selected_frame = left_frame if lb == FILE_1_LB else right_frame + selected = selected_frame.listbox.curselection() + + right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0) + if selected: + right_click_menu.add_command(label='Open Location', command=lambda:open_selected_path(lb)) + right_click_menu.add_command(label='Open File', command=lambda:open_selected_path(lb, is_play_file=True)) + right_click_menu.add_command(label='Clear All', command=lambda:clear_all_data(lb)) + + try: + right_click_menu.tk_popup(event.x_root,event.y_root) + right_click_release_linux(right_click_menu, menu_batch_dual_top) + finally: + right_click_menu.grab_release() + + def gather_input_list(): + left_paths = list(left_frame.basename_to_path.values()) + right_paths = list(right_frame.basename_to_path.values()) + + clear_all_data(FILE_1_LB) + 
clear_all_data(FILE_2_LB) + + if left_paths and right_paths: + left_frame.select_input(left_paths) + right_frame.select_input(right_paths) + + self.DualBatch_inputPaths = list(zip(left_paths, right_paths)) + self.check_dual_paths() + menu_batch_dual_top.destroy() + + menu_view_inputs_Frame = self.menu_FRAME_SET(menu_batch_dual_top) + menu_view_inputs_Frame.grid(row=0) + + left_frame = ListboxBatchFrame(menu_view_inputs_Frame, self.file_one_sub_var.get().title(), move_entry, self.right_img, self.img_mapper) + left_frame.grid(row=0, column=0, sticky="nsew", padx=(0, 5)) + + right_frame = ListboxBatchFrame(menu_view_inputs_Frame, self.file_two_sub_var.get().title(), lambda:move_entry(False), self.left_img, self.img_mapper) + right_frame.grid(row=0, column=1, sticky="nsew", padx=(5, 0)) + + left_frame.listbox.drop_target_register(DND_FILES) + right_frame.listbox.drop_target_register(DND_FILES) + left_frame.listbox.dnd_bind('<>', lambda e: drag_n_drop(e, FILE_1_LB)) + right_frame.listbox.dnd_bind('<>', lambda e: drag_n_drop(e, FILE_2_LB)) + left_frame.listbox.dnd_bind(right_click_button, lambda e: clear_all(e, FILE_1_LB)) + right_frame.listbox.dnd_bind(right_click_button, lambda e: clear_all(e, FILE_2_LB)) + + menu_view_inputs_bottom_Frame = self.menu_FRAME_SET(menu_batch_dual_top) + menu_view_inputs_bottom_Frame.grid(row=1) + + confirm_btn = ttk.Button(menu_view_inputs_bottom_Frame, text=CONFIRM_ENTRIES, command=gather_input_list) + confirm_btn.grid(pady=MENU_PADDING_1) + + close_btn = ttk.Button(menu_view_inputs_bottom_Frame, text=CLOSE_WINDOW, command=lambda:menu_batch_dual_top.destroy()) + close_btn.grid(pady=MENU_PADDING_1) + + if self.check_dual_paths(): + left_frame_pane = [i[0] for i in self.DualBatch_inputPaths] + right_frame_pane = [i[1] for i in self.DualBatch_inputPaths] + left_frame.update_displayed_index(left_frame_pane) + right_frame.update_displayed_index(right_frame_pane) + self.check_dual_paths() + + self.menu_placement(menu_batch_dual_top, DUAL_AUDIO_PROCESSING, pop_up=True) + + def check_dual_paths(self, is_fill_menu=False): + + if self.DualBatch_inputPaths: + first_paths = tuple(self.DualBatch_inputPaths) + first_paths_len = len(first_paths) + first_paths = first_paths[0] + + if first_paths_len == 1: + file1_base_text = os.path.basename(first_paths[0]) + file2_base_text = os.path.basename(first_paths[1]) + else: + first_paths_len = first_paths_len - 1 + file1_base_text = f"{os.path.basename(first_paths[0])}, +{first_paths_len} file(s){BATCH_MODE_DUAL}" + file2_base_text = f"{os.path.basename(first_paths[1])}, +{first_paths_len} file(s){BATCH_MODE_DUAL}" + + self.fileOneEntry_var.set(file1_base_text) + self.fileOneEntry_Full_var.set(f"{first_paths[0]}") + self.fileTwoEntry_var.set(file2_base_text) + self.fileTwoEntry_Full_var.set(f"{first_paths[1]}") + else: + if is_fill_menu: + file_one = self.fileOneEntry_Full_var.get() + file_two = self.fileTwoEntry_Full_var.get() + + if file_one and file_two and BATCH_MODE_DUAL not in file_one and BATCH_MODE_DUAL not in file_two: + self.DualBatch_inputPaths = [(file_one, file_two)] + else: + if BATCH_MODE_DUAL in self.fileOneEntry_var.get(): + self.fileOneEntry_var.set("") + self.fileOneEntry_Full_var.set("") + if BATCH_MODE_DUAL in self.fileTwoEntry_var.get(): + self.fileTwoEntry_var.set("") + self.fileTwoEntry_Full_var.set("") + + return self.DualBatch_inputPaths + + def fill_gpu_list(self): + try: + if cuda_available: + self.cuda_device_list = [f"{torch.cuda.get_device_properties(i).name}:{i}" for i in 
range(torch.cuda.device_count())] + self.cuda_device_list.insert(0, DEFAULT) + #print(self.cuda_device_list) + + # if directml_available: + # self.opencl_list = [f"{torch_directml.device_name(i)}:{i}" for i in range(torch_directml.device_count())] + # self.opencl_list.insert(0, DEFAULT) + except Exception as e: + print(e) + + # if is_cuda_only: + # self.is_use_opencl_var.set(False) + + check_gpu_list = self.cuda_device_list#self.opencl_list if is_opencl_only or self.is_use_opencl_var.get() else self.cuda_device_list + if not self.device_set_var.get() in check_gpu_list: + self.device_set_var.set(DEFAULT) + + def loop_gpu_list(self, option_menu:ComboBoxMenu, menu_name, option_list): + option_menu['values'] = option_list + option_menu.update_dropdown_size(option_list, menu_name) + + def menu_settings(self, select_tab_2=False, select_tab_3=False):#** + """Open Settings and Download Center""" + + settings_menu = tk.Toplevel() + + option_var = tk.StringVar(value=SELECT_SAVED_SETTING) + self.is_menu_settings_open = True + + tabControl = ttk.Notebook(settings_menu) + + tab1 = ttk.Frame(tabControl) + tab2 = ttk.Frame(tabControl) + tab3 = ttk.Frame(tabControl) + + tabControl.add(tab1, text = SETTINGS_GUIDE_TEXT) + tabControl.add(tab2, text = ADDITIONAL_SETTINGS_TEXT) + tabControl.add(tab3, text = DOWNLOAD_CENTER_TEXT) + + tabControl.pack(expand = 1, fill ="both") + + tab1.grid_rowconfigure(0, weight=1) + tab1.grid_columnconfigure(0, weight=1) + + tab2.grid_rowconfigure(0, weight=1) + tab2.grid_columnconfigure(0, weight=1) + + tab3.grid_rowconfigure(0, weight=1) + tab3.grid_columnconfigure(0, weight=1) + + self.disable_tabs = lambda:(tabControl.tab(0, state="disabled"), tabControl.tab(1, state="disabled")) + self.enable_tabs = lambda:(tabControl.tab(0, state="normal"), tabControl.tab(1, state="normal")) + self.main_menu_var = tk.StringVar(value=CHOOSE_ADVANCED_MENU_TEXT) + + self.download_progress_bar_var.set(0) + self.download_progress_info_var.set('') + self.download_progress_percent_var.set('') + + def set_vars_for_sample_mode(event): + value = int(float(event)) + value = round(value / 5) * 5 + self.model_sample_mode_duration_var.set(value) + self.model_sample_mode_duration_checkbox_var.set(SAMPLE_MODE_CHECKBOX(value)) + self.model_sample_mode_duration_label_var.set(f'{value} {SECONDS_TEXT}') + + #Settings Tab 1 + settings_menu_main_Frame = self.menu_FRAME_SET(tab1) + settings_menu_main_Frame.grid(row=0) + settings_title_Label = self.menu_title_LABEL_SET(settings_menu_main_Frame, GENERAL_MENU_TEXT) + settings_title_Label.grid(pady=MENU_PADDING_2) + + select_Label = self.menu_sub_LABEL_SET(settings_menu_main_Frame, ADDITIONAL_MENUS_INFORMATION_TEXT) + select_Label.grid(pady=MENU_PADDING_1) + + select_Option = ComboBoxMenu(settings_menu_main_Frame, textvariable=self.main_menu_var, values=OPTION_LIST, width=GEN_SETTINGS_WIDTH+3) + select_Option.update_dropdown_size(OPTION_LIST, 'menuchoose', command=lambda e:(self.check_is_menu_open(self.main_menu_var.get()), close_window())) + select_Option.grid(pady=MENU_PADDING_1) + + help_hints_Option = ttk.Checkbutton(settings_menu_main_Frame, text=ENABLE_HELP_HINTS_TEXT, variable=self.help_hints_var, width=HELP_HINT_CHECKBOX_WIDTH) + help_hints_Option.grid(pady=MENU_PADDING_1) + + open_app_dir_Button = ttk.Button(settings_menu_main_Frame, text=OPEN_APPLICATION_DIRECTORY_TEXT, command=lambda:OPEN_FILE_func(BASE_PATH), width=SETTINGS_BUT_WIDTH) + open_app_dir_Button.grid(pady=MENU_PADDING_1) + + reset_all_app_settings_Button = ttk.Button(settings_menu_main_Frame, 
text=RESET_ALL_SETTINGS_TO_DEFAULT_TEXT, command=lambda:self.load_to_default_confirm(), width=SETTINGS_BUT_WIDTH)#pop_up_change_model_defaults + reset_all_app_settings_Button.grid(pady=MENU_PADDING_1) + + if is_windows: + restart_app_Button = ttk.Button(settings_menu_main_Frame, text=RESTART_APPLICATION_TEXT, command=lambda:self.restart()) + restart_app_Button.grid(pady=MENU_PADDING_1) + + delete_your_settings_Label = self.menu_title_LABEL_SET(settings_menu_main_Frame, DELETE_USER_SAVED_SETTING_TEXT) + delete_your_settings_Label.grid(pady=MENU_PADDING_2) + self.help_hints(delete_your_settings_Label, text=DELETE_YOUR_SETTINGS_HELP) + + delete_your_settings_Option = ComboBoxMenu(settings_menu_main_Frame, textvariable=option_var, width=GEN_SETTINGS_WIDTH+3) + delete_your_settings_Option.grid(padx=20,pady=MENU_PADDING_1) + self.deletion_list_fill(delete_your_settings_Option, option_var, SETTINGS_CACHE_DIR, SELECT_SAVED_SETTING, menu_name='deletesetting') + + app_update_Label = self.menu_title_LABEL_SET(settings_menu_main_Frame, APPLICATION_UPDATES_TEXT) + app_update_Label.grid(pady=MENU_PADDING_2) + + self.app_update_button = ttk.Button(settings_menu_main_Frame, textvariable=self.app_update_button_Text_var, width=SETTINGS_BUT_WIDTH-2, command=lambda:self.pop_up_update_confirmation()) + self.app_update_button.grid(pady=MENU_PADDING_1) + + self.app_update_status_Label = tk.Label(settings_menu_main_Frame, textvariable=self.app_update_status_Text_var, padx=3, pady=3, font=(MAIN_FONT_NAME, f"{FONT_SIZE_4}"), width=UPDATE_LABEL_WIDTH, justify="center", relief="ridge", fg="#13849f") + self.app_update_status_Label.grid(pady=20) + + donate_Button = ttk.Button(settings_menu_main_Frame, image=self.donate_img, command=lambda:webbrowser.open_new_tab(DONATE_LINK_BMAC)) + donate_Button.grid(pady=MENU_PADDING_2) + self.help_hints(donate_Button, text=DONATE_HELP) + + close_settings_win_Button = ttk.Button(settings_menu_main_Frame, text=CLOSE_WINDOW, command=lambda:close_window()) + close_settings_win_Button.grid(pady=MENU_PADDING_1) + + #Settings Tab 2 + settings_menu_format_Frame = self.menu_FRAME_SET(tab2) + settings_menu_format_Frame.grid(row=0) + + audio_format_title_Label = self.menu_title_LABEL_SET(settings_menu_format_Frame, AUDIO_FORMAT_SETTINGS_TEXT, width=20) + audio_format_title_Label.grid(pady=MENU_PADDING_2) + + wav_type_set_Label = self.menu_sub_LABEL_SET(settings_menu_format_Frame, WAV_TYPE_TEXT) + wav_type_set_Label.grid(pady=MENU_PADDING_1) + + wav_type_set_Option = ComboBoxMenu(settings_menu_format_Frame, textvariable=self.wav_type_set_var, values=WAV_TYPE, width=HELP_HINT_CHECKBOX_WIDTH) + wav_type_set_Option.grid(padx=20,pady=MENU_PADDING_1) + + mp3_bit_set_Label = self.menu_sub_LABEL_SET(settings_menu_format_Frame, MP3_BITRATE_TEXT) + mp3_bit_set_Label.grid(pady=MENU_PADDING_1) + + mp3_bit_set_Option = ComboBoxMenu(settings_menu_format_Frame, textvariable=self.mp3_bit_set_var, values=MP3_BIT_RATES, width=HELP_HINT_CHECKBOX_WIDTH) + mp3_bit_set_Option.grid(padx=20,pady=MENU_PADDING_1) + + audio_format_title_Label = self.menu_title_LABEL_SET(settings_menu_format_Frame, GENERAL_PROCESS_SETTINGS_TEXT) + audio_format_title_Label.grid(pady=MENU_PADDING_2) + + is_testing_audio_Option = ttk.Checkbutton(settings_menu_format_Frame, text=SETTINGS_TEST_MODE_TEXT, width=GEN_SETTINGS_WIDTH, variable=self.is_testing_audio_var) + is_testing_audio_Option.grid() + self.help_hints(is_testing_audio_Option, text=IS_TESTING_AUDIO_HELP) + + is_add_model_name_Option = ttk.Checkbutton(settings_menu_format_Frame, 
text=MODEL_TEST_MODE_TEXT, width=GEN_SETTINGS_WIDTH, variable=self.is_add_model_name_var) + is_add_model_name_Option.grid() + self.help_hints(is_add_model_name_Option, text=IS_MODEL_TESTING_AUDIO_HELP) + + is_create_model_folder_Option = ttk.Checkbutton(settings_menu_format_Frame, text=GENERATE_MODEL_FOLDER_TEXT, width=GEN_SETTINGS_WIDTH, variable=self.is_create_model_folder_var) + is_create_model_folder_Option.grid() + self.help_hints(is_create_model_folder_Option, text=IS_CREATE_MODEL_FOLDER_HELP) + + is_accept_any_input_Option = ttk.Checkbutton(settings_menu_format_Frame, text=ACCEPT_ANY_INPUT_TEXT, width=GEN_SETTINGS_WIDTH, variable=self.is_accept_any_input_var) + is_accept_any_input_Option.grid() + self.help_hints(is_accept_any_input_Option, text=IS_ACCEPT_ANY_INPUT_HELP) + + is_task_complete_Option = ttk.Checkbutton(settings_menu_format_Frame, text=NOTIFICATION_CHIMES_TEXT, width=GEN_SETTINGS_WIDTH, variable=self.is_task_complete_var) + is_task_complete_Option.grid() + self.help_hints(is_task_complete_Option, text=IS_TASK_COMPLETE_HELP) + + is_normalization_Option = ttk.Checkbutton(settings_menu_format_Frame, text=NORMALIZE_OUTPUT_TEXT, width=GEN_SETTINGS_WIDTH, variable=self.is_normalization_var) + is_normalization_Option.grid() + self.help_hints(is_normalization_Option, text=IS_NORMALIZATION_HELP) + + change_model_default_Button = ttk.Button(settings_menu_format_Frame, text=CHANGE_MODEL_DEFAULTS_TEXT, command=lambda:self.pop_up_change_model_defaults(settings_menu), width=SETTINGS_BUT_WIDTH-2)# + change_model_default_Button.grid(pady=MENU_PADDING_4) + + #if not is_choose_arch: + self.vocal_splitter_Button_opt(settings_menu, settings_menu_format_Frame, width=SETTINGS_BUT_WIDTH-2, pady=MENU_PADDING_4) + + if not is_macos and self.is_gpu_available: + gpu_list_options = lambda:self.loop_gpu_list(device_set_Option, 'gpudevice', self.cuda_device_list)#self.opencl_list if is_opencl_only or self.is_use_opencl_var.get() else self.cuda_device_list) + device_set_Label = self.menu_title_LABEL_SET(settings_menu_format_Frame, CUDA_NUM_TEXT) + device_set_Label.grid(pady=MENU_PADDING_2) + + device_set_Option = ComboBoxMenu(settings_menu_format_Frame, textvariable=self.device_set_var, values=GPU_DEVICE_NUM_OPTS, width=GEN_SETTINGS_WIDTH+1) + device_set_Option.grid(padx=20,pady=MENU_PADDING_1) + gpu_list_options() + self.help_hints(device_set_Label, text=IS_CUDA_SELECT_HELP) + + # if is_choose_arch: + # is_use_opencl_Option = ttk.Checkbutton(settings_menu_format_Frame, + # text=USE_OPENCL_TEXT, + # width=9, + # variable=self.is_use_opencl_var, + # command=lambda:(gpu_list_options(), self.device_set_var.set(DEFAULT))) + # is_use_opencl_Option.grid() + # self.help_hints(is_use_opencl_Option, text=IS_NORMALIZATION_HELP) + + model_sample_mode_Label = self.menu_title_LABEL_SET(settings_menu_format_Frame, MODEL_SAMPLE_MODE_SETTINGS_TEXT) + model_sample_mode_Label.grid(pady=MENU_PADDING_2) + + model_sample_mode_duration_Label = self.menu_sub_LABEL_SET(settings_menu_format_Frame, SAMPLE_CLIP_DURATION_TEXT) + model_sample_mode_duration_Label.grid(pady=MENU_PADDING_1) + + tk.Label(settings_menu_format_Frame, textvariable=self.model_sample_mode_duration_label_var, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground=FG_COLOR).grid(pady=2) + model_sample_mode_duration_Option = ttk.Scale(settings_menu_format_Frame, variable=self.model_sample_mode_duration_var, from_=5, to=120, command=set_vars_for_sample_mode, orient='horizontal') + model_sample_mode_duration_Option.grid(pady=2) + + #Settings Tab 3 + 
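# Settings tab 3 is the Download Center: a radio button picks the architecture
# (VR Arch / MDX-Net / Demucs), a combobox lists the models available for it, the progress
# label/bar and stop button track the active download, and the remaining buttons refresh the
# model list, accept a user download code, and open the manual-download instructions.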
settings_menu_download_center_Frame = self.menu_FRAME_SET(tab3) + settings_menu_download_center_Frame.grid(row=0) + + download_center_title_Label = self.menu_title_LABEL_SET(settings_menu_download_center_Frame, APPLICATION_DOWNLOAD_CENTER_TEXT) + download_center_title_Label.grid(padx=20,pady=MENU_PADDING_2) + + select_download_Label = self.menu_sub_LABEL_SET(settings_menu_download_center_Frame, SELECT_DOWNLOAD_TEXT) + select_download_Label.grid(pady=MENU_PADDING_2) + + self.model_download_vr_Button = ttk.Radiobutton(settings_menu_download_center_Frame, text='VR Arch', width=8, variable=self.select_download_var, value='VR Arc', command=lambda:self.download_list_state()) + self.model_download_vr_Button.grid(pady=MENU_PADDING_1) + self.model_download_vr_Option = ComboBoxMenu(settings_menu_download_center_Frame, textvariable=self.model_download_vr_var, width=READ_ONLY_COMBO_WIDTH) + self.model_download_vr_Option.grid(pady=MENU_PADDING_1) + + self.model_download_mdx_Button = ttk.Radiobutton(settings_menu_download_center_Frame, text='MDX-Net', width=8, variable=self.select_download_var, value='MDX-Net', command=lambda:self.download_list_state()) + self.model_download_mdx_Button.grid(pady=MENU_PADDING_1) + self.model_download_mdx_Option = ComboBoxMenu(settings_menu_download_center_Frame, textvariable=self.model_download_mdx_var, width=READ_ONLY_COMBO_WIDTH) + self.model_download_mdx_Option.grid(pady=MENU_PADDING_1) + + self.model_download_demucs_Button = ttk.Radiobutton(settings_menu_download_center_Frame, text='Demucs', width=8, variable=self.select_download_var, value='Demucs', command=lambda:self.download_list_state()) + self.model_download_demucs_Button.grid(pady=MENU_PADDING_1) + self.model_download_demucs_Option = ComboBoxMenu(settings_menu_download_center_Frame, textvariable=self.model_download_demucs_var, width=READ_ONLY_COMBO_WIDTH) + self.model_download_demucs_Option.grid(pady=MENU_PADDING_1) + + self.download_Button = ttk.Button(settings_menu_download_center_Frame, image=self.download_img, command=lambda:self.download_item())#, command=download_model) + self.download_Button.grid(pady=MENU_PADDING_1) + + self.download_progress_info_Label = tk.Label(settings_menu_download_center_Frame, textvariable=self.download_progress_info_var, font=(MAIN_FONT_NAME, f"{FONT_SIZE_2}"), foreground=FG_COLOR, borderwidth=0) + self.download_progress_info_Label.grid(pady=MENU_PADDING_1) + + self.download_progress_percent_Label = tk.Label(settings_menu_download_center_Frame, textvariable=self.download_progress_percent_var, font=(MAIN_FONT_NAME, f"{FONT_SIZE_2}"), wraplength=350, foreground=FG_COLOR) + self.download_progress_percent_Label.grid(pady=MENU_PADDING_1) + + self.download_progress_bar_Progressbar = ttk.Progressbar(settings_menu_download_center_Frame, variable=self.download_progress_bar_var) + self.download_progress_bar_Progressbar.grid(pady=MENU_PADDING_1) + + self.stop_download_Button = ttk.Button(settings_menu_download_center_Frame, textvariable=self.download_stop_var, width=15, command=lambda:self.download_post_action(DOWNLOAD_STOPPED)) + self.stop_download_Button.grid(pady=MENU_PADDING_1) + self.stop_download_Button_DISABLE = lambda:(self.download_stop_var.set(""), self.stop_download_Button.configure(state=tk.DISABLED)) + self.stop_download_Button_ENABLE = lambda:(self.download_stop_var.set(STOP_DOWNLOAD_TEXT), self.stop_download_Button.configure(state=tk.NORMAL)) + + self.refresh_list_Button = ttk.Button(settings_menu_download_center_Frame, text=REFRESH_LIST_TEXT, 
command=lambda:self.online_data_refresh(refresh_list_Button=True))#, command=refresh_list) + self.refresh_list_Button.grid(pady=MENU_PADDING_1) + + self.download_key_Button = ttk.Button(settings_menu_download_center_Frame, image=self.key_img, command=lambda:self.pop_up_user_code_input()) + self.download_key_Button.grid(pady=MENU_PADDING_1) + + self.manual_download_Button = ttk.Button(settings_menu_download_center_Frame, text=TRY_MANUAL_DOWNLOAD_TEXT, command=self.menu_manual_downloads) + self.manual_download_Button.grid(pady=MENU_PADDING_1) + + self.download_center_Buttons = (self.model_download_vr_Button, + self.model_download_mdx_Button, + self.model_download_demucs_Button, + self.download_Button, + self.download_key_Button) + + self.download_lists = (self.model_download_vr_Option, + self.model_download_mdx_Option, + self.model_download_demucs_Option) + + self.download_list_vars = (self.model_download_vr_var, + self.model_download_mdx_var, + self.model_download_demucs_var) + + self.online_data_refresh() + + self.menu_placement(settings_menu, SETTINGS_GUIDE_TEXT, is_help_hints=True, close_function=lambda:close_window()) + + if select_tab_2: + tabControl.select(tab2) + settings_menu.update_idletasks() + + if select_tab_3: + tabControl.select(tab3) + settings_menu.update_idletasks() + + def close_window(): + self.active_download_thread.terminate() if self.thread_check(self.active_download_thread) else None + self.is_menu_settings_open = False + self.select_download_var.set('') + settings_menu.destroy() + + #self.update_checkbox_text() + settings_menu.protocol("WM_DELETE_WINDOW", close_window) + + def menu_advanced_vr_options(self):#** + """Open Advanced VR Options""" + + vr_opt = tk.Toplevel() + + tab1 = self.menu_tab_control(vr_opt, self.vr_secondary_model_vars) + + self.is_open_menu_advanced_vr_options.set(True) + self.menu_advanced_vr_options_close_window = lambda:(self.is_open_menu_advanced_vr_options.set(False), vr_opt.destroy()) + vr_opt.protocol("WM_DELETE_WINDOW", self.menu_advanced_vr_options_close_window) + + toggle_post_process = lambda:self.post_process_threshold_Option.configure(state=READ_ONLY) if self.is_post_process_var.get() else self.post_process_threshold_Option.configure(state=tk.DISABLED) + + vr_opt_frame = self.menu_FRAME_SET(tab1) + vr_opt_frame.grid(pady=0 if not self.chosen_process_method_var.get() == VR_ARCH_PM else 70) + + vr_title = self.menu_title_LABEL_SET(vr_opt_frame, ADVANCED_VR_OPTIONS_TEXT) + vr_title.grid(padx=25, pady=MENU_PADDING_2) + + if not self.chosen_process_method_var.get() == VR_ARCH_PM: + window_size_Label = self.menu_sub_LABEL_SET(vr_opt_frame, WINDOW_SIZE_TEXT) + window_size_Label.grid(pady=MENU_PADDING_1) + window_size_Option = ComboBoxEditableMenu(vr_opt_frame, values=VR_WINDOW, width=MENU_COMBOBOX_WIDTH, textvariable=self.window_size_var, pattern=REG_WINDOW, default=VR_WINDOW[1])# + window_size_Option.grid(pady=MENU_PADDING_1) + self.help_hints(window_size_Label, text=WINDOW_SIZE_HELP) + + aggression_setting_Label = self.menu_sub_LABEL_SET(vr_opt_frame, AGGRESSION_SETTING_TEXT) + aggression_setting_Label.grid(pady=MENU_PADDING_1) + aggression_setting_Option = ComboBoxEditableMenu(vr_opt_frame, values=VR_AGGRESSION, width=MENU_COMBOBOX_WIDTH, textvariable=self.aggression_setting_var, pattern=REG_AGGRESSION, default=VR_AGGRESSION[5])# + aggression_setting_Option.grid(pady=MENU_PADDING_1) + self.help_hints(aggression_setting_Label, text=AGGRESSION_SETTING_HELP) + + self.batch_size_Label = self.menu_sub_LABEL_SET(vr_opt_frame, BATCH_SIZE_TEXT) 
+ self.batch_size_Label.grid(pady=MENU_PADDING_1) + self.batch_size_Option = ComboBoxEditableMenu(vr_opt_frame, values=BATCH_SIZE, width=MENU_COMBOBOX_WIDTH, textvariable=self.batch_size_var, pattern=REG_BATCHES, default=BATCH_SIZE)# + self.batch_size_Option.grid(pady=MENU_PADDING_1) + self.help_hints(self.batch_size_Label, text=BATCH_SIZE_HELP) + + self.post_process_threshold_Label = self.menu_sub_LABEL_SET(vr_opt_frame, POST_PROCESS_THRESHOLD_TEXT) + self.post_process_threshold_Label.grid(pady=MENU_PADDING_1) + self.post_process_threshold_Option = ComboBoxEditableMenu(vr_opt_frame, values=POST_PROCESSES_THREASHOLD_VALUES, width=MENU_COMBOBOX_WIDTH, textvariable=self.post_process_threshold_var, pattern=REG_THES_POSTPORCESS, default=POST_PROCESSES_THREASHOLD_VALUES[1])# + self.post_process_threshold_Option.grid(pady=MENU_PADDING_1) + self.help_hints(self.post_process_threshold_Label, text=POST_PROCESS_THREASHOLD_HELP) + + self.is_tta_Option = ttk.Checkbutton(vr_opt_frame, text=ENABLE_TTA_TEXT, width=VR_CHECKBOXS_WIDTH, variable=self.is_tta_var) + self.is_tta_Option.grid(pady=0) + self.help_hints(self.is_tta_Option, text=IS_TTA_HELP) + + self.is_post_process_Option = ttk.Checkbutton(vr_opt_frame, text=POST_PROCESS_TEXT, width=VR_CHECKBOXS_WIDTH, variable=self.is_post_process_var, command=toggle_post_process) + self.is_post_process_Option.grid(pady=0) + self.help_hints(self.is_post_process_Option, text=IS_POST_PROCESS_HELP) + + self.is_high_end_process_Option = ttk.Checkbutton(vr_opt_frame, text=HIGHEND_PROCESS_TEXT, width=VR_CHECKBOXS_WIDTH, variable=self.is_high_end_process_var) + self.is_high_end_process_Option.grid(pady=0) + self.help_hints(self.is_high_end_process_Option, text=IS_HIGH_END_PROCESS_HELP) + + self.vocal_splitter_Button_opt(vr_opt, vr_opt_frame, pady=MENU_PADDING_1, width=VR_BUT_WIDTH) + + self.vr_clear_cache_Button = ttk.Button(vr_opt_frame, text=CLEAR_AUTOSET_CACHE_TEXT, command=lambda:self.clear_cache(VR_ARCH_TYPE), width=VR_BUT_WIDTH) + self.vr_clear_cache_Button.grid(pady=MENU_PADDING_1) + self.help_hints(self.vr_clear_cache_Button, text=CLEAR_CACHE_HELP) + + self.open_vr_model_dir_Button = ttk.Button(vr_opt_frame, text=OPEN_MODELS_FOLDER_TEXT, command=lambda:OPEN_FILE_func(VR_MODELS_DIR), width=VR_BUT_WIDTH) + self.open_vr_model_dir_Button.grid(pady=MENU_PADDING_1) + + self.vr_return_Button=ttk.Button(vr_opt_frame, text=BACK_TO_MAIN_MENU, command=lambda:(self.menu_advanced_vr_options_close_window(), self.check_is_menu_settings_open())) + self.vr_return_Button.grid(pady=MENU_PADDING_1) + + self.vr_close_Button = ttk.Button(vr_opt_frame, text=CLOSE_WINDOW, command=lambda:self.menu_advanced_vr_options_close_window()) + self.vr_close_Button.grid(pady=MENU_PADDING_1) + + toggle_post_process() + + frame_list = [vr_opt_frame] + self.menu_placement(vr_opt, ADVANCED_VR_OPTIONS_TEXT, is_help_hints=True, close_function=self.menu_advanced_vr_options_close_window, frame_list=frame_list) + + def menu_advanced_demucs_options(self):#** + """Open Advanced Demucs Options""" + + demuc_opt = tk.Toplevel() + + self.is_open_menu_advanced_demucs_options.set(True) + self.menu_advanced_demucs_options_close_window = lambda:(self.is_open_menu_advanced_demucs_options.set(False), demuc_opt.destroy()) + demuc_opt.protocol("WM_DELETE_WINDOW", self.menu_advanced_demucs_options_close_window) + + tab1, tab3 = self.menu_tab_control(demuc_opt, self.demucs_secondary_model_vars, is_demucs=True) + + demucs_frame = self.menu_FRAME_SET(tab1) + demucs_frame.grid(pady=0 if not 
self.chosen_process_method_var.get() == DEMUCS_ARCH_TYPE else 55) + + demucs_pre_model_frame = self.menu_FRAME_SET(tab3) + demucs_pre_model_frame.grid(row=0) + + demucs_title_Label = self.menu_title_LABEL_SET(demucs_frame, ADVANCED_DEMUCS_OPTIONS_TEXT) + demucs_title_Label.grid(pady=MENU_PADDING_2) + + if not self.chosen_process_method_var.get() == DEMUCS_ARCH_TYPE: + segment_Label = self.menu_sub_LABEL_SET(demucs_frame, SEGMENTS_TEXT) + segment_Label.grid(pady=MENU_PADDING_2) + segment_Option = ComboBoxEditableMenu(demucs_frame, values=DEMUCS_SEGMENTS, width=MENU_COMBOBOX_WIDTH, textvariable=self.segment_var, pattern=REG_SEGMENTS, default=DEMUCS_SEGMENTS)# + segment_Option.grid() + self.help_hints(segment_Label, text=SEGMENT_HELP) + + self.shifts_Label = self.menu_sub_LABEL_SET(demucs_frame, SHIFTS_TEXT) + self.shifts_Label.grid(pady=MENU_PADDING_1) + self.shifts_Option = ComboBoxEditableMenu(demucs_frame, values=DEMUCS_SHIFTS, width=MENU_COMBOBOX_WIDTH, textvariable=self.shifts_var, pattern=REG_SHIFTS, default=DEMUCS_SHIFTS[2])# + self.shifts_Option.grid(pady=MENU_PADDING_1) + self.help_hints(self.shifts_Label, text=SHIFTS_HELP) + + self.overlap_Label = self.menu_sub_LABEL_SET(demucs_frame, OVERLAP_TEXT) + self.overlap_Label.grid(pady=MENU_PADDING_1) + self.overlap_Option = ComboBoxEditableMenu(demucs_frame, values=DEMUCS_OVERLAP, width=MENU_COMBOBOX_WIDTH, textvariable=self.overlap_var, pattern=REG_OVERLAP, default=DEMUCS_OVERLAP)# + self.overlap_Option.grid(pady=MENU_PADDING_1) + self.help_hints(self.overlap_Label, text=OVERLAP_HELP) + + pitch_shift_Label = self.menu_sub_LABEL_SET(demucs_frame, SHIFT_CONVERSION_PITCH_TEXT) + pitch_shift_Label.grid(pady=MENU_PADDING_1) + pitch_shift_Option = ComboBoxEditableMenu(demucs_frame, values=SEMITONE_SEL, width=MENU_COMBOBOX_WIDTH, textvariable=self.semitone_shift_var, pattern=REG_SEMITONES, default=SEMI_DEF)# + pitch_shift_Option.grid(pady=MENU_PADDING_1) + self.help_hints(pitch_shift_Label, text=PITCH_SHIFT_HELP) + + self.is_split_mode_Option = ttk.Checkbutton(demucs_frame, text=SPLIT_MODE_TEXT, width=DEMUCS_CHECKBOXS_WIDTH, variable=self.is_split_mode_var) + self.is_split_mode_Option.grid() + self.help_hints(self.is_split_mode_Option, text=IS_SPLIT_MODE_HELP) + + self.is_demucs_combine_stems_Option = ttk.Checkbutton(demucs_frame, text=COMBINE_STEMS_TEXT, width=DEMUCS_CHECKBOXS_WIDTH, variable=self.is_demucs_combine_stems_var) + self.is_demucs_combine_stems_Option.grid() + self.help_hints(self.is_demucs_combine_stems_Option, text=IS_DEMUCS_COMBINE_STEMS_HELP) + + is_invert_spec_Option = ttk.Checkbutton(demucs_frame, text=SPECTRAL_INVERSION_TEXT, width=DEMUCS_CHECKBOXS_WIDTH, variable=self.is_invert_spec_var) + is_invert_spec_Option.grid() + self.help_hints(is_invert_spec_Option, text=IS_INVERT_SPEC_HELP) + + self.vocal_splitter_Button_opt(demuc_opt, demucs_frame, width=VR_BUT_WIDTH, pady=MENU_PADDING_1) + + self.open_demucs_model_dir_Button = ttk.Button(demucs_frame, text=OPEN_MODELS_FOLDER_TEXT, command=lambda:OPEN_FILE_func(DEMUCS_MODELS_DIR), width=VR_BUT_WIDTH) + self.open_demucs_model_dir_Button.grid(pady=MENU_PADDING_1) + + self.demucs_return_Button = ttk.Button(demucs_frame, text=BACK_TO_MAIN_MENU, command=lambda:(self.menu_advanced_demucs_options_close_window(), self.check_is_menu_settings_open())) + self.demucs_return_Button.grid(pady=MENU_PADDING_1) + + self.demucs_close_Button = ttk.Button(demucs_frame, text=CLOSE_WINDOW, command=lambda:self.menu_advanced_demucs_options_close_window()) + 
self.demucs_close_Button.grid(pady=MENU_PADDING_1) + + frame_list = [demucs_pre_model_frame, demucs_frame] + self.menu_placement(demuc_opt, ADVANCED_DEMUCS_OPTIONS_TEXT, is_help_hints=True, close_function=self.menu_advanced_demucs_options_close_window, frame_list=frame_list) + + def menu_advanced_mdx_options(self):#** + """Open Advanced MDX Options""" + + mdx_net_opt = tk.Toplevel() + + self.is_open_menu_advanced_mdx_options.set(True) + self.menu_advanced_mdx_options_close_window = lambda:(self.is_open_menu_advanced_mdx_options.set(False), mdx_net_opt.destroy()) + mdx_net_opt.protocol("WM_DELETE_WINDOW", self.menu_advanced_mdx_options_close_window) + + tab1, tab3 = self.menu_tab_control(mdx_net_opt, self.mdx_secondary_model_vars, is_mdxnet=True) + + mdx_net_frame = self.menu_FRAME_SET(tab1) + mdx_net_frame.grid(pady=0) + + mdx_net23_frame = self.menu_FRAME_SET(tab3) + mdx_net23_frame.grid(pady=0) + + mdx_opt_title = self.menu_title_LABEL_SET(mdx_net_frame, ADVANCED_MDXNET_OPTIONS_TEXT) + mdx_opt_title.grid(pady=MENU_PADDING_1) + + compensate_Label = self.menu_sub_LABEL_SET(mdx_net_frame, VOLUME_COMPENSATION_TEXT) + compensate_Label.grid(pady=MENU_PADDING_4) + compensate_Option = ComboBoxEditableMenu(mdx_net_frame, values=VOL_COMPENSATION, width=MENU_COMBOBOX_WIDTH, textvariable=self.compensate_var, pattern=REG_COMPENSATION, default=VOL_COMPENSATION)# + compensate_Option.grid(pady=MENU_PADDING_4) + self.help_hints(compensate_Label, text=COMPENSATE_HELP) + + mdx_segment_size_Label = self.menu_sub_LABEL_SET(mdx_net_frame, SEGMENT_SIZE_TEXT) + mdx_segment_size_Label.grid(pady=MENU_PADDING_4) + mdx_segment_size_Option = ComboBoxEditableMenu(mdx_net_frame, values=MDX_SEGMENTS, width=MENU_COMBOBOX_WIDTH, textvariable=self.mdx_segment_size_var, pattern=REG_MDX_SEG, default="256")# + mdx_segment_size_Option.grid(pady=MENU_PADDING_4) + self.help_hints(mdx_segment_size_Label, text=MDX_SEGMENT_SIZE_HELP) + + overlap_mdx_Label = self.menu_sub_LABEL_SET(mdx_net_frame, OVERLAP_TEXT) + overlap_mdx_Label.grid(pady=MENU_PADDING_4) + overlap_mdx_Option = ComboBoxEditableMenu(mdx_net_frame, values=MDX_OVERLAP, width=MENU_COMBOBOX_WIDTH, textvariable=self.overlap_mdx_var, pattern=REG_OVERLAP, default=MDX_OVERLAP)# + overlap_mdx_Option.grid(pady=MENU_PADDING_4) + self.help_hints(overlap_mdx_Label, text=OVERLAP_HELP) + + pitch_shift_Label = self.menu_sub_LABEL_SET(mdx_net_frame, SHIFT_CONVERSION_PITCH_TEXT) + pitch_shift_Label.grid(pady=MENU_PADDING_4) + pitch_shift_Option = ComboBoxEditableMenu(mdx_net_frame, values=SEMITONE_SEL, width=MENU_COMBOBOX_WIDTH, textvariable=self.semitone_shift_var, pattern=REG_SEMITONES, default=SEMI_DEF)# + pitch_shift_Option.grid(pady=MENU_PADDING_4) + self.help_hints(pitch_shift_Label, text=PITCH_SHIFT_HELP) + + if not os.path.isfile(DENOISER_MODEL_PATH): + denoise_options_var_text = self.denoise_option_var.get() + denoise_options = [option for option in MDX_DENOISE_OPTION if option != DENOISE_M] + self.denoise_option_var.set(DENOISE_S if denoise_options_var_text == DENOISE_M else denoise_options_var_text) + else: + denoise_options = MDX_DENOISE_OPTION + + denoise_option_Label = self.menu_sub_LABEL_SET(mdx_net_frame, DENOISE_OUTPUT_TEXT) + denoise_option_Label.grid(pady=MENU_PADDING_4) + denoise_option_Option = ComboBoxMenu(mdx_net_frame, textvariable=self.denoise_option_var, values=denoise_options, width=MENU_COMBOBOX_WIDTH) + denoise_option_Option.grid(pady=MENU_PADDING_4) + self.help_hints(denoise_option_Label, text=IS_DENOISE_HELP) + + is_match_frequency_pitch_Option = 
ttk.Checkbutton(mdx_net_frame, text=MATCH_FREQ_CUTOFF_TEXT, width=MDX_CHECKBOXS_WIDTH, variable=self.is_match_frequency_pitch_var) + is_match_frequency_pitch_Option.grid(pady=0) + self.help_hints(is_match_frequency_pitch_Option, text=IS_FREQUENCY_MATCH_HELP) + + is_invert_spec_Option = ttk.Checkbutton(mdx_net_frame, text=SPECTRAL_INVERSION_TEXT, width=MDX_CHECKBOXS_WIDTH, variable=self.is_invert_spec_var) + is_invert_spec_Option.grid(pady=0) + self.help_hints(is_invert_spec_Option, text=IS_INVERT_SPEC_HELP) + + self.vocal_splitter_Button_opt(mdx_net_opt, mdx_net_frame, pady=MENU_PADDING_1, width=VR_BUT_WIDTH) + + clear_mdx_cache_Button = ttk.Button(mdx_net_frame, text=CLEAR_AUTOSET_CACHE_TEXT, command=lambda:self.clear_cache(MDX_ARCH_TYPE), width=VR_BUT_WIDTH) + clear_mdx_cache_Button.grid(pady=MENU_PADDING_1) + self.help_hints(clear_mdx_cache_Button, text=CLEAR_CACHE_HELP) + + open_mdx_model_dir_Button = ttk.Button(mdx_net_frame, text=OPEN_MODELS_FOLDER_TEXT, command=lambda:OPEN_FILE_func(MDX_MODELS_DIR), width=VR_BUT_WIDTH) + open_mdx_model_dir_Button.grid(pady=MENU_PADDING_1) + + mdx_return_Button = ttk.Button(mdx_net_frame, text=BACK_TO_MAIN_MENU, command=lambda:(self.menu_advanced_mdx_options_close_window(), self.check_is_menu_settings_open())) + mdx_return_Button.grid(pady=MENU_PADDING_1) + + mdx_close_Button = ttk.Button(mdx_net_frame, text=CLOSE_WINDOW, command=lambda:self.menu_advanced_mdx_options_close_window()) + mdx_close_Button.grid(pady=MENU_PADDING_1) + + mdx23_opt_title = self.menu_title_LABEL_SET(mdx_net23_frame, ADVANCED_MDXNET23_OPTIONS_TEXT) + mdx23_opt_title.grid(pady=MENU_PADDING_2) + + mdx_batch_size_Label = self.menu_sub_LABEL_SET(mdx_net23_frame, BATCH_SIZE_TEXT) + mdx_batch_size_Label.grid(pady=MENU_PADDING_1) + mdx_batch_size_Option = ComboBoxEditableMenu(mdx_net23_frame, values=BATCH_SIZE, width=MENU_COMBOBOX_WIDTH, textvariable=self.mdx_batch_size_var, pattern=REG_BATCHES, default=BATCH_SIZE)# + mdx_batch_size_Option.grid(pady=MENU_PADDING_1) + self.help_hints(mdx_batch_size_Label, text=BATCH_SIZE_HELP) + + overlap_mdx23_Label = self.menu_sub_LABEL_SET(mdx_net23_frame, OVERLAP_TEXT) + overlap_mdx23_Label.grid(pady=MENU_PADDING_1) + overlap_mdx23_Option = ComboBoxEditableMenu(mdx_net23_frame, values=MDX23_OVERLAP, width=MENU_COMBOBOX_WIDTH, textvariable=self.overlap_mdx23_var, pattern=REG_OVERLAP23, default="8")# + overlap_mdx23_Option.grid(pady=MENU_PADDING_1) + self.help_hints(overlap_mdx23_Label, text=OVERLAP_23_HELP) + + is_mdx_c_seg_def_Option = ttk.Checkbutton(mdx_net23_frame, text=SEGMENT_DEFAULT_TEXT, width=MDX_CHECKBOXS_WIDTH, variable=self.is_mdx_c_seg_def_var) + is_mdx_c_seg_def_Option.grid(pady=0) + self.help_hints(is_mdx_c_seg_def_Option, text=IS_SEGMENT_DEFAULT_HELP) + + is_mdx_combine_stems_Option = ttk.Checkbutton(mdx_net23_frame, text=COMBINE_STEMS_TEXT, width=MDX_CHECKBOXS_WIDTH, variable=self.is_mdx23_combine_stems_var) + is_mdx_combine_stems_Option.grid() + self.help_hints(is_mdx_combine_stems_Option, text=IS_DEMUCS_COMBINE_STEMS_HELP) + + mdx23_close_Button = ttk.Button(mdx_net23_frame, text=CLOSE_WINDOW, command=lambda:self.menu_advanced_mdx_options_close_window()) + mdx23_close_Button.grid(pady=MENU_PADDING_2) + + frame_list = [mdx_net_frame, mdx_net23_frame] + self.menu_placement(mdx_net_opt, ADVANCED_MDXNET_OPTIONS_TEXT, is_help_hints=True, close_function=self.menu_advanced_mdx_options_close_window, frame_list=frame_list) + + def menu_advanced_ensemble_options(self):#** + """Open Ensemble Custom""" + + custom_ens_opt = tk.Toplevel() + + 
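+ # Track the menu's open state and register a close handler so the window is reset when dismissed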
self.is_open_menu_advanced_ensemble_options.set(True)
+ self.menu_advanced_ensemble_options_close_window = lambda:(self.is_open_menu_advanced_ensemble_options.set(False), custom_ens_opt.destroy())
+ custom_ens_opt.protocol("WM_DELETE_WINDOW", self.menu_advanced_ensemble_options_close_window)
+
+ option_var = tk.StringVar(value=SELECT_SAVED_ENSEMBLE)
+
+ custom_ens_opt_frame = self.menu_FRAME_SET(custom_ens_opt)
+ custom_ens_opt_frame.grid(row=0)
+
+ settings_title_Label = self.menu_title_LABEL_SET(custom_ens_opt_frame, ADVANCED_OPTION_MENU_TEXT)
+ settings_title_Label.grid(pady=MENU_PADDING_2)
+
+ delete_entry_Label = self.menu_sub_LABEL_SET(custom_ens_opt_frame, REMOVE_SAVED_ENSEMBLE_TEXT)
+ delete_entry_Label.grid(pady=MENU_PADDING_1)
+ delete_entry_Option = ComboBoxMenu(custom_ens_opt_frame, textvariable=option_var, width=ENSEMBLE_CHECKBOXS_WIDTH+2)
+ delete_entry_Option.grid(padx=20,pady=MENU_PADDING_1)
+
+ is_save_all_outputs_ensemble_Option = ttk.Checkbutton(custom_ens_opt_frame, text=SAVE_ALL_OUTPUTS_TEXT, width=ENSEMBLE_CHECKBOXS_WIDTH, variable=self.is_save_all_outputs_ensemble_var)
+ is_save_all_outputs_ensemble_Option.grid(pady=0)
+ self.help_hints(is_save_all_outputs_ensemble_Option, text=IS_SAVE_ALL_OUTPUTS_ENSEMBLE_HELP)
+
+ is_append_ensemble_name_Option = ttk.Checkbutton(custom_ens_opt_frame, text=APPEND_ENSEMBLE_NAME_TEXT, width=ENSEMBLE_CHECKBOXS_WIDTH, variable=self.is_append_ensemble_name_var)
+ is_append_ensemble_name_Option.grid(pady=0)
+ self.help_hints(is_append_ensemble_name_Option, text=IS_APPEND_ENSEMBLE_NAME_HELP)
+
+ is_wav_ensemble_Option = ttk.Checkbutton(custom_ens_opt_frame, text=ENSEMBLE_WAVFORMS_TEXT, width=ENSEMBLE_CHECKBOXS_WIDTH, variable=self.is_wav_ensemble_var)
+ is_wav_ensemble_Option.grid(pady=0)
+ self.help_hints(is_wav_ensemble_Option, text=IS_WAV_ENSEMBLE_HELP)
+
+ ensemble_return_Button = ttk.Button(custom_ens_opt_frame, text=BACK_TO_MAIN_MENU, command=lambda:(self.menu_advanced_ensemble_options_close_window(), self.check_is_menu_settings_open()))
+ ensemble_return_Button.grid(pady=MENU_PADDING_1)
+
+ ensemble_close_Button = ttk.Button(custom_ens_opt_frame, text=CLOSE_WINDOW, command=lambda:self.menu_advanced_ensemble_options_close_window())
+ ensemble_close_Button.grid(pady=MENU_PADDING_1)
+
+ self.deletion_list_fill(delete_entry_Option, option_var, ENSEMBLE_CACHE_DIR, SELECT_SAVED_ENSEMBLE, menu_name='deleteensemble')
+
+ self.menu_placement(custom_ens_opt, ADVANCED_ENSEMBLE_OPTIONS_TEXT, is_help_hints=True, close_function=self.menu_advanced_ensemble_options_close_window)
+
+ def menu_advanced_align_options(self):#**
+ """Open Advanced Align Tool Options"""
+
+ advanced_align_opt = tk.Toplevel()
+
+ self.is_open_menu_advanced_align_options.set(True)
+ self.menu_advanced_align_options_close_window = lambda:(self.is_open_menu_advanced_align_options.set(False), advanced_align_opt.destroy())
+ advanced_align_opt.protocol("WM_DELETE_WINDOW", self.menu_advanced_align_options_close_window)
+
+ advanced_align_opt_frame = self.menu_FRAME_SET(advanced_align_opt)
+ advanced_align_opt_frame.grid(row=0)
+
+ settings_title_Label = self.menu_title_LABEL_SET(advanced_align_opt_frame, ADVANCED_ALIGN_TOOL_OPTIONS_TEXT)
+ settings_title_Label.grid(pady=MENU_PADDING_2)
+
+ phase_option_Label = self.menu_sub_LABEL_SET(advanced_align_opt_frame, SECONDARY_PHASE_TEXT)
+ phase_option_Label.grid(pady=4)
+ phase_option_Option = ComboBoxMenu(advanced_align_opt_frame, textvariable=self.phase_option_var, values=ALIGN_PHASE_OPTIONS, width=MENU_COMBOBOX_WIDTH)
+ 
phase_option_Option.grid(pady=4) + self.help_hints(phase_option_Label, text=IS_PHASE_HELP) + + phase_shifts_Label = self.menu_sub_LABEL_SET(advanced_align_opt_frame, PHASE_SHIFTS_TEXT) + phase_shifts_Label.grid(pady=4)# + phase_shifts_Option = ComboBoxMenu(advanced_align_opt_frame, textvariable=self.phase_shifts_var, values=list(PHASE_SHIFTS_OPT.keys()), width=MENU_COMBOBOX_WIDTH) + phase_shifts_Option.grid(pady=4) + self.help_hints(phase_shifts_Label, text=PHASE_SHIFTS_ALIGN_HELP) + + is_save_align_Option = ttk.Checkbutton(advanced_align_opt_frame, text=SAVE_ALIGNED_TRACK_TEXT, width=MDX_CHECKBOXS_WIDTH, variable=self.is_save_align_var) + is_save_align_Option.grid(pady=0) + self.help_hints(is_save_align_Option, text=IS_ALIGN_TRACK_HELP) + + is_match_silence_Option = ttk.Checkbutton(advanced_align_opt_frame, text=SILENCE_MATCHING_TEXT, width=MDX_CHECKBOXS_WIDTH, variable=self.is_match_silence_var) + is_match_silence_Option.grid(pady=0) + self.help_hints(is_match_silence_Option, text=IS_MATCH_SILENCE_HELP) + + is_spec_match_Option = ttk.Checkbutton(advanced_align_opt_frame, text=SPECTRAL_MATCHING_TEXT, width=MDX_CHECKBOXS_WIDTH, variable=self.is_spec_match_var) + is_spec_match_Option.grid(pady=0) + self.help_hints(is_spec_match_Option, text=IS_MATCH_SPEC_HELP) + + ensemble_return_Button = ttk.Button(advanced_align_opt_frame, text=BACK_TO_MAIN_MENU, command=lambda:(self.menu_advanced_align_options_close_window(), self.check_is_menu_settings_open())) + ensemble_return_Button.grid(pady=MENU_PADDING_1) + + ensemble_close_Button = ttk.Button(advanced_align_opt_frame, text=CLOSE_WINDOW, command=lambda:self.menu_advanced_align_options_close_window()) + ensemble_close_Button.grid(pady=MENU_PADDING_1) + + self.menu_placement(advanced_align_opt, ADVANCED_ALIGN_TOOL_OPTIONS_TEXT, is_help_hints=True, close_function=self.menu_advanced_align_options_close_window) + + def menu_help(self):#** + """Open Help Guide""" + + help_guide_opt = tk.Toplevel() + + self.is_open_menu_help.set(True) + self.menu_help_close_window = lambda:(self.is_open_menu_help.set(False), help_guide_opt.destroy()) + help_guide_opt.protocol("WM_DELETE_WINDOW", self.menu_help_close_window) + + tabControl = ttk.Notebook(help_guide_opt) + + tab1 = ttk.Frame(tabControl) + tab2 = ttk.Frame(tabControl) + tab3 = ttk.Frame(tabControl) + tab4 = ttk.Frame(tabControl) + + tabControl.add(tab1, text ='Credits') + tabControl.add(tab2, text ='Resources') + tabControl.add(tab3, text ='Application License & Version Information') + tabControl.add(tab4, text ='Additional Information') + + tabControl.pack(expand = 1, fill ="both") + + tab1.grid_rowconfigure(0, weight=1) + tab1.grid_columnconfigure(0, weight=1) + + tab2.grid_rowconfigure(0, weight=1) + tab2.grid_columnconfigure(0, weight=1) + + tab3.grid_rowconfigure(0, weight=1) + tab3.grid_columnconfigure(0, weight=1) + + tab4.grid_rowconfigure(0, weight=1) + tab4.grid_columnconfigure(0, weight=1) + + section_title_Label = lambda place, frame, text, font_size=FONT_SIZE_4: tk.Label(master=frame, text=text,font=(MAIN_FONT_NAME, f"{font_size}", "bold"), justify="center", fg="#F4F4F4").grid(row=place,column=0,padx=0,pady=MENU_PADDING_4) + description_Label = lambda place, frame, text, font=FONT_SIZE_2: tk.Label(master=frame, text=text, font=(MAIN_FONT_NAME, f"{font}"), justify="center", fg="#F6F6F7").grid(row=place,column=0,padx=0,pady=MENU_PADDING_4) + + def credit_label(place, frame, text, link=None, message=None, is_link=False, is_top=False): + if is_top: + thank = tk.Label(master=frame, text=text, 
font=(MAIN_FONT_NAME, f"{FONT_SIZE_3}", "bold"), justify="center", fg="#13849f")
+ else:
+ thank = tk.Label(master=frame, text=text, font=(MAIN_FONT_NAME, f"{FONT_SIZE_3}", "underline" if is_link else "normal"), justify="center", fg="#13849f")
+ thank.configure(cursor="hand2") if is_link else None
+ thank.grid(row=place,column=0,padx=0,pady=1)
+ if link:
+ thank.bind("<Button-1>", lambda e:webbrowser.open_new_tab(link))
+ if message:
+ description_Label(place+1, frame, message)
+
+ def Link(place, frame, text, link, description, font=FONT_SIZE_2):
+ link_label = tk.Label(master=frame, text=text, font=(MAIN_FONT_NAME, f"{FONT_SIZE_4}", "underline"), foreground=FG_COLOR, justify="center", cursor="hand2")
+ link_label.grid(row=place,column=0,padx=0,pady=MENU_PADDING_1)
+ link_label.bind("<Button-1>", lambda e:webbrowser.open_new_tab(link))
+ description_Label(place+1, frame, description, font=font)
+
+ def right_click_menu(event):
+ right_click_menu = tk.Menu(self, font=(MAIN_FONT_NAME, FONT_SIZE_1), tearoff=0)
+ right_click_menu.add_command(label='Return to Settings Menu', command=lambda:(self.menu_help_close_window(), self.check_is_menu_settings_open()))
+ right_click_menu.add_command(label='Exit Window', command=lambda:self.menu_help_close_window())
+
+ try:
+ right_click_menu.tk_popup(event.x_root,event.y_root)
+ right_click_release_linux(right_click_menu, help_guide_opt)
+ finally:
+ right_click_menu.grab_release()
+
+ help_guide_opt.bind(right_click_button, lambda e:right_click_menu(e))
+ credits_Frame = tk.Frame(tab1, highlightthicknes=50)
+ credits_Frame.grid(row=0, column=0, padx=0, pady=0)
+ tk.Label(credits_Frame, image=self.credits_img).grid(row=1,column=0,padx=0,pady=MENU_PADDING_1)
+
+ section_title_Label(place=0,
+ frame=credits_Frame,
+ text="Core UVR Developers")
+
+ credit_label(place=2,
+ frame=credits_Frame,
+ text="Anjok07\nAufr33",
+ is_top=True)
+
+ section_title_Label(place=3,
+ frame=credits_Frame,
+ text="Special Thanks")
+
+ credit_label(place=6,
+ frame=credits_Frame,
+ text="Tsurumeso",
+ message="Developed the original VR Architecture AI code.",
+ link="https://github.com/tsurumeso/vocal-remover",
+ is_link=True)
+
+ credit_label(place=8,
+ frame=credits_Frame,
+ text="Kuielab & Woosung Choi",
+ message="Developed the original MDX-Net AI code.",
+ link="https://github.com/kuielab",
+ is_link=True)
+
+ credit_label(place=10,
+ frame=credits_Frame,
+ text="Adefossez & Demucs",
+ message="Core developer of Facebook's Demucs Music Source Separation.",
+ link="https://github.com/facebookresearch/demucs",
+ is_link=True)
+
+ credit_label(place=12,
+ frame=credits_Frame,
+ text="Bas Curtiz",
+ message="Designed the official UVR logo, icon, banner, splash screen.")
+
+ credit_label(place=14,
+ frame=credits_Frame,
+ text="DilanBoskan",
+ message="Your contributions at the start of this project were essential to the success of UVR. 
Thank you!") + + credit_label(place=16, + frame=credits_Frame, + text="Audio Separation and CC Karaoke & Friends Discord Communities", + message="Thank you for the support!") + + more_info_tab_Frame = tk.Frame(tab2, highlightthicknes=30) + more_info_tab_Frame.grid(row=0,column=0,padx=0,pady=0) + + section_title_Label(place=3, + frame=more_info_tab_Frame, + text="Resources") + + Link(place=4, + frame=more_info_tab_Frame, + text="Ultimate Vocal Remover (Official GitHub)", + link="https://github.com/Anjok07/ultimatevocalremovergui", + description="You can find updates, report issues, and give us a shout via our official GitHub.", + font=FONT_SIZE_1) + + Link(place=8, + frame=more_info_tab_Frame, + text="X-Minus AI", + link="https://x-minus.pro/ai", + description="Many of the models provided are also on X-Minus.\n" + \ + "X-Minus benefits users without the computing resources to run the GUI or models locally.", + font=FONT_SIZE_1) + + Link(place=12, + frame=more_info_tab_Frame, + text="MVSep", + link="https://mvsep.com/quality_checker/leaderboard.php", + description="Some of our models are also on MVSep.\n" + \ + "Click the link above for a list of some of the best settings \nand model combinations recorded by fellow UVR users.\nSpecial thanks to ZFTurbo for all his work on MVSep!", + font=FONT_SIZE_1) + + Link(place=18, + frame=more_info_tab_Frame, + text="FFmpeg", + link="https://www.wikihow.com/Install-FFmpeg-on-Windows", + description="UVR relies on FFmpeg for processing non-wav audio files.\n" + \ + "If you are missing FFmpeg, please see the installation guide via the link provided.", + font=FONT_SIZE_1) + + Link(place=22, + frame=more_info_tab_Frame, + text="Rubber Band Library", + link="https://breakfastquay.com/rubberband/", + description="UVR uses the Rubber Band library for the sound stretch and pitch shift tool.\n" + \ + "You can get more information on it via the link provided.", + font=FONT_SIZE_1) + + Link(place=26, + frame=more_info_tab_Frame, + text="Matchering", + link="https://github.com/sergree/matchering", + description="UVR uses the Matchering library for the \"Matchering\" Audio Tool.\n" + \ + "You can get more information on it via the link provided.", + font=FONT_SIZE_1) + + Link(place=30, + frame=more_info_tab_Frame, + text="Official UVR BMAC", + link=DONATE_LINK_BMAC, + description="If you wish to support and donate to this project, click the link above!", + font=FONT_SIZE_1) + + appplication_license_tab_Frame = tk.Frame(tab3) + appplication_license_tab_Frame.grid(row=0,column=0,padx=0,pady=0) + + appplication_license_Label = tk.Label(appplication_license_tab_Frame, text='UVR License Information', font=(MAIN_FONT_NAME, f"{FONT_SIZE_6}", "bold"), justify="center", fg="#f4f4f4") + appplication_license_Label.grid(row=0,column=0,padx=0,pady=25) + + appplication_license_Text = tk.Text(appplication_license_tab_Frame, font=(MAIN_FONT_NAME, f"{FONT_SIZE_4}"), fg="white", bg="black", width=72, wrap=tk.WORD, borderwidth=0) + appplication_license_Text.grid(row=1,column=0,padx=0,pady=0) + appplication_license_Text_scroll = ttk.Scrollbar(appplication_license_tab_Frame, orient=tk.VERTICAL) + appplication_license_Text.config(yscrollcommand=appplication_license_Text_scroll.set) + appplication_license_Text_scroll.configure(command=appplication_license_Text.yview) + appplication_license_Text.grid(row=4,sticky=tk.W) + appplication_license_Text_scroll.grid(row=4, column=1, sticky=tk.NS) + appplication_license_Text.insert("insert", LICENSE_TEXT(VERSION, current_patch)) + 
appplication_license_Text.configure(state=tk.DISABLED) + + application_change_log_tab_Frame = tk.Frame(tab4) + application_change_log_tab_Frame.grid(row=0,column=0,padx=0,pady=0) + + application_change_log_Label = tk.Label(application_change_log_tab_Frame, text='Additional Information', font=(MAIN_FONT_NAME, f"{FONT_SIZE_6}", "bold"), justify="center", fg="#f4f4f4") + application_change_log_Label.grid(row=0,column=0,padx=0,pady=25) + + application_change_log_Text = tk.Text(application_change_log_tab_Frame, font=(MAIN_FONT_NAME, f"{FONT_SIZE_4}"), fg="white", bg="black", width=72, wrap=tk.WORD, borderwidth=0) + application_change_log_Text.grid(row=1,column=0,padx=40 if is_macos else 30,pady=0) + application_change_log_Text_scroll = ttk.Scrollbar(application_change_log_tab_Frame, orient=tk.VERTICAL) + application_change_log_Text.config(yscrollcommand=application_change_log_Text_scroll.set) + application_change_log_Text_scroll.configure(command=application_change_log_Text.yview) + application_change_log_Text.grid(row=4,sticky=tk.W) + application_change_log_Text_scroll.grid(row=4, column=1, sticky=tk.NS) + application_change_log_Text.insert("insert", self.bulletin_data) + auto_hyperlink(application_change_log_Text) + application_change_log_Text.configure(state=tk.DISABLED) + + self.menu_placement(help_guide_opt, "Information Guide") + + def menu_error_log(self):# + """Open Error Log""" + + self.is_confirm_error_var.set(False) + + copied_var = tk.StringVar(value='') + error_log_screen = tk.Toplevel() + + self.is_open_menu_error_log.set(True) + self.menu_error_log_close_window = lambda:(self.is_open_menu_error_log.set(False), error_log_screen.destroy()) + error_log_screen.protocol("WM_DELETE_WINDOW", self.menu_error_log_close_window) + + error_log_frame = self.menu_FRAME_SET(error_log_screen) + error_log_frame.grid(row=0) + + error_consol_title_Label = self.menu_title_LABEL_SET(error_log_frame, ERROR_CONSOLE_TEXT) + error_consol_title_Label.grid(row=1,column=0,padx=20,pady=MENU_PADDING_2) + + error_details_Text = tk.Text(error_log_frame, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), fg="#D37B7B", bg="black", width=110, wrap=tk.WORD, borderwidth=0) + error_details_Text.grid(row=2,column=0,padx=0,pady=0) + error_details_Text.insert("insert", self.error_log_var.get()) + error_details_Text.bind(right_click_button, lambda e:self.right_click_menu_popup(e, text_box=True)) + self.current_text_box = error_details_Text + error_details_Text_scroll = ttk.Scrollbar(error_log_frame, orient=tk.VERTICAL) + error_details_Text.config(yscrollcommand=error_details_Text_scroll.set) + error_details_Text_scroll.configure(command=error_details_Text.yview) + error_details_Text.grid(row=2,sticky=tk.W) + error_details_Text_scroll.grid(row=2, column=1, sticky=tk.NS) + + copy_text_Label = tk.Label(error_log_frame, textvariable=copied_var, font=(MAIN_FONT_NAME, f"{FONT_SIZE_0}"), justify="center", fg="#f4f4f4") + copy_text_Label.grid(padx=20,pady=0) + + copy_text_Button = ttk.Button(error_log_frame, text=COPY_ALL_TEXT_TEXT, width=14, command=lambda:(pyperclip.copy(error_details_Text.get(1.0, tk.END+"-1c")), copied_var.set('Copied!'))) + copy_text_Button.grid(padx=20,pady=MENU_PADDING_1) + + report_issue_Button = ttk.Button(error_log_frame, text=REPORT_ISSUE_TEXT, width=14, command=lambda:webbrowser.open_new_tab(ISSUE_LINK)) + report_issue_Button.grid(padx=20,pady=MENU_PADDING_1) + + error_log_return_Button = ttk.Button(error_log_frame, text=BACK_TO_MAIN_MENU, command=lambda:(self.menu_error_log_close_window(), 
self.menu_settings())) + error_log_return_Button.grid(padx=20,pady=MENU_PADDING_1) + + error_log_close_Button = ttk.Button(error_log_frame, text=CLOSE_WINDOW, command=lambda:self.menu_error_log_close_window()) + error_log_close_Button.grid(padx=20,pady=MENU_PADDING_1) + + self.menu_placement(error_log_screen, UVR_ERROR_LOG_TEXT) + + def menu_secondary_model(self, tab, ai_network_vars: dict): + + #Settings Tab 1 + secondary_model_Frame = self.menu_FRAME_SET(tab) + secondary_model_Frame.grid(row=0) + + settings_title_Label = self.menu_title_LABEL_SET(secondary_model_Frame, SECONDARY_MODEL_TEXT) + settings_title_Label.grid(row=0,column=0,padx=0,pady=MENU_PADDING_3) + + voc_inst_list = self.model_list(VOCAL_STEM, INST_STEM, is_dry_check=True) + other_list = self.model_list(OTHER_STEM, NO_OTHER_STEM, is_dry_check=True) + bass_list = self.model_list(BASS_STEM, NO_BASS_STEM, is_dry_check=True) + drum_list = self.model_list(DRUM_STEM, NO_DRUM_STEM, is_dry_check=True) + + voc_inst_secondary_model_var = ai_network_vars["voc_inst_secondary_model"] + other_secondary_model_var = ai_network_vars["other_secondary_model"] + bass_secondary_model_var = ai_network_vars["bass_secondary_model"] + drums_secondary_model_var = ai_network_vars["drums_secondary_model"] + voc_inst_secondary_model_scale_var = ai_network_vars['voc_inst_secondary_model_scale'] + other_secondary_model_scale_var = ai_network_vars['other_secondary_model_scale'] + bass_secondary_model_scale_var = ai_network_vars['bass_secondary_model_scale'] + drums_secondary_model_scale_var = ai_network_vars['drums_secondary_model_scale'] + is_secondary_model_activate_var = ai_network_vars["is_secondary_model_activate"] + + change_state_lambda = lambda:change_state(tk.NORMAL if is_secondary_model_activate_var.get() else tk.DISABLED) + init_convert_to_percentage = lambda raw_value:f"{int(float(raw_value)*100)}%" + + voc_inst_secondary_model_scale_LABEL_var = tk.StringVar(value=init_convert_to_percentage(voc_inst_secondary_model_scale_var.get())) + other_secondary_model_scale_LABEL_var = tk.StringVar(value=init_convert_to_percentage(other_secondary_model_scale_var.get())) + bass_secondary_model_scale_LABEL_var = tk.StringVar(value=init_convert_to_percentage(bass_secondary_model_scale_var.get())) + drums_secondary_model_scale_LABEL_var = tk.StringVar(value=init_convert_to_percentage(drums_secondary_model_scale_var.get())) + + def change_state(change_state): + for child_widget in secondary_model_Frame.winfo_children(): + if type(child_widget) is ComboBoxMenu: + change_state = READ_ONLY if change_state == tk.NORMAL else change_state + child_widget.configure(state=change_state) + elif type(child_widget) is ttk.Scale: + child_widget.configure(state=change_state) + + def convert_to_percentage(raw_value, scale_var: tk.StringVar, label_var: tk.StringVar): + raw_value = '%0.2f' % float(raw_value) + scale_var.set(raw_value) + label_var.set(f"{int(float(raw_value)*100)}%") + + def build_widgets(stem_pair: str, model_list: list, option_var: tk.StringVar, label_var: tk.StringVar, scale_var: tk.DoubleVar): + model_list.insert(0, NO_MODEL) + secondary_model_Label = self.menu_sub_LABEL_SET(secondary_model_Frame, f'{stem_pair}', font_size=FONT_SIZE_3) + secondary_model_Label.grid(pady=MENU_PADDING_1) + secondary_model_Option = ComboBoxMenu(secondary_model_Frame, textvariable=option_var, values=model_list, dropdown_name=stem_pair, offset=310, width=READ_ONLY_COMBO_WIDTH) + secondary_model_Option.grid(pady=MENU_PADDING_1) + secondary_scale_info_Label = 
tk.Label(secondary_model_Frame, textvariable=label_var, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground=FG_COLOR) + secondary_scale_info_Label.grid(pady=0) + secondary_model_scale_Option = ttk.Scale(secondary_model_Frame, variable=scale_var, from_=0.01, to=0.99, command=lambda s:convert_to_percentage(s, scale_var, label_var), orient='horizontal') + secondary_model_scale_Option.grid(pady=2) + self.help_hints(secondary_model_Label, text=SECONDARY_MODEL_HELP) + self.help_hints(secondary_scale_info_Label, text=SECONDARY_MODEL_SCALE_HELP) + + build_widgets(stem_pair=VOCAL_PAIR, + model_list=voc_inst_list, + option_var=voc_inst_secondary_model_var, + label_var=voc_inst_secondary_model_scale_LABEL_var, + scale_var=voc_inst_secondary_model_scale_var) + + build_widgets(stem_pair=OTHER_PAIR, + model_list=other_list, + option_var=other_secondary_model_var, + label_var=other_secondary_model_scale_LABEL_var, + scale_var=other_secondary_model_scale_var) + + build_widgets(stem_pair=BASS_PAIR, + model_list=bass_list, + option_var=bass_secondary_model_var, + label_var=bass_secondary_model_scale_LABEL_var, + scale_var=bass_secondary_model_scale_var) + + build_widgets(stem_pair=DRUM_PAIR, + model_list=drum_list, + option_var=drums_secondary_model_var, + label_var=drums_secondary_model_scale_LABEL_var, + scale_var=drums_secondary_model_scale_var) + + is_secondary_model_activate_Option = ttk.Checkbutton(secondary_model_Frame, text=ACTIVATE_SECONDARY_MODEL_TEXT, variable=is_secondary_model_activate_var, command=change_state_lambda) + is_secondary_model_activate_Option.grid(row=21,pady=MENU_PADDING_1) + self.help_hints(is_secondary_model_activate_Option, text=SECONDARY_MODEL_ACTIVATE_HELP) + + change_state_lambda() + + self.change_state_lambda = change_state_lambda + + def menu_preproc_model(self, tab): + + preproc_model_Frame = self.menu_FRAME_SET(tab) + preproc_model_Frame.grid(row=0) + + demucs_pre_proc_model_title_Label = self.menu_title_LABEL_SET(preproc_model_Frame, PREPROCESS_MODEL_CHOOSE_TEXT) + demucs_pre_proc_model_title_Label.grid(pady=MENU_PADDING_3) + + pre_proc_list = self.model_list(VOCAL_STEM, INST_STEM, is_dry_check=True, is_no_demucs=True) + pre_proc_list.insert(0, NO_MODEL) + + enable_pre_proc_model = lambda:(is_demucs_pre_proc_model_inst_mix_Option.configure(state=tk.NORMAL), demucs_pre_proc_model_Option.configure(state=READ_ONLY)) + disable_pre_proc_model = lambda:(is_demucs_pre_proc_model_inst_mix_Option.configure(state=tk.DISABLED), demucs_pre_proc_model_Option.configure(state=tk.DISABLED), self.is_demucs_pre_proc_model_inst_mix_var.set(False)) + pre_proc_model_toggle = lambda:enable_pre_proc_model() if self.is_demucs_pre_proc_model_activate_var.get() else disable_pre_proc_model() + + demucs_pre_proc_model_Label = self.menu_sub_LABEL_SET(preproc_model_Frame, SELECT_MODEL_TEXT, font_size=FONT_SIZE_3) + demucs_pre_proc_model_Label.grid() + demucs_pre_proc_model_Option = ComboBoxMenu(preproc_model_Frame, textvariable=self.demucs_pre_proc_model_var, values=pre_proc_list, dropdown_name='demucspre', offset=310, width=READ_ONLY_COMBO_WIDTH) + demucs_pre_proc_model_Option.grid(pady=MENU_PADDING_2) + + is_demucs_pre_proc_model_inst_mix_Option = ttk.Checkbutton(preproc_model_Frame, text='Save Instrumental Mixture', width=DEMUCS_PRE_CHECKBOXS_WIDTH, variable=self.is_demucs_pre_proc_model_inst_mix_var) + is_demucs_pre_proc_model_inst_mix_Option.grid() + self.help_hints(is_demucs_pre_proc_model_inst_mix_Option, text=PRE_PROC_MODEL_INST_MIX_HELP) + + is_demucs_pre_proc_model_activate_Option = 
ttk.Checkbutton(preproc_model_Frame, text=ACTIVATE_PRE_PROCESS_MODEL_TEXT, width=DEMUCS_PRE_CHECKBOXS_WIDTH, variable=self.is_demucs_pre_proc_model_activate_var, command=pre_proc_model_toggle) + is_demucs_pre_proc_model_activate_Option.grid() + self.help_hints(is_demucs_pre_proc_model_activate_Option, text=PRE_PROC_MODEL_ACTIVATE_HELP) + + pre_proc_model_toggle() + + def menu_manual_downloads(self): + + manual_downloads_menu = tk.Toplevel() + model_selection_var = tk.StringVar(value=SELECT_MODEL_TEXT) + #info_text_var = tk.StringVar(value='') + + if self.is_online: + model_data = self.online_data + + # Save the data as a JSON file + with open(DOWNLOAD_MODEL_CACHE, 'w') as json_file: + json.dump(model_data, json_file) + + else: + if os.path.isfile(DOWNLOAD_MODEL_CACHE): + with open(DOWNLOAD_MODEL_CACHE, 'r') as json_file: + model_data = json.load(json_file) + + vr_download_list = model_data["vr_download_list"] + mdx_download_list = model_data["mdx_download_list"] + demucs_download_list = model_data["demucs_download_list"] + mdx_download_list.update(model_data["mdx23c_download_list"]) + + def create_link(link): + final_link = lambda:webbrowser.open_new_tab(link) + return final_link + + def get_links(): + for widgets in manual_downloads_link_Frame.winfo_children(): + widgets.destroy() + + main_selection = model_selection_var.get() + + MAIN_ROW = 0 + + self.menu_sub_LABEL_SET(manual_downloads_link_Frame, 'Download Link(s)').grid(row=0,column=0,padx=0,pady=MENU_PADDING_4) + + if VR_ARCH_TYPE in main_selection: + main_selection = vr_download_list[main_selection] + model_dir = VR_MODELS_DIR + elif MDX_ARCH_TYPE in main_selection or MDX_23_NAME in main_selection: + if isinstance(mdx_download_list[main_selection], dict): + main_selection = mdx_download_list[main_selection] + main_selection = list(main_selection.keys())[0] + else: + main_selection = mdx_download_list[main_selection] + + model_dir = MDX_MODELS_DIR + + elif DEMUCS_ARCH_TYPE in main_selection: + model_dir = DEMUCS_NEWER_REPO_DIR if 'v3' in main_selection or 'v4' in main_selection else DEMUCS_MODELS_DIR + main_selection = demucs_download_list[main_selection] + + if type(main_selection) is dict: + for links in main_selection.values(): + MAIN_ROW += 1 + button_text = f" - Item {MAIN_ROW}" if len(main_selection.keys()) >= 2 else '' + link = create_link(links) + link_button = ttk.Button(manual_downloads_link_Frame, text=f"Open Link to Model{button_text}", command=link).grid(row=MAIN_ROW,column=0,padx=0,pady=MENU_PADDING_1) + else: + link = f"{NORMAL_REPO}{main_selection}" + link_button = ttk.Button(manual_downloads_link_Frame, text=OPEN_LINK_TO_MODEL_TEXT, command=lambda:webbrowser.open_new_tab(link)) + link_button.grid(row=1,column=0,padx=0,pady=MENU_PADDING_2) + + self.menu_sub_LABEL_SET(manual_downloads_link_Frame, SELECTED_MODEL_PLACE_PATH_TEXT).grid(row=MAIN_ROW+2,column=0,padx=0,pady=MENU_PADDING_4) + ttk.Button(manual_downloads_link_Frame, text=OPEN_MODEL_DIRECTORY_TEXT, command=lambda:OPEN_FILE_func(model_dir)).grid(row=MAIN_ROW+3,column=0,padx=0,pady=MENU_PADDING_1) + + manual_downloads_menu_Frame = self.menu_FRAME_SET(manual_downloads_menu) + manual_downloads_menu_Frame.grid(row=0) + + manual_downloads_link_Frame = self.menu_FRAME_SET(manual_downloads_menu, thickness=5) + manual_downloads_link_Frame.grid(row=1) + + manual_downloads_menu_title_Label = self.menu_title_LABEL_SET(manual_downloads_menu_Frame, MANUAL_DOWNLOADS_TEXT, width=45) + manual_downloads_menu_title_Label.grid(row=0,column=0,padx=0,pady=MENU_PADDING_3) + + 
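+ # Cascaded model dropdown (VR / MDX-Net / Demucs); VR and MDX-Net entries are hidden once the model file is already installed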
manual_downloads_menu_select_Label = self.menu_sub_LABEL_SET(manual_downloads_menu_Frame, SELECT_MODEL_TEXT) + manual_downloads_menu_select_Label.grid(row=1,column=0,padx=0,pady=MENU_PADDING_1) + + manual_downloads_menu_select_Option = ttk.OptionMenu(manual_downloads_menu_Frame, model_selection_var) + manual_downloads_menu_select_VR_Option = tk.Menu(manual_downloads_menu_select_Option['menu']) + manual_downloads_menu_select_MDX_Option = tk.Menu(manual_downloads_menu_select_Option['menu']) + manual_downloads_menu_select_DEMUCS_Option = tk.Menu(manual_downloads_menu_select_Option['menu']) + manual_downloads_menu_select_Option['menu'].add_cascade(label='VR Models', menu= manual_downloads_menu_select_VR_Option) + manual_downloads_menu_select_Option['menu'].add_cascade(label='MDX-Net Models', menu= manual_downloads_menu_select_MDX_Option) + manual_downloads_menu_select_Option['menu'].add_cascade(label='Demucs Models', menu= manual_downloads_menu_select_DEMUCS_Option) + + for model_selection_vr in vr_download_list.keys(): + if not os.path.isfile(os.path.join(VR_MODELS_DIR, vr_download_list[model_selection_vr])): + manual_downloads_menu_select_VR_Option.add_radiobutton(label=model_selection_vr, variable=model_selection_var, command=get_links) + + for model_selection_mdx in mdx_download_list.keys(): + + model_name = mdx_download_list[model_selection_mdx] + + if isinstance(model_name, dict): + items_list = list(model_name.items()) + model_name, config = items_list[0] + config_link = f"{MDX23_CONFIG_CHECKS}{config}" + config_local = os.path.join(MDX_C_CONFIG_PATH, config) + if not os.path.isfile(config_local): + try: + with urllib.request.urlopen(config_link) as response: + with open(config_local, 'wb') as out_file: + out_file.write(response.read()) + except Exception as e: + model_name = None + + #print(model_name) + + if model_name: + if not os.path.isfile(os.path.join(MDX_MODELS_DIR, model_name)): + manual_downloads_menu_select_MDX_Option.add_radiobutton(label=model_selection_mdx, variable=model_selection_var, command=get_links) + + for model_selection_demucs in demucs_download_list.keys(): + manual_downloads_menu_select_DEMUCS_Option.add_radiobutton(label=model_selection_demucs, variable=model_selection_var, command=get_links) + + manual_downloads_menu_select_Option.grid(row=2,column=0,padx=0,pady=MENU_PADDING_1) + + self.menu_placement(manual_downloads_menu, MANUAL_DOWNLOADS_TEXT, pop_up=True, close_function=lambda:manual_downloads_menu.destroy()) + + def invalid_tooltip(self, widget, pattern=None): + tooltip = ToolTip(widget) + invalid_message = lambda:tooltip.showtip(INVALID_INPUT_E, True) + + def invalid_message_(): + tooltip.showtip(INVALID_INPUT_E, True) + + def validation(value): + if re.fullmatch(modified_pattern, value) is None: + return False + else: + return True + + if not pattern: + pattern = r'^[a-zA-Z0-9 -]{0,25}$' + + modified_pattern = f"({pattern}|)" + + widget.configure( + validate='key', + validatecommand=(self.register(validation), '%P'), + invalidcommand=(self.register(invalid_message)) + ) + + return invalid_message_ + + def pop_up_save_current_settings(self): + """Save current application settings as...""" + + settings_save = tk.Toplevel(root) + + settings_save_var = tk.StringVar(value='') + + settings_save_Frame = self.menu_FRAME_SET(settings_save) + settings_save_Frame.grid(row=1) + + save_func = lambda:(self.pop_up_save_current_settings_sub_json_dump(settings_save_var.get()), settings_save.destroy()) + validation = lambda value:False if re.fullmatch(REG_SAVE_INPUT, 
value) is None else True + + settings_save_title = self.menu_title_LABEL_SET(settings_save_Frame, SAVE_CURRENT_SETTINGS_TEXT) + settings_save_title.grid() + + settings_save_name_Label = self.menu_sub_LABEL_SET(settings_save_Frame, NAME_SETTINGS_TEXT) + settings_save_name_Label.grid(pady=MENU_PADDING_1) + settings_save_name_Entry = ttk.Entry(settings_save_Frame, textvariable=settings_save_var, justify='center', width=25) + settings_save_name_Entry.grid(pady=MENU_PADDING_1) + invalid_message = self.invalid_tooltip(settings_save_name_Entry) + settings_save_name_Entry.bind(right_click_button, self.right_click_menu_popup) + self.current_text_box = settings_save_name_Entry + settings_save_name_Entry.focus_set() + + self.spacer_label(settings_save_Frame) + + entry_rules_Label = tk.Label(settings_save_Frame, text=ENSEMBLE_INPUT_RULE, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground='#868687', justify="left") + entry_rules_Label.grid() + + settings_save_Button = ttk.Button(settings_save_Frame, text=SAVE_TEXT, command=lambda:save_func() if validation(settings_save_var.get()) else invalid_message()) + settings_save_Button.grid(pady=MENU_PADDING_1) + + stop_process_Button = ttk.Button(settings_save_Frame, text=CANCEL_TEXT, command=lambda:settings_save.destroy()) + stop_process_Button.grid(pady=MENU_PADDING_1) + + self.menu_placement(settings_save, SAVE_CURRENT_SETTINGS_TEXT, pop_up=True) + + def pop_up_save_current_settings_sub_json_dump(self, settings_save_name: str): + """Dumps current application settings to a json named after user input""" + + if settings_save_name: + self.save_current_settings_var.set(settings_save_name) + settings_save_name = settings_save_name.replace(" ", "_") + current_settings = self.save_values(app_close=False) + + saved_data_dump = json.dumps(current_settings, indent=4) + with open(os.path.join(SETTINGS_CACHE_DIR, f'{settings_save_name}.json'), "w") as outfile: + outfile.write(saved_data_dump) + + def pop_up_update_confirmation(self): + """Ask user is they want to update""" + + is_new_update = self.online_data_refresh(confirmation_box=True) + is_download_in_app_var = tk.BooleanVar(value=False) + + def update_type(): + if is_download_in_app_var.get(): + self.download_item(is_update_app=True) + else: + webbrowser.open_new_tab(self.download_update_link_var.get()) + + update_confirmation_win.destroy() + + if is_new_update: + + update_confirmation_win = tk.Toplevel() + + update_confirmation_Frame = self.menu_FRAME_SET(update_confirmation_win) + update_confirmation_Frame.grid(row=0) + + update_found_label = self.menu_title_LABEL_SET(update_confirmation_Frame, UPDATE_FOUND_TEXT, width=15) + update_found_label.grid(row=0,column=0,padx=0,pady=MENU_PADDING_2) + + confirm_update_label = self.menu_sub_LABEL_SET(update_confirmation_Frame, UPDATE_CONFIRMATION_TEXT, font_size=FONT_SIZE_3) + confirm_update_label.grid(row=1,column=0,padx=0,pady=MENU_PADDING_1) + + yes_button = ttk.Button(update_confirmation_Frame, text=YES_TEXT, command=update_type) + yes_button.grid(row=2,column=0,padx=0,pady=MENU_PADDING_1) + + no_button = ttk.Button(update_confirmation_Frame, text=NO_TEXT, command=lambda:(update_confirmation_win.destroy())) + no_button.grid(row=3,column=0,padx=0,pady=MENU_PADDING_1) + + if is_windows: + download_outside_application_button = ttk.Checkbutton(update_confirmation_Frame, variable=is_download_in_app_var, text='Download Update in Application') + download_outside_application_button.grid(row=4,column=0,padx=0,pady=MENU_PADDING_1) + + 
self.menu_placement(update_confirmation_win, CONFIRM_UPDATE_TEXT, pop_up=True) + + def pop_up_user_code_input(self): + """Input VIP Code""" + + self.user_code_validation_var.set('') + + self.user_code = tk.Toplevel() + + user_code_Frame = self.menu_FRAME_SET(self.user_code) + user_code_Frame.grid(row=0) + + user_code_title_Label = self.menu_title_LABEL_SET(user_code_Frame, USER_DOWNLOAD_CODES_TEXT, width=20) + user_code_title_Label.grid(row=0,column=0,padx=0,pady=MENU_PADDING_1) + + user_code_Label = self.menu_sub_LABEL_SET(user_code_Frame, DOWNLOAD_CODE_TEXT) + user_code_Label.grid(pady=MENU_PADDING_1) + + self.user_code_Entry = ttk.Entry(user_code_Frame, textvariable=self.user_code_var, justify='center') + self.user_code_Entry.grid(pady=MENU_PADDING_1) + self.user_code_Entry.bind(right_click_button, self.right_click_menu_popup) + self.current_text_box = self.user_code_Entry + + tooltip = ToolTip(self.user_code_Entry) + def invalid_message_(text, is_success_message): + tooltip.hidetip() + tooltip.showtip(text, True, is_success_message) + + self.spacer_label(user_code_Frame) + + user_code_confrim_Button = ttk.Button(user_code_Frame, text=CONFIRM_TEXT, command=lambda:self.download_validate_code(confirm=True, code_message=invalid_message_)) + user_code_confrim_Button.grid(pady=MENU_PADDING_1) + + user_code_cancel_Button = ttk.Button(user_code_Frame, text=CANCEL_TEXT, command=lambda:self.user_code.destroy()) + user_code_cancel_Button.grid(pady=MENU_PADDING_1) + + support_title_Label = self.menu_title_LABEL_SET(user_code_Frame, text=SUPPORT_UVR_TEXT, width=20) + support_title_Label.grid(pady=MENU_PADDING_1) + + support_sub_Label = tk.Label(user_code_Frame, text=GET_DL_VIP_CODE_TEXT, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground=FG_COLOR) + support_sub_Label.grid(pady=MENU_PADDING_1) + + uvr_patreon_Button = ttk.Button(user_code_Frame, text=UVR_PATREON_LINK_TEXT, command=lambda:webbrowser.open_new_tab(DONATE_LINK_PATREON)) + uvr_patreon_Button.grid(pady=MENU_PADDING_1) + + bmac_patreon_Button=ttk.Button(user_code_Frame, text=BMAC_UVR_TEXT, command=lambda:webbrowser.open_new_tab(DONATE_LINK_BMAC)) + bmac_patreon_Button.grid(pady=MENU_PADDING_1) + + self.menu_placement(self.user_code, INPUT_CODE_TEXT, pop_up=True) + + def pop_up_change_model_defaults(self, top_window): + """ + Change model defaults... 
+ """ + + def message_box_(text, is_success_message): + tooltip.hidetip() + tooltip.showtip(text, True, is_success_message) + + def delete_entry(): + model_data = self.assemble_model_data(model=change_model_defaults_var.get(), arch_type=ENSEMBLE_CHECK, is_change_def=True, is_get_hash_dir_only=True)[0] + hash_file = model_data.model_hash_dir + if hash_file: + if os.path.isfile(hash_file): + os.remove(hash_file) + message_box_("Defined Parameters Deleted", True) + else: + message_box_("No Defined Parameters Found", False) + + self.update_checkbox_text() + + def change_default(): + model_data = self.assemble_model_data(model=change_model_defaults_var.get(), arch_type=ENSEMBLE_CHECK, is_change_def=True)[0] + if model_data.model_status: + message_box_("Model Parameters Changed", True) + self.update_checkbox_text() + + change_model_defaults = tk.Toplevel(root) + change_model_defaults_var = tk.StringVar(value=NO_MODEL) + + default_change_model_list = list(self.default_change_model_list) + default_change_model_list.insert(0, NO_MODEL) + + change_model_defaults_Frame = self.menu_FRAME_SET(change_model_defaults) + change_model_defaults_Frame.grid(row=1) + + change_model_defaults_title = self.menu_title_LABEL_SET(change_model_defaults_Frame, CHANGE_MODEL_DEFAULT_TEXT) + change_model_defaults_title.grid() + + model_param_Label = self.menu_sub_LABEL_SET(change_model_defaults_Frame, SELECT_MODEL_TEXT) + model_param_Label.grid(pady=MENU_PADDING_1) + model_param_Option = ComboBoxMenu(change_model_defaults_Frame, dropdown_name='changemodeldefault', textvariable=change_model_defaults_var, values=default_change_model_list, offset=310, width=READ_ONLY_COMBO_WIDTH) + model_param_Option.grid(pady=MENU_PADDING_1) + tooltip = ToolTip(model_param_Option) + + self.spacer_label(change_model_defaults_Frame) + + change_params_Button = ttk.Button(change_model_defaults_Frame, text=CHANGE_PARAMETERS_TEXT, command=change_default, width=20) + change_params_Button.grid(pady=MENU_PADDING_1) + + delete_params_Button = ttk.Button(change_model_defaults_Frame, text=DELETE_PARAMETERS_TEXT, command=delete_entry, width=20) + delete_params_Button.grid(pady=MENU_PADDING_1) + + cancel_Button = ttk.Button(change_model_defaults_Frame, text=CANCEL_TEXT, command=lambda:change_model_defaults.destroy()) + cancel_Button.grid(pady=MENU_PADDING_1) + + self.menu_placement(change_model_defaults, CHANGE_MODEL_DEFAULT_TEXT, top_window=top_window) + + def pop_up_set_vocal_splitter(self, top_window): + """ + Set vocal splitter + """ + + try: + set_vocal_splitter = tk.Toplevel(root) + + model_list = self.assemble_model_data(arch_type=KARAOKEE_CHECK, is_dry_check=True) + if not model_list: + self.set_vocal_splitter_var.set(NO_MODEL) + model_list.insert(0, NO_MODEL) + + enable_voc_split_model = lambda:(model_select_Option.configure(state=READ_ONLY), save_inst_Button.configure(state=tk.NORMAL)) + disable_voc_split_model = lambda:(model_select_Option.configure(state=tk.DISABLED), save_inst_Button.configure(state=tk.DISABLED), self.is_save_inst_set_vocal_splitter_var.set(False)) + voc_split_model_toggle = lambda:enable_voc_split_model() if self.is_set_vocal_splitter_var.get() else disable_voc_split_model() + + enable_deverb_opt = lambda:(deverb_vocals_Option.configure(state=READ_ONLY)) + disable_deverb_opt= lambda:(deverb_vocals_Option.configure(state=tk.DISABLED)) + deverb_opt_toggle = lambda:enable_deverb_opt() if self.is_deverb_vocals_var.get() else disable_deverb_opt() + + set_vocal_splitter_Frame = self.menu_FRAME_SET(set_vocal_splitter) + 
set_vocal_splitter_Frame.grid(row=1) + + set_vocal_splitter_title = self.menu_title_LABEL_SET(set_vocal_splitter_Frame, VOCAL_SPLIT_MODE_OPTIONS_TEXT) + set_vocal_splitter_title.grid(pady=MENU_PADDING_2) + + model_select_Label = self.menu_sub_LABEL_SET(set_vocal_splitter_Frame, SELECT_MODEL_TEXT) + model_select_Label.grid(pady=MENU_PADDING_1) + model_select_Option = ComboBoxMenu(set_vocal_splitter_Frame, dropdown_name='setvocalsplit', textvariable=self.set_vocal_splitter_var, values=model_list, offset=310, width=READ_ONLY_COMBO_WIDTH) + model_select_Option.grid(pady=7) + self.help_hints(model_select_Option, text=VOC_SPLIT_MODEL_SELECT_HELP)# + + save_inst_Button = ttk.Checkbutton(set_vocal_splitter_Frame, text=SAVE_SPLIT_VOCAL_INSTRUMENTALS_TEXT, variable=self.is_save_inst_set_vocal_splitter_var, width=SET_VOC_SPLIT_CHECK_WIDTH, command=voc_split_model_toggle) + save_inst_Button.grid()# + self.help_hints(save_inst_Button, text=IS_VOC_SPLIT_INST_SAVE_SELECT_HELP)# + + change_params_Button = ttk.Checkbutton(set_vocal_splitter_Frame, text=ENABLE_VOCAL_SPLIT_MODE_TEXT, variable=self.is_set_vocal_splitter_var, width=SET_VOC_SPLIT_CHECK_WIDTH, command=voc_split_model_toggle) + change_params_Button.grid()# + self.help_hints(change_params_Button, text=IS_VOC_SPLIT_MODEL_SELECT_HELP)# + + set_vocal_splitter_title = self.menu_title_LABEL_SET(set_vocal_splitter_Frame, VOCAL_DEVERB_OPTIONS_TEXT) + set_vocal_splitter_title.grid(pady=MENU_PADDING_2) + + deverb_vocals_Label = self.menu_sub_LABEL_SET(set_vocal_splitter_Frame, 'Select Vocal Type to Deverb') + deverb_vocals_Label.grid(pady=MENU_PADDING_1) + deverb_vocals_Option = ComboBoxMenu(set_vocal_splitter_Frame, dropdown_name='setvocaldeverb', textvariable=self.deverb_vocal_opt_var, values=list(DEVERB_MAPPER.keys()), width=23) + deverb_vocals_Option.grid(pady=7) + self.help_hints(deverb_vocals_Option, text=IS_DEVERB_OPT_HELP)# + + is_deverb_vocals_Option = ttk.Checkbutton(set_vocal_splitter_Frame, text=DEVERB_VOCALS_TEXT, width=15 if is_windows else 11, variable=self.is_deverb_vocals_var, command=deverb_opt_toggle) + is_deverb_vocals_Option.grid(pady=0) + self.help_hints(is_deverb_vocals_Option, text=IS_DEVERB_VOC_HELP)# + + if not os.path.isfile(DEVERBER_MODEL_PATH): + self.is_deverb_vocals_var.set(False) + is_deverb_vocals_Option.configure(state=tk.DISABLED) + disable_deverb_opt() + + cancel_Button = ttk.Button(set_vocal_splitter_Frame, text=CLOSE_WINDOW, command=lambda:set_vocal_splitter.destroy(), width=16) + cancel_Button.grid(pady=MENU_PADDING_3) + + voc_split_model_toggle() + deverb_opt_toggle() + + self.menu_placement(set_vocal_splitter, VOCAL_SPLIT_OPTIONS_TEXT, top_window=top_window, pop_up=True) + except Exception as e: + error_name = f'{type(e).__name__}' + traceback_text = ''.join(traceback.format_tb(e.__traceback__)) + message = f'{error_name}: "{e}"\n{traceback_text}"' + self.error_log_var.set(message) + + def pop_up_mdx_model(self, mdx_model_hash, model_path): + """Opens MDX-Net model settings""" + + is_compatible_model = True + is_ckpt = False + primary_stem = VOCAL_STEM + + try: + if model_path.endswith(ONNX): + model = onnx.load(model_path) + model_shapes = [[d.dim_value for d in _input.type.tensor_type.shape.dim] for _input in model.graph.input][0] + dim_f = model_shapes[2] + dim_t = int(math.log(model_shapes[3], 2)) + n_fft = '6144' + + if model_path.endswith(CKPT): + is_ckpt = True + model_params = torch.load(model_path, map_location=lambda storage, loc: storage) + model_params = model_params['hyper_parameters'] + dim_f = 
model_params['dim_f'] + dim_t = int(math.log(model_params['dim_t'], 2)) + n_fft = model_params['n_fft'] + + for stem in STEM_SET_MENU: + if model_params['target_name'] == stem.lower(): + primary_stem = INST_STEM if model_params['target_name'] == OTHER_STEM.lower() else stem + + except Exception as e: + error_name = f'{type(e).__name__}' + traceback_text = ''.join(traceback.format_tb(e.__traceback__)) + message = f'{error_name}: "{e}"\n{traceback_text}"' + #self.error_log_var.set(message) + is_compatible_model = False + if is_ckpt: + self.pop_up_mdx_c_param(mdx_model_hash) + else: + dim_f = 0 + dim_t = 0 + self.error_dialoge(INVALID_ONNX_MODEL_ERROR) + self.error_log_var.set("{}".format(error_text('MDX-Net Model Settings', e))) + self.mdx_model_params = None + + if is_compatible_model: + mdx_model_set = tk.Toplevel(root) + mdx_n_fft_scale_set_var = tk.StringVar(value=n_fft) + mdx_dim_f_set_var = tk.StringVar(value=dim_f) + mdx_dim_t_set_var = tk.StringVar(value=dim_t) + primary_stem_var = tk.StringVar(value=primary_stem) + mdx_compensate_var = tk.StringVar(value=1.035) + + balance_value_var = tk.StringVar(value=0) + is_kara_model_var = tk.BooleanVar(value=False) + is_bv_model_var = tk.BooleanVar(value=False) + + def toggle_kara(): + if is_kara_model_var.get(): + is_bv_model_var.set(False) + balance_value_Option.configure(state=tk.DISABLED) + + def toggle_bv(): + if is_bv_model_var.get(): + is_kara_model_var.set(False) + balance_value_Option.configure(state=READ_ONLY) + else: + balance_value_Option.configure(state=tk.DISABLED) + + def opt_menu_selection(selection): + if not selection in [VOCAL_STEM, INST_STEM]: + balance_value_Option.configure(state=tk.DISABLED) + is_kara_model_Option.configure(state=tk.DISABLED) + is_bv_model_Option.configure(state=tk.DISABLED) + is_kara_model_var.set(False) + is_bv_model_var.set(False) + balance_value_var.set(0) + else: + is_kara_model_Option.configure(state=tk.NORMAL) + is_bv_model_Option.configure(state=tk.NORMAL) + + mdx_model_set_Frame = self.menu_FRAME_SET(mdx_model_set) + mdx_model_set_Frame.grid(row=2) + + mdx_model_set_title = self.menu_title_LABEL_SET(mdx_model_set_Frame, SPECIFY_MDX_NET_MODEL_PARAMETERS_TEXT) + mdx_model_set_title.grid(pady=MENU_PADDING_3) + + set_stem_name_Label = self.menu_sub_LABEL_SET(mdx_model_set_Frame, PRIMARY_STEM_TEXT) + set_stem_name_Label.grid(pady=MENU_PADDING_1) + set_stem_name_Option = ttk.OptionMenu(mdx_model_set_Frame, primary_stem_var, None, *STEM_SET_MENU, command=opt_menu_selection) + set_stem_name_Option.configure(width=15) + set_stem_name_Option.grid(pady=MENU_PADDING_1) + set_stem_name_Option['menu'].insert_separator(len(STEM_SET_MENU)) + set_stem_name_Option['menu'].add_radiobutton(label=INPUT_STEM_NAME, command=tk._setit(primary_stem_var, INPUT_STEM_NAME, lambda e:self.pop_up_input_stem_name(primary_stem_var, mdx_model_set))) + self.help_hints(set_stem_name_Label, text=SET_STEM_NAME_HELP) + + is_kara_model_Option = ttk.Checkbutton(mdx_model_set_Frame, text=KARAOKE_MODEL_TEXT, width=SET_MENUS_CHECK_WIDTH, variable=is_kara_model_var, command=toggle_kara) + is_kara_model_Option.grid(pady=0) + + is_bv_model_Option = ttk.Checkbutton(mdx_model_set_Frame, text=BV_MODEL_TEXT, width=SET_MENUS_CHECK_WIDTH, variable=is_bv_model_var, command=toggle_bv) + is_bv_model_Option.grid(pady=0) + + balance_value_Label = self.menu_sub_LABEL_SET(mdx_model_set_Frame, BALANCE_VALUE_TEXT) + balance_value_Label.grid(pady=MENU_PADDING_1) + balance_value_Option = ComboBoxMenu(mdx_model_set_Frame, textvariable=balance_value_var, 
values=BALANCE_VALUES, width=COMBO_WIDTH) + balance_value_Option.configure(state=tk.DISABLED) + balance_value_Option.grid(pady=MENU_PADDING_1) + #self.help_hints(balance_value_Label, text=balance_value_HELP) + + mdx_dim_t_set_Label = self.menu_sub_LABEL_SET(mdx_model_set_Frame, 'Dim_t') + mdx_dim_t_set_Label.grid(pady=MENU_PADDING_1) + mdx_dim_f_set_Label = self.menu_sub_LABEL_SET(mdx_model_set_Frame, MDX_MENU_WAR_TEXT) + mdx_dim_f_set_Label.grid(pady=MENU_PADDING_1) + mdx_dim_t_set_Option = ComboBoxEditableMenu(mdx_model_set_Frame, values=('7', '8'), textvariable=mdx_dim_t_set_var, pattern=REG_SHIFTS, default=mdx_dim_t_set_var.get(), width=COMBO_WIDTH, is_stay_disabled=is_ckpt) + mdx_dim_t_set_Option.grid(pady=MENU_PADDING_1) + self.help_hints(mdx_dim_t_set_Label, text=MDX_DIM_T_SET_HELP) + + mdx_dim_f_set_Label = self.menu_sub_LABEL_SET(mdx_model_set_Frame, 'Dim_f') + mdx_dim_f_set_Label.grid(pady=MENU_PADDING_1) + mdx_dim_f_set_Label = self.menu_sub_LABEL_SET(mdx_model_set_Frame, MDX_MENU_WAR_TEXT) + mdx_dim_f_set_Label.grid(pady=MENU_PADDING_1) + mdx_dim_f_set_Option = ComboBoxEditableMenu(mdx_model_set_Frame, values=(MDX_POP_DIMF), textvariable=mdx_dim_f_set_var, pattern=REG_SHIFTS, default=mdx_dim_f_set_var.get(), width=COMBO_WIDTH, is_stay_disabled=is_ckpt) + mdx_dim_f_set_Option.grid(pady=MENU_PADDING_1) + self.help_hints(mdx_dim_f_set_Label, text=MDX_DIM_F_SET_HELP) + + mdx_n_fft_scale_set_Label = self.menu_sub_LABEL_SET(mdx_model_set_Frame, 'N_FFT Scale') + mdx_n_fft_scale_set_Label.grid(pady=MENU_PADDING_1) + mdx_n_fft_scale_set_Option = ComboBoxEditableMenu(mdx_model_set_Frame, values=(MDX_POP_NFFT), textvariable=mdx_n_fft_scale_set_var, pattern=REG_SHIFTS, default=mdx_n_fft_scale_set_var.get(), width=COMBO_WIDTH, is_stay_disabled=is_ckpt) + mdx_n_fft_scale_set_Option.grid(pady=MENU_PADDING_1) + self.help_hints(mdx_n_fft_scale_set_Label, text=MDX_N_FFT_SCALE_SET_HELP) + + mdx_compensate_Label = self.menu_sub_LABEL_SET(mdx_model_set_Frame, VOLUME_COMPENSATION_TEXT) + mdx_compensate_Label.grid(pady=MENU_PADDING_1) + mdx_compensate_Entry = ComboBoxEditableMenu(mdx_model_set_Frame, values=('1.035', '1.08'), textvariable=mdx_compensate_var, pattern=REG_VOL_COMP, default=mdx_compensate_var.get(), width=COMBO_WIDTH) + mdx_compensate_Entry.grid(pady=MENU_PADDING_1) + self.help_hints(mdx_compensate_Label, text=POPUP_COMPENSATE_HELP) + + mdx_param_set_Button = ttk.Button(mdx_model_set_Frame, text=CONFIRM_TEXT, command=lambda:pull_data()) + mdx_param_set_Button.grid(pady=MENU_PADDING_2) + + stop_process_Button = ttk.Button(mdx_model_set_Frame, text=CANCEL_TEXT, command=lambda:cancel()) + stop_process_Button.grid(pady=0) + + if is_ckpt: + mdx_dim_t_set_Option.configure(state=tk.DISABLED) + mdx_dim_f_set_Option.configure(state=tk.DISABLED) + mdx_n_fft_scale_set_Option.configure(state=tk.DISABLED) + + def pull_data(): + mdx_model_params = { + 'compensate': float(mdx_compensate_var.get()), + 'mdx_dim_f_set': int(mdx_dim_f_set_var.get()), + 'mdx_dim_t_set': int(mdx_dim_t_set_var.get()), + 'mdx_n_fft_scale_set': int(mdx_n_fft_scale_set_var.get()), + 'primary_stem': primary_stem_var.get(), + IS_KARAOKEE: bool(is_kara_model_var.get()), + IS_BV_MODEL: bool(is_bv_model_var.get()), + IS_BV_MODEL_REBAL: float(balance_value_var.get()) + } + + self.pop_up_mdx_model_sub_json_dump(mdx_model_params, mdx_model_hash) + mdx_model_set.destroy() + + def cancel(): + mdx_model_set.destroy() + + mdx_model_set.protocol("WM_DELETE_WINDOW", cancel) + + frame_list = [mdx_model_set_Frame] + 
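+ # On Confirm, pull_data() (defined above) gathers these widget values and hands them
+ # to pop_up_mdx_model_sub_json_dump(), which writes them to MDX_HASH_DIR/<model_hash>.json.
+ # A rough sketch of that file, with illustrative values only (the last three keys are
+ # named by the IS_KARAOKEE, IS_BV_MODEL and IS_BV_MODEL_REBAL constants):
+ # {
+ #     "compensate": 1.035,
+ #     "mdx_dim_f_set": 3072,
+ #     "mdx_dim_t_set": 8,
+ #     "mdx_n_fft_scale_set": 6144,
+ #     "primary_stem": "Vocals",
+ #     ...
+ # }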
opt_menu_selection(primary_stem_var.get()) + self.menu_placement(mdx_model_set, SPECIFY_PARAMETERS_TEXT, pop_up=False if is_macos else True, frame_list=frame_list) + + def pop_up_mdx_model_sub_json_dump(self, mdx_model_params, mdx_model_hash): + """Dumps current selected MDX-Net model settings to a json named after model hash""" + + self.mdx_model_params = mdx_model_params + + mdx_model_params_dump = json.dumps(mdx_model_params, indent=4) + with open(os.path.join(MDX_HASH_DIR, f'{mdx_model_hash}.json'), "w") as outfile: + outfile.write(mdx_model_params_dump) + + def pop_up_mdx_c_param(self, mdx_model_hash): + """Opens MDX-C param settings""" + + mdx_c_param_menu = tk.Toplevel() + + get_mdx_c_params = lambda dir, ext:tuple(os.path.splitext(x)[0] for x in os.listdir(dir) if x.endswith(ext)) + new_mdx_c_params = get_mdx_c_params(MDX_C_CONFIG_PATH, YAML) + mdx_c_model_param_var = tk.StringVar(value=NONE_SELECTED) + + def pull_data(): + mdx_c_model_params = { + 'config_yaml': f"{mdx_c_model_param_var.get()}{YAML}"} + + if not mdx_c_model_param_var.get() == NONE_SELECTED: + self.pop_up_mdx_model_sub_json_dump(mdx_c_model_params, mdx_model_hash) + mdx_c_param_menu.destroy() + else: + self.mdx_model_params = None + + def cancel(): + self.mdx_model_params = None + mdx_c_param_menu.destroy() + + mdx_c_param_Frame = self.menu_FRAME_SET(mdx_c_param_menu) + mdx_c_param_Frame.grid(row=0) + + mdx_c_param_title_title = self.menu_title_LABEL_SET(mdx_c_param_Frame, MDXNET_C_MODEL_PARAMETERS_TEXT, width=28) + mdx_c_param_title_title.grid(row=0,column=0,padx=0,pady=0) + + mdx_c_model_param_Label = self.menu_sub_LABEL_SET(mdx_c_param_Frame, SELECT_MODEL_PARAM_TEXT) + mdx_c_model_param_Label.grid(pady=MENU_PADDING_1) + mdx_c_model_param_Option = ComboBoxMenu(mdx_c_param_Frame, textvariable=mdx_c_model_param_var, values=new_mdx_c_params, width=30) + mdx_c_model_param_Option.grid(padx=20,pady=MENU_PADDING_1) + self.help_hints(mdx_c_model_param_Label, text=VR_MODEL_PARAM_HELP) + + mdx_c_param_confrim_Button = ttk.Button(mdx_c_param_Frame, text=CONFIRM_TEXT, command=lambda:pull_data()) + mdx_c_param_confrim_Button.grid(pady=MENU_PADDING_1) + + mdx_c_param_cancel_Button = ttk.Button(mdx_c_param_Frame, text=CANCEL_TEXT, command=cancel) + mdx_c_param_cancel_Button.grid(pady=MENU_PADDING_1) + + mdx_c_param_menu.protocol("WM_DELETE_WINDOW", cancel) + + self.menu_placement(mdx_c_param_menu, CHOOSE_MODEL_PARAM_TEXT, pop_up=True) + + def pop_up_vr_param(self, vr_model_hash): + """Opens VR param settings""" + + vr_param_menu = tk.Toplevel() + + get_vr_params = lambda dir, ext:tuple(os.path.splitext(x)[0] for x in os.listdir(dir) if x.endswith(ext)) + new_vr_params = get_vr_params(VR_PARAM_DIR, JSON) + vr_model_param_var = tk.StringVar(value=NONE_SELECTED) + vr_model_stem_var = tk.StringVar(value='Vocals') + vr_model_nout_var = tk.StringVar(value=32) + vr_model_nout_lstm_var = tk.StringVar(value=128) + is_new_vr_model_var = tk.BooleanVar(value=False) + balance_value_var = tk.StringVar(value=0) + is_kara_model_var = tk.BooleanVar(value=False) + is_bv_model_var = tk.BooleanVar(value=False) + + enable_new_vr_op = lambda:(vr_model_nout_Option.configure(state=READ_ONLY), vr_model_nout_lstm_Option.configure(state=READ_ONLY)) + disable_new_vr_op = lambda:(vr_model_nout_Option.configure(state=tk.DISABLED), vr_model_nout_lstm_Option.configure(state=tk.DISABLED)) + vr_new_toggle = lambda:enable_new_vr_op() if is_new_vr_model_var.get() else disable_new_vr_op() + + def pull_data(): + if is_new_vr_model_var.get(): + vr_model_params = { + 
'vr_model_param': vr_model_param_var.get(), + 'primary_stem': vr_model_stem_var.get(), + 'nout': int(vr_model_nout_var.get()), + 'nout_lstm': int(vr_model_nout_lstm_var.get()), + IS_KARAOKEE: bool(is_kara_model_var.get()), + IS_BV_MODEL: bool(is_bv_model_var.get()), + IS_BV_MODEL_REBAL: float(balance_value_var.get()) + } + else: + vr_model_params = { + 'vr_model_param': vr_model_param_var.get(), + 'primary_stem': vr_model_stem_var.get(), + IS_KARAOKEE: bool(is_kara_model_var.get()), + IS_BV_MODEL: bool(is_bv_model_var.get()), + IS_BV_MODEL_REBAL: float(balance_value_var.get())} + + if not vr_model_param_var.get() == NONE_SELECTED: + self.pop_up_vr_param_sub_json_dump(vr_model_params, vr_model_hash) + vr_param_menu.destroy() + else: + self.vr_model_params = None + self.error_dialoge(INVALID_PARAM_MODEL_ERROR) + + def cancel(): + self.vr_model_params = None + vr_param_menu.destroy() + + def toggle_kara(): + if is_kara_model_var.get(): + is_bv_model_var.set(False) + balance_value_Option.configure(state=tk.DISABLED) + + def toggle_bv(): + if is_bv_model_var.get(): + is_kara_model_var.set(False) + balance_value_Option.configure(state=READ_ONLY) + else: + balance_value_Option.configure(state=tk.DISABLED) + + def opt_menu_selection(selection): + if not selection in [VOCAL_STEM, INST_STEM]: + balance_value_Option.configure(state=tk.DISABLED) + is_kara_model_Option.configure(state=tk.DISABLED) + is_bv_model_Option.configure(state=tk.DISABLED) + is_kara_model_var.set(False) + is_bv_model_var.set(False) + balance_value_var.set(0) + else: + is_kara_model_Option.configure(state=tk.NORMAL) + is_bv_model_Option.configure(state=tk.NORMAL) + + vr_param_Frame = self.menu_FRAME_SET(vr_param_menu) + vr_param_Frame.grid(row=0, padx=20) + + vr_param_title_title = self.menu_title_LABEL_SET(vr_param_Frame, SPECIFY_VR_MODEL_PARAMETERS_TEXT) + vr_param_title_title.grid() + + vr_model_stem_Label = self.menu_sub_LABEL_SET(vr_param_Frame, PRIMARY_STEM_TEXT) + vr_model_stem_Label.grid(pady=MENU_PADDING_1) + vr_model_stem_Option = ttk.OptionMenu(vr_param_Frame, vr_model_stem_var, None, *STEM_SET_MENU, command=opt_menu_selection) + vr_model_stem_Option.configure(width=15) + vr_model_stem_Option.grid(pady=MENU_PADDING_1) + vr_model_stem_Option['menu'].insert_separator(len(STEM_SET_MENU)) + vr_model_stem_Option['menu'].add_radiobutton(label=INPUT_STEM_NAME, command=tk._setit(vr_model_stem_var, INPUT_STEM_NAME, lambda e:self.pop_up_input_stem_name(vr_model_stem_var, vr_param_menu))) + self.help_hints(vr_model_stem_Label, text=SET_STEM_NAME_HELP) + + is_kara_model_Option = ttk.Checkbutton(vr_param_Frame, text=KARAOKE_MODEL_TEXT, width=SET_MENUS_CHECK_WIDTH, variable=is_kara_model_var, command=toggle_kara) + is_kara_model_Option.grid(pady=0) + + is_bv_model_Option = ttk.Checkbutton(vr_param_Frame, text=BV_MODEL_TEXT, width=SET_MENUS_CHECK_WIDTH, variable=is_bv_model_var, command=toggle_bv) + is_bv_model_Option.grid(pady=0) + + balance_value_Label = self.menu_sub_LABEL_SET(vr_param_Frame, BALANCE_VALUE_TEXT) + balance_value_Label.grid(pady=MENU_PADDING_1) + balance_value_Option = ComboBoxMenu(vr_param_Frame, textvariable=balance_value_var, values=BALANCE_VALUES, width=COMBO_WIDTH) + balance_value_Option.configure(state=tk.DISABLED) + balance_value_Option.grid(pady=MENU_PADDING_1) + + is_new_vr_model_Option = ttk.Checkbutton(vr_param_Frame, text=VR_51_MODEL_TEXT, width=SET_MENUS_CHECK_WIDTH, variable=is_new_vr_model_var, command=vr_new_toggle) + is_new_vr_model_Option.grid(pady=MENU_PADDING_1) + + vr_model_nout_Label = 
self.menu_sub_LABEL_SET(vr_param_Frame, 'Out Channels') + vr_model_nout_Label.grid(pady=MENU_PADDING_1) + vr_model_nout_Option = ComboBoxEditableMenu(vr_param_Frame, values=NOUT_SEL, textvariable=vr_model_nout_var, pattern=REG_SHIFTS, default='32', width=COMBO_WIDTH) + vr_model_nout_Option.grid(pady=MENU_PADDING_1) + #self.help_hints(vr_model_nout_Label, text=VR_MODEL_NOUT_HELP) + + vr_model_nout_lstm_Label = self.menu_sub_LABEL_SET(vr_param_Frame, 'Out Channels (LSTM layer)') + vr_model_nout_lstm_Label.grid(pady=MENU_PADDING_1) + vr_model_nout_lstm_Option = ComboBoxEditableMenu(vr_param_Frame, values=NOUT_LSTM_SEL, textvariable=vr_model_nout_lstm_var, pattern=REG_SHIFTS, default='128', width=COMBO_WIDTH)# + vr_model_nout_lstm_Option.grid(pady=MENU_PADDING_1) + #self.help_hints(vr_model_param_Label, text=VR_MODEL_NOUT_LSTM_HELP) + + vr_model_param_Label = self.menu_sub_LABEL_SET(vr_param_Frame, SELECT_MODEL_PARAM_TEXT) + vr_model_param_Label.grid(pady=MENU_PADDING_1) + vr_model_param_Option = ComboBoxMenu(vr_param_Frame, textvariable=vr_model_param_var, values=new_vr_params, width=30) + vr_model_param_Option.grid(pady=MENU_PADDING_1) + self.help_hints(vr_model_param_Label, text=VR_MODEL_PARAM_HELP) + + vr_param_confrim_Button = ttk.Button(vr_param_Frame, text=CONFIRM_TEXT, command=lambda:pull_data()) + vr_param_confrim_Button.grid(pady=MENU_PADDING_1) + + vr_param_cancel_Button = ttk.Button(vr_param_Frame, text=CANCEL_TEXT, command=cancel) + vr_param_cancel_Button.grid(pady=MENU_PADDING_1) + + vr_new_toggle() + opt_menu_selection(vr_model_stem_var.get()) + + vr_param_menu.protocol("WM_DELETE_WINDOW", cancel) + + frame_list = [vr_param_Frame] + self.menu_placement(vr_param_menu, CHOOSE_MODEL_PARAM_TEXT, pop_up=False if is_macos else True, frame_list=frame_list) + + def pop_up_vr_param_sub_json_dump(self, vr_model_params, vr_model_hash): + """Dumps current selected VR model settings to a json named after model hash""" + + self.vr_model_params = vr_model_params + + vr_model_params_dump = json.dumps(vr_model_params, indent=4) + + with open(os.path.join(VR_HASH_DIR, f'{vr_model_hash}.json'), "w") as outfile: + outfile.write(vr_model_params_dump) + + def pop_up_input_stem_name(self, stem_var:tk.StringVar, parent_window:tk.Toplevel): + """ + Input Stem Name + """ + + stem_input_save = tk.Toplevel(root) + + def close_window(is_cancel=True): + + if is_cancel or not stem_input_save_var.get(): + stem_var.set(VOCAL_STEM) + else: + stem_input_save_text = stem_input_save_var.get().capitalize() + + if stem_input_save_text == VOCAL_STEM: + stem_text = INST_STEM if is_inverse_stem_var.get() else stem_input_save_text + elif stem_input_save_text == INST_STEM: + stem_text = VOCAL_STEM if is_inverse_stem_var.get() else stem_input_save_text + else: + stem_text = f"{NO_STEM}{stem_input_save_text}" if is_inverse_stem_var.get() else stem_input_save_text + + stem_var.set(stem_text) + + stem_input_save.destroy() + + parent_window.attributes('-topmost', 'true') if OPERATING_SYSTEM == "Linux" else None + parent_window.grab_set() + root.wait_window(parent_window) + + stem_input_save_var = tk.StringVar(value='') + is_inverse_stem_var = tk.BooleanVar(value=False) + + validation = lambda value:False if re.fullmatch(REG_INPUT_STEM_NAME, value) is None else True + stem_input_save_Frame = self.menu_FRAME_SET(stem_input_save) + stem_input_save_Frame.grid(row=1) + + stem_input_save_title = self.menu_title_LABEL_SET(stem_input_save_Frame, INPUT_STEM_NAME_TEXT) + stem_input_save_title.grid(pady=0) + + stem_input_name_Label = 
self.menu_sub_LABEL_SET(stem_input_save_Frame, STEM_NAME_TEXT) + stem_input_name_Label.grid(pady=MENU_PADDING_1) + stem_input_name_Entry = ttk.Combobox(stem_input_save_Frame, textvariable=stem_input_save_var, values=STEM_SET_MENU_2, justify='center', width=25) + invalid_message = self.invalid_tooltip(stem_input_name_Entry, REG_INPUT_STEM_NAME) + stem_input_name_Entry.grid(pady=MENU_PADDING_1) + stem_input_name_Entry.focus_set() + + self.spacer_label(stem_input_save_Frame) + + is_inverse_stem_Button = ttk.Checkbutton(stem_input_save_Frame, text=IS_INVERSE_STEM_TEXT, variable=is_inverse_stem_var) + is_inverse_stem_Button.grid(pady=0) + + entry_rules_Label = tk.Label(stem_input_save_Frame, text=STEM_INPUT_RULE, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground='#868687', justify="left") + entry_rules_Label.grid(pady=MENU_PADDING_1) + + mdx_param_set_Button = ttk.Button(stem_input_save_Frame, text=DONE_MENU_TEXT, command=lambda:close_window(is_cancel=False) if validation(stem_input_save_var.get()) else invalid_message()) + mdx_param_set_Button.grid(pady=MENU_PADDING_1) + + stop_process_Button = ttk.Button(stem_input_save_Frame, text=CANCEL_TEXT, command=close_window) + stop_process_Button.grid(pady=MENU_PADDING_1) + + stem_input_save.protocol("WM_DELETE_WINDOW", close_window) + + frame_list = [stem_input_save_Frame] + self.menu_placement(stem_input_save, INPUT_UNIQUE_STEM_NAME_TEXT, pop_up=True, frame_list=frame_list) + + def pop_up_save_ensemble(self): + """ + Save Ensemble as... + """ + + ensemble_save = tk.Toplevel(root) + + ensemble_save_var = tk.StringVar(value='') + + ensemble_save_Frame = self.menu_FRAME_SET(ensemble_save) + ensemble_save_Frame.grid(row=1) + + validation = lambda value:False if re.fullmatch(REG_SAVE_INPUT, value) is None else True + save_func = lambda:(self.pop_up_save_ensemble_sub_json_dump(self.ensemble_listbox_get_all_selected_models(), ensemble_save_var.get()), ensemble_save.destroy()) + + if len(self.ensemble_listbox_get_all_selected_models()) <= 1: + ensemble_save_title = self.menu_title_LABEL_SET(ensemble_save_Frame, ENSEMBLE_WARNING_NOT_ENOUGH_SHORT_TEXT, width=20) + ensemble_save_title.grid() + + ensemble_save_title = self.menu_sub_LABEL_SET(ensemble_save_Frame, ENSEMBLE_WARNING_NOT_ENOUGH_TEXT) + ensemble_save_title.grid(pady=MENU_PADDING_1) + + stop_process_Button = ttk.Button(ensemble_save_Frame, text=OK_TEXT, command=lambda:ensemble_save.destroy()) + stop_process_Button.grid() + else: + ensemble_save_title = self.menu_title_LABEL_SET(ensemble_save_Frame, SAVE_CURRENT_ENSEMBLE_TEXT) + ensemble_save_title.grid() + + ensemble_name_Label = self.menu_sub_LABEL_SET(ensemble_save_Frame, ENSEMBLE_NAME_TEXT) + ensemble_name_Label.grid(pady=MENU_PADDING_1) + ensemble_name_Entry = ttk.Entry(ensemble_save_Frame, textvariable=ensemble_save_var, justify='center', width=25) + ensemble_name_Entry.grid(pady=MENU_PADDING_1) + invalid_message = self.invalid_tooltip(ensemble_name_Entry) + ensemble_name_Entry.focus_set() + self.spacer_label(ensemble_save_Frame) + + entry_rules_Label = tk.Label(ensemble_save_Frame, text=ENSEMBLE_INPUT_RULE, font=(MAIN_FONT_NAME, f"{FONT_SIZE_1}"), foreground='#868687', justify="left") + entry_rules_Label.grid() + + mdx_param_set_Button = ttk.Button(ensemble_save_Frame, text=SAVE_TEXT, command=lambda:save_func() if validation(ensemble_save_var.get()) else invalid_message()) + mdx_param_set_Button.grid(pady=MENU_PADDING_1) + + stop_process_Button = ttk.Button(ensemble_save_Frame, text=CANCEL_TEXT, command=lambda:ensemble_save.destroy()) + 
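+ # The Save button above only runs save_func() when the entered name matches
+ # REG_SAVE_INPUT; save_func() passes the selected models and chosen name to
+ # pop_up_save_ensemble_sub_json_dump() (defined just below), which adds the current
+ # main stem and ensemble type and writes everything to ENSEMBLE_CACHE_DIR/<name>.json.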
stop_process_Button.grid(pady=MENU_PADDING_1) + + self.menu_placement(ensemble_save, SAVE_CURRENT_ENSEMBLE_TEXT, pop_up=True) + + def pop_up_save_ensemble_sub_json_dump(self, selected_ensemble_model, ensemble_save_name: str): + """Dumps current ensemble settings to a json named after user input""" + + if ensemble_save_name: + self.chosen_ensemble_var.set(ensemble_save_name) + ensemble_save_name = ensemble_save_name.replace(" ", "_") + saved_data = { + 'ensemble_main_stem': self.ensemble_main_stem_var.get(), + 'ensemble_type': self.ensemble_type_var.get(), + 'selected_models': selected_ensemble_model, + } + + saved_data_dump = json.dumps(saved_data, indent=4) + with open(os.path.join(ENSEMBLE_CACHE_DIR, f'{ensemble_save_name}.json'), "w") as outfile: + outfile.write(saved_data_dump) + + def deletion_list_fill(self, option_menu: ComboBoxMenu, selection_var: tk.StringVar, selection_dir, var_set, menu_name=None): + """Fills the saved settings menu located in tab 2 of the main settings window""" + + def command_callback(event=None): + self.deletion_entry(selection_var.get(), selection_dir, refresh_menu) + selection_var.set(var_set) + + def refresh_menu(remove=None): + selection_list = self.last_found_ensembles if menu_name == 'deleteensemble' else self.last_found_settings + main_var = self.chosen_ensemble_var if menu_name == 'deleteensemble' else self.save_current_settings_var + + if remove and remove in selection_list: + selection_list = list(selection_list) + selection_list.remove(remove) + main_var.set(CHOOSE_ENSEMBLE_OPTION) + + self.update_menus(option_widget=option_menu, + style_name=menu_name, + command=command_callback, + new_items=selection_list) + + refresh_menu() + + def deletion_entry(self, selection: str, path, callback): + """Deletes selected user saved application settings""" + + if selection not in [SELECT_SAVED_SET, SELECT_SAVED_ENSEMBLE]: + saved_path = os.path.join(path, f'{selection.replace(" ", "_")}.json') + confirm = self.message_box(DELETE_ENS_ENTRY) + if confirm: + if os.path.isfile(saved_path): + os.remove(saved_path) + callback(selection) + + #--Download Center Methods-- + + def online_data_refresh(self, user_refresh=True, confirmation_box=False, refresh_list_Button=False, is_start_up=False, is_download_complete=False): + """Checks for application updates""" + + def online_check(): + if not is_start_up: + self.app_update_status_Text_var.set(LOADING_VERSION_INFO_TEXT) + self.app_update_button_Text_var.set(CHECK_FOR_UPDATES_TEXT) + + is_new_update = False + try: + self.online_data = json.load(urllib.request.urlopen(DOWNLOAD_CHECKS)) + self.is_online = True + + try: + with urllib.request.urlopen(BULLETIN_CHECK) as response: + self.bulletin_data = response.read().decode('utf-8') + + if not is_windows: + self.bulletin_data = read_bulliten_text_mac(CR_TEXT, self.bulletin_data) + else: + self.bulletin_data = self.bulletin_data.replace("~", "•") + + except Exception as e: + self.bulletin_data = INFO_UNAVAILABLE_TEXT + print(e) + + if user_refresh: + self.download_list_state() + for widget in self.download_center_Buttons: + widget.configure(state=tk.NORMAL) + + if refresh_list_Button: + self.download_progress_info_var.set('Download List Refreshed!') + + if OPERATING_SYSTEM=="Darwin": + self.lastest_version = self.online_data["current_version_mac"] + elif OPERATING_SYSTEM=="Linux": + self.lastest_version = self.online_data["current_version_linux"] + else: + self.lastest_version = self.online_data["current_version"] + + if self.lastest_version == current_patch and not 
is_start_up: + self.app_update_status_Text_var.set('UVR Version Current') + else: + is_new_update = True + is_beta_version = True if self.lastest_version == PREVIOUS_PATCH_WIN and BETA_VERSION in current_patch else False + + if not is_start_up: + if is_beta_version: + self.app_update_status_Text_var.set(f"Roll Back: {self.lastest_version}") + self.app_update_button_Text_var.set(ROLL_BACK_TEXT) + else: + self.app_update_status_Text_var.set(f"Update Found: {self.lastest_version}") + self.app_update_button_Text_var.set('Click Here to Update') + + if OPERATING_SYSTEM == "Windows": + self.download_update_link_var.set('{}{}{}'.format(UPDATE_REPO, self.lastest_version, application_extension)) + self.download_update_path_var.set(os.path.join(BASE_PATH, f'{self.lastest_version}{application_extension}')) + elif OPERATING_SYSTEM == "Darwin": + self.download_update_link_var.set(UPDATE_MAC_ARM_REPO if SYSTEM_PROC == ARM or ARM in SYSTEM_ARCH else UPDATE_MAC_X86_64_REPO) + elif OPERATING_SYSTEM == "Linux": + self.download_update_link_var.set(UPDATE_LINUX_REPO) + + if not user_refresh: + if not is_beta_version and not self.lastest_version == current_patch: + self.command_Text.write(NEW_UPDATE_FOUND_TEXT(self.lastest_version)) + + + is_update_params = self.is_auto_update_model_params if is_start_up else self.is_auto_update_model_params_var.get() + + if is_update_params and is_start_up or is_download_complete: + self.download_model_settings() + + # if is_download_complete: + # self.download_model_settings() + + except Exception as e: + self.offline_state_set(is_start_up) + is_new_update = False + + if user_refresh: + self.download_list_state(disable_only=True) + for widget in self.download_center_Buttons: + widget.configure(state=tk.DISABLED) + + try: + self.error_log_var.set(error_text('Online Data Refresh', e)) + except Exception as e: + print(e) + + return is_new_update + + if confirmation_box: + return online_check() + else: + self.current_thread = KThread(target=online_check) + self.current_thread.setDaemon(True) if not is_windows else None + self.current_thread.start() + + def offline_state_set(self, is_start_up=False): + """Changes relevant settings and "Download Center" buttons if no internet connection is available""" + + if not is_start_up and self.is_menu_settings_open: + self.app_update_status_Text_var.set(f'Version Status: {NO_CONNECTION}') + self.download_progress_info_var.set(NO_CONNECTION) + self.app_update_button_Text_var.set('Refresh') + self.refresh_list_Button.configure(state=tk.NORMAL) + self.stop_download_Button_DISABLE() + self.enable_tabs() + + self.is_online = False + + def download_validate_code(self, confirm=False, code_message=None): + """Verifies the VIP download code""" + + self.decoded_vip_link = vip_downloads(self.user_code_var.get()) + + if confirm: + if not self.decoded_vip_link == NO_CODE: + info_text = 'VIP Models Added!' 
+ is_success_message = True + else: + info_text = 'Incorrect Code' + is_success_message = False + + self.download_progress_info_var.set(info_text) + self.user_code_validation_var.set(info_text) + + if code_message: + code_message(info_text, is_success_message) + + self.download_list_fill() + + def download_list_fill(self, model_type=ALL_TYPES): + """Fills the download lists with the data retrieved from the update check.""" + + self.download_demucs_models_list.clear() + + model_download_mdx_list, model_download_mdx_name = [], "mdxdownload" + model_download_vr_list, model_download_vr_name = [], "vrdownload" + model_download_demucs_list, model_download_demucs_name = [], "demucsmdxdownload" + + self.vr_download_list = self.online_data["vr_download_list"] + self.mdx_download_list = self.online_data["mdx_download_list"] + self.demucs_download_list = self.online_data["demucs_download_list"] + self.mdx_download_list.update(self.online_data["mdx23c_download_list"]) + + if not self.decoded_vip_link is NO_CODE: + self.vr_download_list.update(self.online_data["vr_download_vip_list"]) + self.mdx_download_list.update(self.online_data["mdx_download_vip_list"]) + self.mdx_download_list.update(self.online_data["mdx23c_download_vip_list"]) + + def configure_combobox(combobox:ComboBoxMenu, values:list, variable:tk.StringVar, arch_type, name): + values = [NO_NEW_MODELS] if not values else values + combobox['values'] = values + combobox.update_dropdown_size(values, name, offset=310, + command=lambda s: self.download_model_select(variable.get(), arch_type, variable)) + + if model_type in [VR_ARCH_TYPE, ALL_TYPES]: + for (selectable, model) in self.vr_download_list.items(): + if not os.path.isfile(os.path.join(VR_MODELS_DIR, model)): + model_download_vr_list.append(selectable) + + configure_combobox(self.model_download_vr_Option, model_download_vr_list, self.model_download_vr_var, VR_ARCH_TYPE, model_download_vr_name) + + if model_type in [MDX_ARCH_TYPE, ALL_TYPES]: + for (selectable, model) in self.mdx_download_list.items(): + if isinstance(model, dict): + items_list = list(model.items()) + model_name, config = items_list[0] + config_link = f"{MDX23_CONFIG_CHECKS}{config}" + config_local = os.path.join(MDX_C_CONFIG_PATH, config) + if not os.path.isfile(config_local): + with urllib.request.urlopen(config_link) as response: + with open(config_local, 'wb') as out_file: + out_file.write(response.read()) + else: + model_name = str(model) + + if not os.path.isfile(os.path.join(MDX_MODELS_DIR, model_name)): + model_download_mdx_list.append(selectable) + + configure_combobox(self.model_download_mdx_Option, model_download_mdx_list, self.model_download_mdx_var, MDX_ARCH_TYPE, model_download_mdx_name) + + if model_type in [DEMUCS_ARCH_TYPE, ALL_TYPES]: + for (selectable, model) in self.demucs_download_list.items(): + for name in model.items(): + if [True for x in DEMUCS_NEWER_ARCH_TYPES if x in selectable]: + if not os.path.isfile(os.path.join(DEMUCS_NEWER_REPO_DIR, name[0])): + self.download_demucs_models_list.append(selectable) + else: + if not os.path.isfile(os.path.join(DEMUCS_MODELS_DIR, name[0])): + self.download_demucs_models_list.append(selectable) + + self.download_demucs_models_list = list(dict.fromkeys(self.download_demucs_models_list)) + + for option_name in self.download_demucs_models_list: + model_download_demucs_list.append(option_name) + + configure_combobox(self.model_download_demucs_Option, model_download_demucs_list, self.model_download_demucs_var, DEMUCS_ARCH_TYPE, model_download_demucs_name) + + def 
download_model_settings(self): + '''Update the newest model settings''' + + try: + self.vr_hash_MAPPER = json.load(urllib.request.urlopen(VR_MODEL_DATA_LINK)) + self.mdx_hash_MAPPER = json.load(urllib.request.urlopen(MDX_MODEL_DATA_LINK)) + self.mdx_name_select_MAPPER = json.load(urllib.request.urlopen(MDX_MODEL_NAME_DATA_LINK)) + self.demucs_name_select_MAPPER = json.load(urllib.request.urlopen(DEMUCS_MODEL_NAME_DATA_LINK)) + + vr_hash_MAPPER_dump = json.dumps(self.vr_hash_MAPPER, indent=4) + with open(VR_HASH_JSON, "w") as outfile: + outfile.write(vr_hash_MAPPER_dump) + + mdx_hash_MAPPER_dump = json.dumps(self.mdx_hash_MAPPER, indent=4) + with open(MDX_HASH_JSON, "w") as outfile: + outfile.write(mdx_hash_MAPPER_dump) + + mdx_name_select_MAPPER_dump = json.dumps(self.mdx_name_select_MAPPER, indent=4) + with open(MDX_MODEL_NAME_SELECT, "w") as outfile: + outfile.write(mdx_name_select_MAPPER_dump) + + demucs_name_select_MAPPER_dump = json.dumps(self.demucs_name_select_MAPPER, indent=4) + with open(DEMUCS_MODEL_NAME_SELECT, "w") as outfile: + outfile.write(demucs_name_select_MAPPER_dump) + + except Exception as e: + self.vr_hash_MAPPER = load_model_hash_data(VR_HASH_JSON) + self.mdx_hash_MAPPER = load_model_hash_data(MDX_HASH_JSON) + self.mdx_name_select_MAPPER = load_model_hash_data(MDX_MODEL_NAME_SELECT) + self.demucs_name_select_MAPPER = load_model_hash_data(DEMUCS_MODEL_NAME_SELECT) + self.error_log_var.set(e) + print(e) + + def download_list_state(self, reset=True, disable_only=False): + """Makes sure only the models from the chosen AI network are selectable.""" + + for widget in self.download_lists: + widget.configure(state=tk.DISABLED) + + if reset: + for download_list_var in self.download_list_vars: + if self.is_online: + download_list_var.set(NO_MODEL) + self.download_Button.configure(state=tk.NORMAL) + else: + download_list_var.set(NO_CONNECTION) + self.download_Button.configure(state=tk.DISABLED) + disable_only = True + + if not disable_only: + self.download_Button.configure(state=tk.NORMAL) + if self.select_download_var.get() == VR_ARCH_TYPE: + self.model_download_vr_Option.configure(state=READ_ONLY) + self.selected_download_var = self.model_download_vr_var + self.download_list_fill(model_type=VR_ARCH_TYPE) + if self.select_download_var.get() == MDX_ARCH_TYPE: + self.model_download_mdx_Option.configure(state=READ_ONLY) + self.selected_download_var = self.model_download_mdx_var + self.download_list_fill(model_type=MDX_ARCH_TYPE) + if self.select_download_var.get() == DEMUCS_ARCH_TYPE: + self.model_download_demucs_Option.configure(state=READ_ONLY) + self.selected_download_var = self.model_download_demucs_var + self.download_list_fill(model_type=DEMUCS_ARCH_TYPE) + + self.stop_download_Button_DISABLE() + + def download_model_select(self, selection, type, var:tk.StringVar): + """Prepares the data needed to download selected model.""" + + self.download_demucs_newer_models.clear() + + if selection == NO_NEW_MODELS: + selection = NO_MODEL + var.set(NO_MODEL) + + model_repo = self.decoded_vip_link if VIP_SELECTION in selection else NORMAL_REPO + is_demucs_newer = [True for x in DEMUCS_NEWER_ARCH_TYPES if x in selection] + + if type == VR_ARCH_TYPE: + for selected_model in self.vr_download_list.items(): + if selection in selected_model: + self.download_link_path_var.set("{}{}".format(model_repo, selected_model[1])) + self.download_save_path_var.set(os.path.join(VR_MODELS_DIR, selected_model[1])) + break + + if type == MDX_ARCH_TYPE: + for selected_model in self.mdx_download_list.items(): 
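+ # Entries in mdx_download_list are either a plain filename (classic MDX-Net) or a
+ # single-entry dict mapping the model file to its MDX23C config yaml, so the branch
+ # below pulls the filename out of the dict before building the download link and
+ # save path.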
+ if selection in selected_model: + if isinstance(selected_model[1], dict): + model_name = list(selected_model[1].keys())[0] + else: + model_name = str(selected_model[1]) + self.download_link_path_var.set("{}{}".format(model_repo, model_name)) + self.download_save_path_var.set(os.path.join(MDX_MODELS_DIR, model_name)) + break + + if type == DEMUCS_ARCH_TYPE: + for selected_model, model_data in self.demucs_download_list.items(): + if selection == selected_model: + for key, value in model_data.items(): + if is_demucs_newer: + self.download_demucs_newer_models.append([os.path.join(DEMUCS_NEWER_REPO_DIR, key), value]) + else: + self.download_save_path_var.set(os.path.join(DEMUCS_MODELS_DIR, key)) + self.download_link_path_var.set(value) + + def download_item(self, is_update_app=False): + """Downloads the model selected.""" + + if not is_update_app: + if self.selected_download_var.get() == NO_MODEL: + self.download_progress_info_var.set(NO_MODEL) + return + + for widget in self.download_center_Buttons: + widget.configure(state=tk.DISABLED) + self.refresh_list_Button.configure(state=tk.DISABLED) + self.manual_download_Button.configure(state=tk.DISABLED) + + is_demucs_newer = [True for x in DEMUCS_NEWER_ARCH_TYPES if x in self.selected_download_var.get()] + + self.download_list_state(reset=False, disable_only=True) + self.stop_download_Button_ENABLE() + self.disable_tabs() + + def download_progress_bar(current, total, model=80): + progress = ('%s' % (100 * current // total)) + self.download_progress_bar_var.set(int(progress)) + self.download_progress_percent_var.set(progress + ' %') + + def push_download(): + self.is_download_thread_active = True + try: + if is_update_app: + self.download_progress_info_var.set(DOWNLOADING_UPDATE) + if os.path.isfile(self.download_update_path_var.get()): + self.download_progress_info_var.set(FILE_EXISTS) + else: + wget.download(self.download_update_link_var.get(), self.download_update_path_var.get(), bar=download_progress_bar) + + self.download_post_action(DOWNLOAD_UPDATE_COMPLETE) + else: + if self.select_download_var.get() == DEMUCS_ARCH_TYPE and is_demucs_newer: + for model_num, model_data in enumerate(self.download_demucs_newer_models, start=1): + self.download_progress_info_var.set('{} {}/{}...'.format(DOWNLOADING_ITEM, model_num, len(self.download_demucs_newer_models))) + if os.path.isfile(model_data[0]): + continue + else: + wget.download(model_data[1], model_data[0], bar=download_progress_bar) + else: + self.download_progress_info_var.set(SINGLE_DOWNLOAD) + if os.path.isfile(self.download_save_path_var.get()): + self.download_progress_info_var.set(FILE_EXISTS) + else: + wget.download(self.download_link_path_var.get(), self.download_save_path_var.get(), bar=download_progress_bar) + + self.download_post_action(DOWNLOAD_COMPLETE) + + except Exception as e: + self.error_log_var.set(error_text(DOWNLOADING_ITEM, e)) + self.download_progress_info_var.set(DOWNLOAD_FAILED) + + if type(e).__name__ == 'URLError': + self.offline_state_set() + else: + self.download_progress_percent_var.set(f"{type(e).__name__}") + self.download_post_action(DOWNLOAD_FAILED) + + self.active_download_thread = KThread(target=push_download) + self.active_download_thread.start() + + def download_post_action(self, action): + """Resets the widget variables in the "Download Center" based on the state of the download.""" + + for widget in self.download_center_Buttons: + widget.configure(state=tk.NORMAL) + self.refresh_list_Button.configure(state=tk.NORMAL) + 
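+ # Every Download Center control is re-enabled first; the action-specific blocks
+ # below then refresh the model lists, terminate a failed or stopped download thread,
+ # or launch the downloaded update installer.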
self.manual_download_Button.configure(state=tk.NORMAL) + + self.enable_tabs() + self.stop_download_Button_DISABLE() + + if action == DOWNLOAD_FAILED: + try: + self.active_download_thread.terminate() + finally: + self.download_progress_info_var.set(DOWNLOAD_FAILED) + self.download_list_state(reset=False) + if action == DOWNLOAD_STOPPED: + try: + self.active_download_thread.terminate() + finally: + self.download_progress_info_var.set(DOWNLOAD_STOPPED) + self.download_list_state(reset=False) + if action == DOWNLOAD_COMPLETE: + self.online_data_refresh(is_download_complete=True) + self.download_progress_info_var.set(DOWNLOAD_COMPLETE) + self.download_list_state() + if action == DOWNLOAD_UPDATE_COMPLETE: + self.download_progress_info_var.set(DOWNLOAD_UPDATE_COMPLETE) + if os.path.isfile(self.download_update_path_var.get()): + subprocess.Popen(self.download_update_path_var.get()) + self.download_list_state() + + + self.is_download_thread_active = False + + self.delete_temps() + + #--Refresh/Loop Methods-- + + def update_loop(self): + """Update the model dropdown menus""" + + if self.clear_cache_torch: + clear_gpu_cache() + self.clear_cache_torch = False + + if self.is_process_stopped: + if self.thread_check(self.active_processing_thread): + self.conversion_Button_Text_var.set(STOP_PROCESSING) + self.conversion_Button.configure(state=tk.DISABLED) + self.stop_Button.configure(state=tk.DISABLED) + else: + self.stop_Button.configure(state=tk.NORMAL) + self.conversion_Button_Text_var.set(START_PROCESSING) + self.conversion_Button.configure(state=tk.NORMAL) + self.progress_bar_main_var.set(0) + clear_gpu_cache() + self.is_process_stopped = False + + if self.is_confirm_error_var.get(): + self.check_is_menu_open(ERROR_OPTION) + self.is_confirm_error_var.set(False) + + if self.is_check_splash and is_windows: + + while not self.msg_queue.empty(): + message = self.msg_queue.get_nowait() + print(message) + + close_process(self.msg_queue) + self.is_check_splash = False + + #self.auto_save() + + self.update_available_models() + self.after(600, self.update_loop) + + def update_menus(self, option_widget:ComboBoxMenu, style_name, command, new_items, last_items=None, base_options=None): + + if new_items != last_items: + formatted_items = [item.replace("_", " ") for item in new_items] + if not formatted_items and base_options: + base_options = [option for option in base_options if option != OPT_SEPARATOR_SAVE] + + final_options = formatted_items + base_options if base_options else formatted_items + option_widget['values'] = final_options + option_widget.update_dropdown_size(formatted_items, style_name, command=command) + return new_items + return last_items + + def update_available_models(self): + """ + Loops through all models in each model directory and adds them to the appropriate model menu. + Also updates ensemble listbox and user saved settings list. 
+ """ + + def fix_name(name, mapper:dict): return next((new_name for old_name, new_name in mapper.items() if name in old_name), name) + + new_vr_models = self.get_files_from_dir(VR_MODELS_DIR, PTH) + new_mdx_models = self.get_files_from_dir(MDX_MODELS_DIR, (ONNX, CKPT), is_mdxnet=True) + new_demucs_models = self.get_files_from_dir(DEMUCS_MODELS_DIR, (CKPT, '.gz', '.th')) + self.get_files_from_dir(DEMUCS_NEWER_REPO_DIR, YAML) + new_ensembles_found = self.get_files_from_dir(ENSEMBLE_CACHE_DIR, JSON) + new_settings_found = self.get_files_from_dir(SETTINGS_CACHE_DIR, JSON) + new_models_found = new_vr_models + new_mdx_models + new_demucs_models + is_online = self.is_online_model_menu + + def loop_directories(option_menu:ComboBoxMenu, option_var, model_list, model_type, name_mapper=None): + current_selection = option_menu.get() + option_list = [fix_name(file_name, name_mapper) for file_name in model_list] if name_mapper else model_list + sorted_options = natsort.natsorted(option_list) + option_list_option_menu = sorted_options + [OPT_SEPARATOR, DOWNLOAD_MORE] if self.is_online else sorted_options + + if not option_list and self.is_online: + option_list_option_menu = [option for option in option_list_option_menu if option != OPT_SEPARATOR] + + option_menu['values'] = option_list_option_menu + option_menu.set(current_selection) + option_menu.update_dropdown_size(option_list, model_type) + + if self.is_root_defined_var.get() and model_type == MDX_ARCH_TYPE and self.chosen_process_method_var.get() == MDX_ARCH_TYPE: + self.selection_action_models_sub(current_selection, model_type, option_var) + + return tuple(f"{model_type}{ENSEMBLE_PARTITION}{model_name}" for model_name in sorted_options) + + if new_models_found != self.last_found_models or is_online != self.is_online: + self.model_data_table = [] + + vr_model_list = loop_directories(self.vr_model_Option, self.vr_model_var, new_vr_models, VR_ARCH_TYPE, name_mapper=None) + mdx_model_list = loop_directories(self.mdx_net_model_Option, self.mdx_net_model_var, new_mdx_models, MDX_ARCH_TYPE, name_mapper=self.mdx_name_select_MAPPER) + demucs_model_list = loop_directories(self.demucs_model_Option, self.demucs_model_var, new_demucs_models, DEMUCS_ARCH_TYPE, name_mapper=self.demucs_name_select_MAPPER) + + self.ensemble_model_list = vr_model_list + mdx_model_list + demucs_model_list + self.default_change_model_list = vr_model_list + mdx_model_list + self.last_found_models = new_models_found + self.is_online_model_menu = self.is_online + + if not self.chosen_ensemble_var.get() == CHOOSE_ENSEMBLE_OPTION: + self.selection_action_chosen_ensemble(self.chosen_ensemble_var.get()) + else: + if not self.ensemble_main_stem_var.get() == CHOOSE_STEM_PAIR: + self.selection_action_ensemble_stems(self.ensemble_main_stem_var.get(), auto_update=self.ensemble_listbox_get_all_selected_models()) + else: + self.ensemble_listbox_clear_and_insert_new(self.ensemble_model_list) + + self.last_found_ensembles = self.update_menus(option_widget=self.chosen_ensemble_Option, + style_name='savedensembles', + command=None, + new_items=new_ensembles_found, + last_items=self.last_found_ensembles, + base_options=ENSEMBLE_OPTIONS + ) + + self.last_found_settings = self.update_menus(option_widget=self.save_current_settings_Option, + style_name='savedsettings', + command=None, + new_items=new_settings_found, + last_items=self.last_found_settings, + base_options=SAVE_SET_OPTIONS + ) + + def update_main_widget_states_mdx(self): + if not self.mdx_net_model_var.get() == DOWNLOAD_MORE: + 
self.update_main_widget_states() + + def move_widget_offscreen(self, widget, step=10): + current_x = widget.winfo_x() + current_y = widget.winfo_y() + if current_x > -1000: # assuming -1000 is your off-screen target + widget.place(x=current_x - step, y=current_y) + widget.after(10, lambda: self.move_widget_offscreen(widget, step)) + + def update_main_widget_states(self): + """Updates main widget states based on chosen process method""" + + def place_widgets(*widgets): + for widget in widgets: + widget() + + def general_shared_buttons(): + place_widgets(self.is_gpu_conversion_Option_place, + self.model_sample_mode_Option_place) + + def stem_save_options(): + place_widgets(self.is_primary_stem_only_Option_place, + self.is_secondary_stem_only_Option_place) + + def stem_save_demucs_options(): + place_widgets(self.is_primary_stem_only_Demucs_Option_place, + self.is_secondary_stem_only_Demucs_Option_place) + + def no_ensemble_shared(): + place_widgets(self.save_current_settings_Label_place, + self.save_current_settings_Option_place) + + process_method = self.chosen_process_method_var.get() + audio_tool = self.chosen_audio_tool_var.get() + + for widget in self.GUI_LIST: + widget.place(x=-1000, y=-1000) + + if process_method == MDX_ARCH_TYPE: + place_widgets(self.mdx_net_model_Label_place, + self.mdx_net_model_Option_place, + general_shared_buttons, + stem_save_options, + no_ensemble_shared) + elif process_method == VR_ARCH_PM: + place_widgets(self.vr_model_Label_place, self.vr_model_Option_place, + self.aggression_setting_Label_place, + self.aggression_setting_Option_place, + self.window_size_Label_place, + self.window_size_Option_place, + general_shared_buttons, + stem_save_options, + no_ensemble_shared) + elif process_method == DEMUCS_ARCH_TYPE: + place_widgets(self.demucs_model_Label_place, + self.demucs_model_Option_place, + self.demucs_stems_Label_place, + self.demucs_stems_Option_place, + self.segment_Label_place, + self.segment_Option_place, + general_shared_buttons, + stem_save_demucs_options, + no_ensemble_shared) + elif process_method == AUDIO_TOOLS: + place_widgets(self.chosen_audio_tool_Label_place, + self.chosen_audio_tool_Option_place) + + if audio_tool == ALIGN_INPUTS: + self.file_one_sub_var.set(FILE_ONE_MAIN_LABEL) + self.file_two_sub_var.set(FILE_TWO_MAIN_LABEL) + elif audio_tool == MATCH_INPUTS: + self.file_one_sub_var.set(FILE_ONE_MATCH_MAIN_LABEL) + self.file_two_sub_var.set(FILE_TWO_MATCH_MAIN_LABEL) + + audio_tool_options = { + MANUAL_ENSEMBLE: [self.choose_algorithm_Label_place, + self.choose_algorithm_Option_place, + self.is_wav_ensemble_Option_place], + TIME_STRETCH: [lambda: self.model_sample_mode_Option_place(rely=5), + self.time_stretch_rate_Label_place, + self.time_stretch_rate_Option_place], + CHANGE_PITCH: [self.is_time_correction_Option_place, + lambda: self.model_sample_mode_Option_place(rely=6), + self.pitch_rate_Label_place, + self.pitch_rate_Option_place], + ALIGN_INPUTS: [self.fileOne_Label_place, + self.fileOne_Entry_place, + self.fileTwo_Label_place, + self.fileTwo_Entry_place, + self.fileOne_Open_place, + self.fileTwo_Open_place, + self.intro_analysis_Label_place, + self.intro_analysis_Option_place, + self.db_analysis_Label_place, + self.db_analysis_Option_place, + self.time_window_Label_place, + self.time_window_Option_place], + MATCH_INPUTS: [self.fileOne_Label_place, + self.fileOne_Entry_place, + self.fileTwo_Label_place, + self.fileTwo_Entry_place, + self.fileOne_Open_place, + self.fileTwo_Open_place, + self.wav_type_set_Label_place, + 
self.wav_type_set_Option_place], + } + place_widgets(*audio_tool_options.get(audio_tool, [])) + elif process_method == ENSEMBLE_MODE: + place_widgets(self.chosen_ensemble_Label_place, + self.chosen_ensemble_Option_place, + self.ensemble_main_stem_Label_place, + self.ensemble_main_stem_Option_place, + self.ensemble_type_Label_place, + self.ensemble_type_Option_place, + self.ensemble_listbox_Label_place, + self.ensemble_listbox_Option_place, + self.ensemble_listbox_Option_pack, + general_shared_buttons, + stem_save_options) + + if not self.is_gpu_available: + self.is_gpu_conversion_Disable() + + self.update_inputPaths() + + def update_button_states(self): + """Updates the available stems for selected Demucs model""" + + if self.chosen_process_method_var.get() == DEMUCS_ARCH_TYPE: + if self.demucs_stems_var.get() == ALL_STEMS: + self.update_stem_checkbox_labels(PRIMARY_STEM, demucs=True) + elif self.demucs_stems_var.get() == VOCAL_STEM: + self.update_stem_checkbox_labels(VOCAL_STEM, demucs=True, is_disable_demucs_boxes=False) + self.is_stem_only_Demucs_Options_Enable() + else: + self.is_stem_only_Demucs_Options_Enable() + + #self.demucs_stems_Option['menu'].delete(0,'end') + + if not self.demucs_model_var.get() == CHOOSE_MODEL: + if DEMUCS_UVR_MODEL in self.demucs_model_var.get(): + stems = DEMUCS_2_STEM_OPTIONS + elif DEMUCS_6_STEM_MODEL in self.demucs_model_var.get(): + stems = DEMUCS_6_STEM_OPTIONS + else: + stems = DEMUCS_4_STEM_OPTIONS + + self.demucs_stems_Option['values'] = stems + self.demucs_stems_Option.command(lambda e:self.update_stem_checkbox_labels(self.demucs_stems_var.get(), demucs=True)) + + def update_button_states_mdx(self, model_stems): + """Updates the available stems for selected Demucs model""" + + model_stems = [stem for stem in model_stems] + + if len(model_stems) >= 3: + model_stems.insert(0, ALL_STEMS) + self.mdxnet_stems_var.set(ALL_STEMS) + else: + self.mdxnet_stems_var.set(model_stems[0]) + + if self.mdxnet_stems_var.get() == ALL_STEMS: + self.update_stem_checkbox_labels(PRIMARY_STEM, disable_boxes=True) + elif self.mdxnet_stems_var.get() == VOCAL_STEM: + self.update_stem_checkbox_labels(VOCAL_STEM) + self.is_stem_only_Options_Enable() + else: + self.is_stem_only_Options_Enable() + + if not self.mdx_net_model_var.get() == CHOOSE_MODEL: + self.mdxnet_stems_Option['values'] = model_stems + self.mdxnet_stems_Option.command(lambda e:self.update_stem_checkbox_labels(self.mdxnet_stems_var.get())) + + def update_stem_checkbox_labels(self, selection, demucs=False, disable_boxes=False, is_disable_demucs_boxes=True): + """Updates the "save only" checkboxes based on the model selected""" + + stem_text = self.is_primary_stem_only_Text_var, self.is_secondary_stem_only_Text_var + + if selection == ALL_STEMS: + selection = PRIMARY_STEM + else: + self.is_stem_only_Options_Enable() + + if disable_boxes or selection == PRIMARY_STEM: + self.is_primary_stem_only_Option.configure(state=tk.DISABLED) + self.is_secondary_stem_only_Option.configure(state=tk.DISABLED) + self.is_primary_stem_only_var.set(False) + self.is_secondary_stem_only_var.set(False) + else: + self.is_primary_stem_only_Option.configure(state=tk.NORMAL) + self.is_secondary_stem_only_Option.configure(state=tk.NORMAL) + + if demucs: + stem_text = self.is_primary_stem_only_Demucs_Text_var, self.is_secondary_stem_only_Demucs_Text_var + + if is_disable_demucs_boxes: + self.is_primary_stem_only_Demucs_Option.configure(state=tk.DISABLED) + self.is_secondary_stem_only_Demucs_Option.configure(state=tk.DISABLED) + 
self.is_primary_stem_only_Demucs_var.set(False) + self.is_secondary_stem_only_Demucs_var.set(False) + + if not selection == PRIMARY_STEM: + self.is_primary_stem_only_Demucs_Option.configure(state=tk.NORMAL) + self.is_secondary_stem_only_Demucs_Option.configure(state=tk.NORMAL) + + stem_text[0].set(f"{selection} Only") + stem_text[1].set(f"{secondary_stem(selection)} Only") + + def update_ensemble_algorithm_menu(self, is_4_stem=False): + options = ENSEMBLE_TYPE_4_STEM if is_4_stem else ENSEMBLE_TYPE + + if not "/" in self.ensemble_type_var.get() or is_4_stem: + self.ensemble_type_var.set(options[0]) + + self.ensemble_type_Option["values"] = options + + def selection_action(self, event, option_var, is_mdx_net=False): + selected_value = event.widget.get() + selected_value = CHOOSE_MODEL if selected_value == OPT_SEPARATOR else selected_value + option_var.set(selected_value) + if is_mdx_net: + self.update_main_widget_states_mdx() + self.selection_action_models(selected_value) + + def selection_action_models(self, selection): + """Accepts model names and verifies their state.""" + + # Handle different selections. + if selection in CHOOSE_MODEL: + self.update_stem_checkbox_labels(PRIMARY_STEM, disable_boxes=True) + else: + self.is_stem_only_Options_Enable() + + # Process method matching current selection. + self._handle_model_by_chosen_method(selection) + + # Handle Ensemble mode case. + if self.chosen_process_method_var.get() == ENSEMBLE_MODE: + return self._handle_ensemble_mode_selection(selection) + + if not self.is_menu_settings_open and selection == DOWNLOAD_MORE: + self.update_checkbox_text() + self.menu_settings(select_tab_3=True) + + def _handle_model_by_chosen_method(self, selection): + """Handles model selection based on the currently chosen method.""" + current_method = self.chosen_process_method_var.get() + model_var = self.method_mapper.get(current_method) + if model_var: + self.selection_action_models_sub(selection, current_method, model_var) + + def _handle_ensemble_mode_selection(self, selection): + """Handles the case where the current method is 'ENSEMBLE_MODE'.""" + model_data = self.assemble_model_data(selection, ENSEMBLE_CHECK)[0] + if not model_data.model_status: + return self.model_stems_list.index(selection) + return False + + def selection_action_models_sub(self, selection, ai_type, var: tk.StringVar): + """Takes input directly from the selection_action_models parent function""" + + if selection == DOWNLOAD_MORE: + is_model_status = False + else: + model_data = self.assemble_model_data(selection, ai_type)[0] + is_model_status = model_data.model_status + + if not is_model_status: + var.set(CHOOSE_MODEL) + if ai_type == MDX_ARCH_TYPE: + self.mdx_segment_size_Label_place() + self.mdx_segment_size_Option_place() + self.overlap_mdx_Label_place() + self.overlap_mdx_Option_place() + self.update_stem_checkbox_labels(PRIMARY_STEM, disable_boxes=True) + else: + if ai_type == DEMUCS_ARCH_TYPE: + if not self.demucs_stems_var.get().lower() in model_data.demucs_source_list: + self.demucs_stems_var.set(ALL_STEMS if model_data.demucs_stem_count == 4 else VOCAL_STEM) + + self.update_button_states() + else: + if model_data.is_mdx_c and len(model_data.mdx_model_stems) >= 1: + if len(model_data.mdx_model_stems) >= 3: + self.mdxnet_stems_Label_place() + self.mdxnet_stems_Option_place() + else: + self.mdx_segment_size_Label_place() + self.mdx_segment_size_Option_place() + self.overlap_mdx_Label_place() + self.overlap_mdx23_Option_place() + 
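+ # For MDX23C models the stem list comes from the model itself: three or more stems
+ # switch the UI to the stems dropdown, otherwise the segment size and overlap
+ # controls stay in place, and update_button_states_mdx() below refreshes the stem
+ # options and "save only" checkboxes from that stem list.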
self.update_button_states_mdx(model_data.mdx_model_stems) + else: + if ai_type == MDX_ARCH_TYPE: + self.mdx_segment_size_Label_place() + self.mdx_segment_size_Option_place() + self.overlap_mdx_Label_place() + self.overlap_mdx_Option_place() + + stem = model_data.primary_stem + self.update_stem_checkbox_labels(stem) + + def selection_action_process_method(self, selection, from_widget=False, is_from_conv_menu=False): + """Checks model and variable status when toggling between process methods""" + + if is_from_conv_menu: + self.update_main_widget_states() + + if from_widget: + self.save_current_settings_var.set(CHOOSE_ENSEMBLE_OPTION) + + if selection == ENSEMBLE_MODE: + ensemble_choice = self.ensemble_main_stem_var.get() + if ensemble_choice in [CHOOSE_STEM_PAIR, FOUR_STEM_ENSEMBLE, MULTI_STEM_ENSEMBLE]: + self.update_stem_checkbox_labels(PRIMARY_STEM, disable_boxes=True) + else: + self.update_stem_checkbox_labels(self.return_ensemble_stems(is_primary=True)) + self.is_stem_only_Options_Enable() + return + + for method_type, model_var in self.method_mapper.items(): + if method_type in selection: + self.selection_action_models(model_var.get()) + break + + def selection_action_chosen_ensemble(self, selection): + """Activates specific actions depending on selected ensemble option""" + + if selection not in ENSEMBLE_OPTIONS: + self.selection_action_chosen_ensemble_load_saved(selection) + elif selection == SAVE_ENSEMBLE: + self.chosen_ensemble_var.set(CHOOSE_ENSEMBLE_OPTION) + self.pop_up_save_ensemble() + elif selection == OPT_SEPARATOR_SAVE: + self.chosen_ensemble_var.set(CHOOSE_ENSEMBLE_OPTION) + elif selection == CLEAR_ENSEMBLE: + self.ensemble_listbox_Option.selection_clear(0, 'end') + self.chosen_ensemble_var.set(CHOOSE_ENSEMBLE_OPTION) + + def selection_action_chosen_ensemble_load_saved(self, saved_ensemble): + """Loads the data from selected saved ensemble""" + + saved_data = None + saved_ensemble = saved_ensemble.replace(" ", "_") + saved_ensemble_path = os.path.join(ENSEMBLE_CACHE_DIR, f'{saved_ensemble}.json') + + if os.path.isfile(saved_ensemble_path): + saved_data = json.load(open(saved_ensemble_path)) + + if saved_data: + self.selection_action_ensemble_stems(saved_data['ensemble_main_stem'], from_menu=False) + self.ensemble_main_stem_var.set(saved_data['ensemble_main_stem']) + self.ensemble_type_var.set(saved_data['ensemble_type']) + self.saved_model_list = saved_data['selected_models'] + + for saved_model in self.saved_model_list: + status = self.assemble_model_data(saved_model, ENSEMBLE_CHECK)[0].model_status + if not status: + self.saved_model_list.remove(saved_model) + + indexes = self.ensemble_listbox_get_indexes_for_files(self.model_stems_list, self.saved_model_list) + + for i in indexes: + self.ensemble_listbox_Option.selection_set(i) + + self.update_checkbox_text() + + def selection_action_ensemble_stems(self, selection: str, from_menu=True, auto_update=None): + """Filters out all models from ensemble listbox that are incompatible with selected ensemble stem""" + + is_multi_stem = False + + if not selection == CHOOSE_STEM_PAIR: + if selection in [FOUR_STEM_ENSEMBLE, MULTI_STEM_ENSEMBLE]: + self.update_stem_checkbox_labels(PRIMARY_STEM, disable_boxes=True) + self.update_ensemble_algorithm_menu(is_4_stem=True) + self.ensemble_primary_stem = PRIMARY_STEM + self.ensemble_secondary_stem = SECONDARY_STEM + is_4_stem_check = True + if selection == MULTI_STEM_ENSEMBLE: + is_multi_stem = True + else: + self.update_ensemble_algorithm_menu() + self.is_stem_only_Options_Enable() + stems 
= selection.partition("/") + self.update_stem_checkbox_labels(stems[0]) + self.ensemble_primary_stem = stems[0] + self.ensemble_secondary_stem = stems[2] + is_4_stem_check = False + + self.model_stems_list = self.model_list(self.ensemble_primary_stem, self.ensemble_secondary_stem, is_4_stem_check=is_4_stem_check, is_multi_stem=is_multi_stem) + self.ensemble_listbox_Option.configure(state=tk.NORMAL) + self.ensemble_listbox_clear_and_insert_new(self.model_stems_list) + + if auto_update: + indexes = self.ensemble_listbox_get_indexes_for_files(self.model_stems_list, auto_update) + self.ensemble_listbox_select_from_indexs(indexes) + else: + self.ensemble_listbox_Option.configure(state=tk.DISABLED) + self.update_stem_checkbox_labels(PRIMARY_STEM, disable_boxes=True) + self.model_stems_list = () + + if from_menu: + self.chosen_ensemble_var.set(CHOOSE_ENSEMBLE_OPTION) + + def selection_action_saved_settings(self, selection, process_method=None): + """Activates specific action based on the selected settings from the saved settings selections""" + + if self.thread_check(self.active_processing_thread): + self.error_dialoge(SET_TO_ANY_PROCESS_ERROR) + return + + chosen_process_method = self.chosen_process_method_var.get() + if process_method: + chosen_process_method = process_method + + if selection in SAVE_SET_OPTIONS: + self.handle_special_options(selection, chosen_process_method) + else: + self.handle_saved_settings(selection, chosen_process_method) + + self.update_checkbox_text() + + def handle_special_options(self, selection, process_method): + """Handles actions for special options.""" + + if selection == SAVE_SETTINGS: + self.save_current_settings_var.set(SELECT_SAVED_SET) + self.pop_up_save_current_settings() + + elif selection == RESET_TO_DEFAULT: + self.save_current_settings_var.set(SELECT_SAVED_SET) + self.load_saved_settings(DEFAULT_DATA, process_method) + + elif selection == OPT_SEPARATOR_SAVE: + self.save_current_settings_var.set(SELECT_SAVED_SET) + + def handle_saved_settings(self, selection, process_method): + """Handles actions for saved settings.""" + + selection = selection.replace(" ", "_") + saved_ensemble_path = os.path.join(SETTINGS_CACHE_DIR, f'{selection}.json') + + if os.path.isfile(saved_ensemble_path): + with open(saved_ensemble_path, 'r') as file: + saved_data = json.load(file) + + if saved_data: + self.load_saved_settings(saved_data, process_method) + + #--Processing Methods-- + + def process_input_selections(self): + """Grabbing all audio files from selected directories.""" + + input_list = [] + + ext = FFMPEG_EXT if not self.is_accept_any_input_var.get() else ANY_EXT + + for i in self.inputPaths: + if os.path.isfile(i): + if i.endswith(ext): + input_list.append(i) + for root, dirs, files in os.walk(i): + for file in files: + if file.endswith(ext): + file = os.path.join(root, file) + if os.path.isfile(file): + input_list.append(file) + + self.inputPaths = tuple(input_list) + + def process_check_wav_type(self): + if self.wav_type_set_var.get() == '32-bit Float': + self.wav_type_set = 'FLOAT' + elif self.wav_type_set_var.get() == '64-bit Float':# + self.wav_type_set = 'FLOAT' if not self.save_format_var.get() == WAV else 'DOUBLE' + else: + self.wav_type_set = self.wav_type_set_var.get() + + def process_preliminary_checks(self): + """Verifies a valid model is chosen""" + + self.process_check_wav_type() + + if self.chosen_process_method_var.get() == ENSEMBLE_MODE: + continue_process = lambda:False if len(self.ensemble_listbox_get_all_selected_models()) <= 1 else True + if 
self.chosen_process_method_var.get() == VR_ARCH_PM: + continue_process = lambda:False if self.vr_model_var.get() == CHOOSE_MODEL else True + if self.chosen_process_method_var.get() == MDX_ARCH_TYPE: + continue_process = lambda:False if self.mdx_net_model_var.get() == CHOOSE_MODEL else True + if self.chosen_process_method_var.get() == DEMUCS_ARCH_TYPE: + continue_process = lambda:False if self.demucs_model_var.get() == CHOOSE_MODEL else True + + return continue_process() + + def process_storage_check(self): + """Verifies storage requirments""" + + total, used, free = shutil.disk_usage("/") + + space_details = f'Detected Total Space: {int(total/1.074e+9)} GB\'s\n' +\ + f'Detected Used Space: {int(used/1.074e+9)} GB\'s\n' +\ + f'Detected Free Space: {int(free/1.074e+9)} GB\'s\n' + + appropriate_storage = True + + if int(free/1.074e+9) <= int(2): + self.error_dialoge([STORAGE_ERROR[0], f'{STORAGE_ERROR[1]}{space_details}']) + appropriate_storage = False + + if int(free/1.074e+9) in [3, 4, 5, 6, 7, 8]: + appropriate_storage = self.message_box([STORAGE_WARNING[0], f'{STORAGE_WARNING[1]}{space_details}{CONFIRM_WARNING}']) + + return appropriate_storage + + def process_initialize(self): + """Verifies the input/output directories are valid and prepares to thread the main process.""" + + if not ( + self.chosen_process_method_var.get() == AUDIO_TOOLS + and self.chosen_audio_tool_var.get() in [ALIGN_INPUTS, MATCH_INPUTS] + and self.fileOneEntry_var.get() + and self.fileTwoEntry_var.get() + ) and not ( + self.inputPaths and os.path.isfile(self.inputPaths[0]) + ): + self.error_dialoge(INVALID_INPUT) + return + + + if not os.path.isdir(self.export_path_var.get()): + self.error_dialoge(INVALID_EXPORT) + return + + if not self.process_storage_check(): + return + + if self.chosen_process_method_var.get() != AUDIO_TOOLS: + if not self.process_preliminary_checks(): + error_msg = INVALID_ENSEMBLE if self.chosen_process_method_var.get() == ENSEMBLE_MODE else INVALID_MODEL + self.error_dialoge(error_msg) + return + target_function = self.process_start + else: + target_function = self.process_tool_start + + self.active_processing_thread = KThread(target=target_function) + self.active_processing_thread.start() + + def process_button_init(self): + self.auto_save() + self.conversion_Button_Text_var.set(WAIT_PROCESSING) + self.conversion_Button.configure(state=tk.DISABLED) + self.command_Text.clear() + + def process_get_baseText(self, total_files, file_num, is_dual=False): + """Create the base text for the command widget""" + + init_text = 'Files' if is_dual else 'File' + + text = '{init_text} {file_num}/{total_files} '.format(init_text=init_text, + file_num=file_num, + total_files=total_files) + + return text + + def process_update_progress(self, total_files, step: float = 1): + """Calculate the progress for the progress widget in the GUI""" + + total_count = self.true_model_count * total_files + base = (100 / total_count) + progress = base * self.iteration - base + progress += base * step + + self.progress_bar_main_var.set(progress) + + self.conversion_Button_Text_var.set(f'Process Progress: {int(progress)}%') + + def confirm_stop_process(self): + """Asks for confirmation before halting active process""" + + self.auto_save() + + if self.thread_check(self.active_processing_thread): + confirm = messagebox.askyesno(parent=root, title=STOP_PROCESS_CONFIRM[0], message=STOP_PROCESS_CONFIRM[1]) + + if confirm: + try: + self.active_processing_thread.terminate() + finally: + self.is_process_stopped = True + 
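+ # Report the user-requested stop in the console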
self.command_Text.write(PROCESS_STOPPED_BY_USER) + else: + self.clear_cache_torch = True + + def process_end(self, error=None): + """End of process actions""" + + self.auto_save() + self.cached_sources_clear() + self.clear_cache_torch = True + self.conversion_Button_Text_var.set(START_PROCESSING) + self.conversion_Button.configure(state=tk.NORMAL) + self.progress_bar_main_var.set(0) + + if error: + error_message_box_text = f'{error_dialouge(error)}{ERROR_OCCURED[1]}' + confirm = messagebox.askyesno(parent=root, + title=ERROR_OCCURED[0], + message=error_message_box_text) + + if confirm: + self.is_confirm_error_var.set(True) + self.clear_cache_torch = True + + self.clear_cache_torch = True + + if MODEL_MISSING_CHECK in error_message_box_text: + self.update_checkbox_text() + + def process_tool_start(self): + """Start the conversion for all the given mp3 and wav files""" + + def time_elapsed(): + return f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}' + + def get_audio_file_base(audio_file): + if audio_tool.audio_tool == MANUAL_ENSEMBLE: + return f'{os.path.splitext(os.path.basename(inputPaths[0]))[0]}' + elif audio_tool.audio_tool in [ALIGN_INPUTS, MATCH_INPUTS]: + return f'{os.path.splitext(os.path.basename(audio_file[0]))[0]}' + else: + return f'{os.path.splitext(os.path.basename(audio_file))[0]}' + + def handle_ensemble(inputPaths, audio_file_base): + self.progress_bar_main_var.set(50) + if self.choose_algorithm_var.get() == COMBINE_INPUTS: + audio_tool.combine_audio(inputPaths, audio_file_base) + else: + audio_tool.ensemble_manual(inputPaths, audio_file_base) + self.progress_bar_main_var.set(100) + self.command_Text.write(DONE) + + def handle_alignment_match(audio_file, audio_file_base, command_Text, set_progress_bar): + audio_file_2_base = f'{os.path.splitext(os.path.basename(audio_file[1]))[0]}' + if audio_tool.audio_tool == MATCH_INPUTS: + audio_tool.match_inputs(audio_file, audio_file_base, command_Text) + else: + command_Text(f"{PROCESS_STARTING_TEXT}\n") + audio_tool.align_inputs(audio_file, audio_file_base, audio_file_2_base, command_Text, set_progress_bar) + self.progress_bar_main_var.set(base * file_num) + self.command_Text.write(f"{DONE}\n") + + def handle_pitch_time_shift(audio_file, audio_file_base): + audio_tool.pitch_or_time_shift(audio_file, audio_file_base) + self.progress_bar_main_var.set(base * file_num) + self.command_Text.write(DONE) + + multiple_files = False + stime = time.perf_counter() + self.process_button_init() + inputPaths = self.inputPaths + is_verified_audio = True + is_dual = False + is_model_sample_mode = self.model_sample_mode_var.get() + self.iteration = 0 + self.true_model_count = 1 + self.process_check_wav_type() + process_complete_text = PROCESS_COMPLETE + + if self.chosen_audio_tool_var.get() in [ALIGN_INPUTS, MATCH_INPUTS]: + if self.DualBatch_inputPaths: + inputPaths = tuple(self.DualBatch_inputPaths) + else: + if not self.fileOneEntry_Full_var.get() or not self.fileTwoEntry_Full_var.get(): + self.command_Text.write(NOT_ENOUGH_ERROR_TEXT) + self.process_end() + return + else: + inputPaths = [(self.fileOneEntry_Full_var.get(), self.fileTwoEntry_Full_var.get())] + + try: + total_files = len(inputPaths) + if self.chosen_audio_tool_var.get() == TIME_STRETCH: + audio_tool = AudioTools(TIME_STRETCH) + self.progress_bar_main_var.set(2) + elif self.chosen_audio_tool_var.get() == CHANGE_PITCH: + audio_tool = AudioTools(CHANGE_PITCH) + self.progress_bar_main_var.set(2) + elif self.chosen_audio_tool_var.get() == 
MANUAL_ENSEMBLE: + if self.chosen_audio_tool_var.get() == MANUAL_ENSEMBLE: + audio_tool = Ensembler(is_manual_ensemble=True) + multiple_files = True + if total_files <= 1: + self.command_Text.write(NOT_ENOUGH_ERROR_TEXT) + self.process_end() + return + elif self.chosen_audio_tool_var.get() in [ALIGN_INPUTS, MATCH_INPUTS]: + audio_tool = AudioTools(self.chosen_audio_tool_var.get()) + self.progress_bar_main_var.set(2) + is_dual = True + + for file_num, audio_file in enumerate(inputPaths, start=1): + self.iteration += 1 + base = (100 / total_files) + audio_file_base = get_audio_file_base(audio_file) + self.base_text = self.process_get_baseText(total_files=total_files, file_num=total_files if multiple_files else file_num, is_dual=is_dual) + command_Text = lambda text: self.command_Text.write(self.base_text + text) + + set_progress_bar = lambda step, inference_iterations=0:self.process_update_progress(total_files=total_files, step=(step + (inference_iterations))) + + if not self.verify_audio(audio_file): + error_text_console = f'{self.base_text}"{os.path.basename(audio_file)}\" {MISSING_MESS_TEXT}\n' + if total_files >= 2: + self.command_Text.write(f'\n{error_text_console}') + is_verified_audio = False + continue + + audio_tool_action = audio_tool.audio_tool + if audio_tool_action not in [MANUAL_ENSEMBLE, ALIGN_INPUTS, MATCH_INPUTS]: + audio_file = self.create_sample(audio_file) if is_model_sample_mode else audio_file + self.command_Text.write(f'{NEW_LINE if file_num != 1 else NO_LINE}{self.base_text}"{os.path.basename(audio_file)}\".{NEW_LINES}') + elif audio_tool_action in [ALIGN_INPUTS, MATCH_INPUTS]: + text_write = ("File 1", "File 2") if audio_tool_action == ALIGN_INPUTS else ("Target", "Reference") + if audio_file[0] != audio_file[1]: + self.command_Text.write(f'{self.base_text}{text_write[0]}: "{os.path.basename(audio_file[0])}"{NEW_LINE}') + self.command_Text.write(f'{self.base_text}{text_write[1]}: "{os.path.basename(audio_file[1])}"{NEW_LINES}') + else: + self.command_Text.write(f'{self.base_text}{text_write[0]} & {text_write[1]} {SIMILAR_TEXT}{NEW_LINES}') + continue + elif audio_tool_action == MANUAL_ENSEMBLE: + for n, i in enumerate(inputPaths): + self.command_Text.write(f'File {n+1} "{os.path.basename(i)}"{NEW_LINE}') + self.command_Text.write(NEW_LINE) + + is_verified_audio = True + + if not audio_tool_action in [ALIGN_INPUTS, MATCH_INPUTS]: + command_Text(PROCESS_STARTING_TEXT) + + if audio_tool_action == MANUAL_ENSEMBLE: + handle_ensemble(inputPaths, audio_file_base) + break + if audio_tool_action in [ALIGN_INPUTS, MATCH_INPUTS]: + process_complete_text = PROCESS_COMPLETE_2 + handle_alignment_match(audio_file, audio_file_base, command_Text, set_progress_bar) + if audio_tool_action in [TIME_STRETCH, CHANGE_PITCH]: + handle_pitch_time_shift(audio_file, audio_file_base) + + if total_files == 1 and not is_verified_audio: + self.command_Text.write(f'{error_text_console}\n{PROCESS_FAILED}') + self.command_Text.write(time_elapsed()) + playsound(FAIL_CHIME) if self.is_task_complete_var.get() else None + else: + self.command_Text.write('{}{}'.format(process_complete_text, time_elapsed())) + playsound(COMPLETE_CHIME) if self.is_task_complete_var.get() else None + + self.process_end() + + except Exception as e: + self.error_log_var.set(error_text(self.chosen_audio_tool_var.get(), e)) + self.command_Text.write(f'\n\n{PROCESS_FAILED}') + self.command_Text.write(time_elapsed()) + playsound(FAIL_CHIME) if self.is_task_complete_var.get() else None + self.process_end(error=e) + + def 
process_determine_secondary_model(self, process_method, main_model_primary_stem, is_primary_stem_only=False, is_secondary_stem_only=False): + """Obtains the correct secondary model data for conversion.""" + + secondary_model_scale = None + secondary_model = tk.StringVar(value=NO_MODEL) + + if process_method == VR_ARCH_TYPE: + secondary_model_vars = self.vr_secondary_model_vars + if process_method == MDX_ARCH_TYPE: + secondary_model_vars = self.mdx_secondary_model_vars + if process_method == DEMUCS_ARCH_TYPE: + secondary_model_vars = self.demucs_secondary_model_vars + + if main_model_primary_stem in [VOCAL_STEM, INST_STEM]: + secondary_model = secondary_model_vars["voc_inst_secondary_model"] + secondary_model_scale = secondary_model_vars["voc_inst_secondary_model_scale"].get() + if main_model_primary_stem in [OTHER_STEM, NO_OTHER_STEM]: + secondary_model = secondary_model_vars["other_secondary_model"] + secondary_model_scale = secondary_model_vars["other_secondary_model_scale"].get() + if main_model_primary_stem in [DRUM_STEM, NO_DRUM_STEM]: + secondary_model = secondary_model_vars["drums_secondary_model"] + secondary_model_scale = secondary_model_vars["drums_secondary_model_scale"].get() + if main_model_primary_stem in [BASS_STEM, NO_BASS_STEM]: + secondary_model = secondary_model_vars["bass_secondary_model"] + secondary_model_scale = secondary_model_vars["bass_secondary_model_scale"].get() + + if secondary_model_scale: + secondary_model_scale = float(secondary_model_scale) + + if not secondary_model.get() == NO_MODEL: + secondary_model = ModelData(secondary_model.get(), + is_secondary_model=True, + primary_model_primary_stem=main_model_primary_stem, + is_primary_model_primary_stem_only=is_primary_stem_only, + is_primary_model_secondary_stem_only=is_secondary_stem_only) + if not secondary_model.model_status: + secondary_model = None + else: + secondary_model = None + + return secondary_model, secondary_model_scale + + def process_determine_demucs_pre_proc_model(self, primary_stem=None): + """Obtains the correct pre-process secondary model data for conversion.""" + + # Check if a pre-process model is set and it's not the 'NO_MODEL' value + if self.demucs_pre_proc_model_var.get() != NO_MODEL and self.is_demucs_pre_proc_model_activate_var.get(): + pre_proc_model = ModelData(self.demucs_pre_proc_model_var.get(), + primary_model_primary_stem=primary_stem, + is_pre_proc_model=True) + + # Return the model if it's valid + if pre_proc_model.model_status: + return pre_proc_model + + return None + + def process_determine_vocal_split_model(self): + """Obtains the correct vocal splitter secondary model data for conversion.""" + + # Check if a vocal splitter model is set and if it's not the 'NO_MODEL' value + if self.set_vocal_splitter_var.get() != NO_MODEL and self.is_set_vocal_splitter_var.get(): + vocal_splitter_model = ModelData(self.set_vocal_splitter_var.get(), is_vocal_split_model=True) + + # Return the model if it's valid + if vocal_splitter_model.model_status: + return vocal_splitter_model + + return None + + def check_only_selection_stem(self, checktype): + + chosen_method = self.chosen_process_method_var.get() + is_demucs = chosen_method == DEMUCS_ARCH_TYPE# + + stem_primary_label = self.is_primary_stem_only_Demucs_Text_var.get() if is_demucs else self.is_primary_stem_only_Text_var.get() + stem_primary_bool = self.is_primary_stem_only_Demucs_var.get() if is_demucs else self.is_primary_stem_only_var.get() + stem_secondary_label = self.is_secondary_stem_only_Demucs_Text_var.get() if is_demucs 
else self.is_secondary_stem_only_Text_var.get() + stem_secondary_bool = self.is_secondary_stem_only_Demucs_var.get() if is_demucs else self.is_secondary_stem_only_var.get() + + if checktype == VOCAL_STEM_ONLY: + return not ( + (not VOCAL_STEM_ONLY == stem_primary_label and stem_primary_bool) or + (not VOCAL_STEM_ONLY in stem_secondary_label and stem_secondary_bool) + ) + elif checktype == INST_STEM_ONLY: + return ( + (INST_STEM_ONLY == stem_primary_label and stem_primary_bool and self.is_save_inst_set_vocal_splitter_var.get() and self.set_vocal_splitter_var.get() != NO_MODEL) or + (INST_STEM_ONLY == stem_secondary_label and stem_secondary_bool and self.is_save_inst_set_vocal_splitter_var.get() and self.set_vocal_splitter_var.get() != NO_MODEL) + ) + elif checktype == IS_SAVE_VOC_ONLY: + return ( + (VOCAL_STEM_ONLY == stem_primary_label and stem_primary_bool) or + (VOCAL_STEM_ONLY == stem_secondary_label and stem_secondary_bool) + ) + elif checktype == IS_SAVE_INST_ONLY: + return ( + (INST_STEM_ONLY == stem_primary_label and stem_primary_bool) or + (INST_STEM_ONLY == stem_secondary_label and stem_secondary_bool) + ) + + def determine_voc_split(self, models): + is_vocal_active = self.check_only_selection_stem(VOCAL_STEM_ONLY) or self.check_only_selection_stem(INST_STEM_ONLY) + + if self.set_vocal_splitter_var.get() != NO_MODEL and self.is_set_vocal_splitter_var.get() and is_vocal_active: + model_stems_list = self.model_list(VOCAL_STEM, INST_STEM, is_dry_check=True, is_check_vocal_split=True) + if any(model.model_basename in model_stems_list for model in models): + return 1 + + return 0 + + def process_start(self): + """Start the conversion for all the given mp3 and wav files""" + + stime = time.perf_counter() + time_elapsed = lambda:f'Time Elapsed: {time.strftime("%H:%M:%S", time.gmtime(int(time.perf_counter() - stime)))}' + export_path = self.export_path_var.get() + is_ensemble = False + self.true_model_count = 0 + self.iteration = 0 + is_verified_audio = True + self.process_button_init() + inputPaths = self.inputPaths + inputPath_total_len = len(inputPaths) + is_model_sample_mode = self.model_sample_mode_var.get() + + try: + if self.chosen_process_method_var.get() == ENSEMBLE_MODE: + model, ensemble = self.assemble_model_data(), Ensembler() + export_path, is_ensemble = ensemble.ensemble_folder_name, True + if self.chosen_process_method_var.get() == VR_ARCH_PM: + model = self.assemble_model_data(self.vr_model_var.get(), VR_ARCH_TYPE) + if self.chosen_process_method_var.get() == MDX_ARCH_TYPE: + model = self.assemble_model_data(self.mdx_net_model_var.get(), MDX_ARCH_TYPE) + if self.chosen_process_method_var.get() == DEMUCS_ARCH_TYPE: + model = self.assemble_model_data(self.demucs_model_var.get(), DEMUCS_ARCH_TYPE) + + print(1,export_path, is_ensemble) + self.cached_source_model_list_check(model) + + true_model_4_stem_count = sum(m.demucs_4_stem_added_count if m.process_method == DEMUCS_ARCH_TYPE else 0 for m in model) + true_model_pre_proc_model_count = sum(2 if m.pre_proc_model_activated else 0 for m in model) + self.true_model_count = sum(2 if m.is_secondary_model_activated else 1 for m in model) + true_model_4_stem_count + true_model_pre_proc_model_count + self.determine_voc_split(model) + + #print("self.true_model_count", self.true_model_count) + + for file_num, audio_file in enumerate(inputPaths, start=1): + self.cached_sources_clear() + base_text = self.process_get_baseText(total_files=inputPath_total_len, file_num=file_num) + + if self.verify_audio(audio_file): + audio_file = 
self.create_sample(audio_file) if is_model_sample_mode else audio_file + self.command_Text.write(f'{NEW_LINE if not file_num ==1 else NO_LINE}{base_text}"{os.path.basename(audio_file)}\".{NEW_LINES}') + is_verified_audio = True + else: + error_text_console = f'{base_text}"{os.path.basename(audio_file)}\" {MISSING_MESS_TEXT}\n' + self.command_Text.write(f'\n{error_text_console}') if inputPath_total_len >= 2 else None + self.iteration += self.true_model_count + is_verified_audio = False + continue + + print('assemble_model_data',model) + for current_model_num, current_model in enumerate(model, start=1): + self.iteration += 1 + + if is_ensemble: + self.command_Text.write(f'Ensemble Mode - {current_model.model_basename} - Model {current_model_num}/{len(model)}{NEW_LINES}') + + model_name_text = f'({current_model.model_basename})' if not is_ensemble else '' + self.command_Text.write(base_text + f'{LOADING_MODEL_TEXT} {model_name_text}...') + + set_progress_bar = lambda step, inference_iterations=0:self.process_update_progress(total_files=inputPath_total_len, step=(step + (inference_iterations))) + write_to_console = lambda progress_text, base_text=base_text:self.command_Text.write(base_text + progress_text) + + audio_file_base = f"{file_num}_{os.path.splitext(os.path.basename(audio_file))[0]}" + audio_file_base = audio_file_base if not self.is_testing_audio_var.get() or is_ensemble else f"{round(time.time())}_{audio_file_base}" + audio_file_base = audio_file_base if not is_ensemble else f"{audio_file_base}_{current_model.model_basename}" + if not is_ensemble: + audio_file_base = audio_file_base if not self.is_add_model_name_var.get() else f"{audio_file_base}_{current_model.model_basename}" + + if self.is_create_model_folder_var.get() and not is_ensemble: + export_path = os.path.join(Path(self.export_path_var.get()), current_model.model_basename, os.path.splitext(os.path.basename(audio_file))[0]) + if not os.path.isdir(export_path):os.makedirs(export_path) + + process_data = { + 'model_data': current_model, + 'export_path': export_path, + 'audio_file_base': audio_file_base, + 'audio_file': audio_file, + 'set_progress_bar': set_progress_bar, + 'write_to_console': write_to_console, + 'process_iteration': self.process_iteration, + 'cached_source_callback': self.cached_source_callback, + 'cached_model_source_holder': self.cached_model_source_holder, + 'list_all_models': self.all_models, + 'is_ensemble_master': is_ensemble, + 'is_4_stem_ensemble': True if self.ensemble_main_stem_var.get() in [FOUR_STEM_ENSEMBLE, MULTI_STEM_ENSEMBLE] and is_ensemble else False} + + print('process_data',process_data) + if current_model.process_method == VR_ARCH_TYPE: + seperator = SeperateVR(current_model, process_data) + if current_model.process_method == MDX_ARCH_TYPE: + seperator = SeperateMDXC(current_model, process_data) if current_model.is_mdx_c else SeperateMDX(current_model, process_data) + if current_model.process_method == DEMUCS_ARCH_TYPE: + seperator = SeperateDemucs(current_model, process_data) + + seperator.seperate() + + if is_ensemble: + self.command_Text.write('\n') + + if is_ensemble: + + audio_file_base = audio_file_base.replace(f"_{current_model.model_basename}","") + self.command_Text.write(base_text + ENSEMBLING_OUTPUTS) + + if self.ensemble_main_stem_var.get() in [FOUR_STEM_ENSEMBLE, MULTI_STEM_ENSEMBLE]: + stem_list = extract_stems(audio_file_base, export_path) + for output_stem in stem_list: + ensemble.ensemble_outputs(audio_file_base, export_path, output_stem, is_4_stem=True) + else: + if not 
self.is_secondary_stem_only_var.get(): + ensemble.ensemble_outputs(audio_file_base, export_path, PRIMARY_STEM) + if not self.is_primary_stem_only_var.get(): + ensemble.ensemble_outputs(audio_file_base, export_path, SECONDARY_STEM) + ensemble.ensemble_outputs(audio_file_base, export_path, SECONDARY_STEM, is_inst_mix=True) + + self.command_Text.write(DONE) + + if is_model_sample_mode: + if os.path.isfile(audio_file): + os.remove(audio_file) + + clear_gpu_cache() + + shutil.rmtree(export_path) if is_ensemble and len(os.listdir(export_path)) == 0 else None + + if inputPath_total_len == 1 and not is_verified_audio: + self.command_Text.write(f'{error_text_console}\n{PROCESS_FAILED}') + self.command_Text.write(time_elapsed()) + playsound(FAIL_CHIME) if self.is_task_complete_var.get() else None + else: + set_progress_bar(1.0) + self.command_Text.write(PROCESS_COMPLETE) + self.command_Text.write(time_elapsed()) + playsound(COMPLETE_CHIME) if self.is_task_complete_var.get() else None + + self.process_end() + + except Exception as e: + self.error_log_var.set("{}{}".format(error_text(self.chosen_process_method_var.get(), e), self.get_settings_list())) + self.command_Text.write(f'\n\n{PROCESS_FAILED}') + self.command_Text.write(time_elapsed()) + playsound(FAIL_CHIME) if self.is_task_complete_var.get() else None + self.process_end(error=e) + + #--Varible Methods-- + + def load_to_default_confirm(self): + """Reset settings confirmation after asking for confirmation""" + if self.thread_check(self.active_processing_thread): + self.error_dialogue(SET_TO_DEFAULT_PROCESS_ERROR) + return + + confirm = messagebox.askyesno( + parent=root, + title=RESET_ALL_TO_DEFAULT_WARNING[0], + message=RESET_ALL_TO_DEFAULT_WARNING[1] + ) + if not confirm: + return + + self.load_saved_settings(DEFAULT_DATA, is_default_reset=True) + self.update_checkbox_text() + + if self.pre_proc_model_toggle is not None and self.is_open_menu_advanced_demucs_options.get(): + self.pre_proc_model_toggle() + + if (self.change_state_lambda is not None and ( + self.is_open_menu_advanced_vr_options.get() or + self.is_open_menu_advanced_mdx_options.get() or + self.is_open_menu_advanced_demucs_options.get() + )): + self.change_state_lambda() + + def load_saved_vars(self, data): + """Initializes primary Tkinter vars""" + + for key, value in DEFAULT_DATA.items(): + if not key in data.keys(): + data = {**data, **{key:value}} + data['batch_size'] = DEF_OPT + + ## ADD_BUTTON + self.chosen_process_method_var = tk.StringVar(value=data['chosen_process_method']) + + #VR Architecture Vars + self.vr_model_var = tk.StringVar(value=data['vr_model']) + self.aggression_setting_var = tk.StringVar(value=data['aggression_setting']) + self.window_size_var = tk.StringVar(value=data['window_size']) + self.mdx_segment_size_var = tk.StringVar(value=data['mdx_segment_size']) + self.batch_size_var = tk.StringVar(value=data['batch_size']) + self.crop_size_var = tk.StringVar(value=data['crop_size']) + self.is_tta_var = tk.BooleanVar(value=data['is_tta']) + self.is_output_image_var = tk.BooleanVar(value=data['is_output_image']) + self.is_post_process_var = tk.BooleanVar(value=data['is_post_process']) + self.is_high_end_process_var = tk.BooleanVar(value=data['is_high_end_process']) + self.post_process_threshold_var = tk.StringVar(value=data['post_process_threshold']) + self.vr_voc_inst_secondary_model_var = tk.StringVar(value=data['vr_voc_inst_secondary_model']) + self.vr_other_secondary_model_var = tk.StringVar(value=data['vr_other_secondary_model']) + 
self.vr_bass_secondary_model_var = tk.StringVar(value=data['vr_bass_secondary_model']) + self.vr_drums_secondary_model_var = tk.StringVar(value=data['vr_drums_secondary_model']) + self.vr_is_secondary_model_activate_var = tk.BooleanVar(value=data['vr_is_secondary_model_activate']) + self.vr_voc_inst_secondary_model_scale_var = tk.StringVar(value=data['vr_voc_inst_secondary_model_scale']) + self.vr_other_secondary_model_scale_var = tk.StringVar(value=data['vr_other_secondary_model_scale']) + self.vr_bass_secondary_model_scale_var = tk.StringVar(value=data['vr_bass_secondary_model_scale']) + self.vr_drums_secondary_model_scale_var = tk.StringVar(value=data['vr_drums_secondary_model_scale']) + + #Demucs Vars + self.demucs_model_var = tk.StringVar(value=data['demucs_model']) + self.segment_var = tk.StringVar(value=data['segment']) + self.overlap_var = tk.StringVar(value=data['overlap']) + self.overlap_mdx_var = tk.StringVar(value=data['overlap_mdx']) + self.overlap_mdx23_var = tk.StringVar(value=data['overlap_mdx23']) + self.shifts_var = tk.StringVar(value=data['shifts']) + self.chunks_demucs_var = tk.StringVar(value=data['chunks_demucs']) + self.margin_demucs_var = tk.StringVar(value=data['margin_demucs']) + self.is_chunk_demucs_var = tk.BooleanVar(value=data['is_chunk_demucs']) + self.is_chunk_mdxnet_var = tk.BooleanVar(value=False) + self.is_primary_stem_only_Demucs_var = tk.BooleanVar(value=data['is_primary_stem_only_Demucs']) + self.is_secondary_stem_only_Demucs_var = tk.BooleanVar(value=data['is_secondary_stem_only_Demucs']) + self.is_split_mode_var = tk.BooleanVar(value=data['is_split_mode']) + self.is_demucs_combine_stems_var = tk.BooleanVar(value=data['is_demucs_combine_stems'])#is_mdx23_combine_stems + self.is_mdx23_combine_stems_var = tk.BooleanVar(value=data['is_mdx23_combine_stems']) + self.demucs_voc_inst_secondary_model_var = tk.StringVar(value=data['demucs_voc_inst_secondary_model']) + self.demucs_other_secondary_model_var = tk.StringVar(value=data['demucs_other_secondary_model']) + self.demucs_bass_secondary_model_var = tk.StringVar(value=data['demucs_bass_secondary_model']) + self.demucs_drums_secondary_model_var = tk.StringVar(value=data['demucs_drums_secondary_model']) + self.demucs_is_secondary_model_activate_var = tk.BooleanVar(value=data['demucs_is_secondary_model_activate']) + self.demucs_voc_inst_secondary_model_scale_var = tk.StringVar(value=data['demucs_voc_inst_secondary_model_scale']) + self.demucs_other_secondary_model_scale_var = tk.StringVar(value=data['demucs_other_secondary_model_scale']) + self.demucs_bass_secondary_model_scale_var = tk.StringVar(value=data['demucs_bass_secondary_model_scale']) + self.demucs_drums_secondary_model_scale_var = tk.StringVar(value=data['demucs_drums_secondary_model_scale']) + self.demucs_pre_proc_model_var = tk.StringVar(value=data['demucs_pre_proc_model']) + self.is_demucs_pre_proc_model_activate_var = tk.BooleanVar(value=data['is_demucs_pre_proc_model_activate']) + self.is_demucs_pre_proc_model_inst_mix_var = tk.BooleanVar(value=data['is_demucs_pre_proc_model_inst_mix']) + + #MDX-Net Vars + self.mdx_net_model_var = tk.StringVar(value=data['mdx_net_model']) + self.chunks_var = tk.StringVar(value=data['chunks']) + self.margin_var = tk.StringVar(value=data['margin']) + self.compensate_var = tk.StringVar(value=data['compensate']) + self.denoise_option_var = tk.StringVar(value=data['denoise_option'])# + self.phase_option_var = tk.StringVar(value=data['phase_option'])# + self.phase_shifts_var = 
tk.StringVar(value=data['phase_shifts'])# + self.is_save_align_var = tk.BooleanVar(value=data['is_save_align'])#, + self.is_match_silence_var = tk.BooleanVar(value=data['is_match_silence'])# + self.is_spec_match_var = tk.BooleanVar(value=data['is_spec_match'])# + self.is_match_frequency_pitch_var = tk.BooleanVar(value=data['is_match_frequency_pitch'])# + self.is_mdx_c_seg_def_var = tk.BooleanVar(value=data['is_mdx_c_seg_def'])# + self.is_invert_spec_var = tk.BooleanVar(value=data['is_invert_spec'])# + self.is_deverb_vocals_var = tk.BooleanVar(value=data['is_deverb_vocals'])# + self.deverb_vocal_opt_var = tk.StringVar(value=data['deverb_vocal_opt'])# + self.voc_split_save_opt_var = tk.StringVar(value=data['voc_split_save_opt'])# + self.is_mixer_mode_var = tk.BooleanVar(value=data['is_mixer_mode']) + self.mdx_batch_size_var = tk.StringVar(value=data['mdx_batch_size']) + self.mdx_voc_inst_secondary_model_var = tk.StringVar(value=data['mdx_voc_inst_secondary_model']) + self.mdx_other_secondary_model_var = tk.StringVar(value=data['mdx_other_secondary_model']) + self.mdx_bass_secondary_model_var = tk.StringVar(value=data['mdx_bass_secondary_model']) + self.mdx_drums_secondary_model_var = tk.StringVar(value=data['mdx_drums_secondary_model']) + self.mdx_is_secondary_model_activate_var = tk.BooleanVar(value=data['mdx_is_secondary_model_activate']) + self.mdx_voc_inst_secondary_model_scale_var = tk.StringVar(value=data['mdx_voc_inst_secondary_model_scale']) + self.mdx_other_secondary_model_scale_var = tk.StringVar(value=data['mdx_other_secondary_model_scale']) + self.mdx_bass_secondary_model_scale_var = tk.StringVar(value=data['mdx_bass_secondary_model_scale']) + self.mdx_drums_secondary_model_scale_var = tk.StringVar(value=data['mdx_drums_secondary_model_scale']) + self.is_mdxnet_c_model_var = tk.BooleanVar(value=False) + + #Ensemble Vars + self.is_save_all_outputs_ensemble_var = tk.BooleanVar(value=data['is_save_all_outputs_ensemble']) + self.is_append_ensemble_name_var = tk.BooleanVar(value=data['is_append_ensemble_name']) + + #Audio Tool Vars + self.chosen_audio_tool_var = tk.StringVar(value=data['chosen_audio_tool']) + self.choose_algorithm_var = tk.StringVar(value=data['choose_algorithm']) + self.time_stretch_rate_var = tk.StringVar(value=data['time_stretch_rate']) + self.pitch_rate_var = tk.StringVar(value=data['pitch_rate']) + self.is_time_correction_var = tk.BooleanVar(value=data['is_time_correction']) + + #Shared Vars + self.semitone_shift_var = tk.StringVar(value=data['semitone_shift']) + self.mp3_bit_set_var = tk.StringVar(value=data['mp3_bit_set']) + self.save_format_var = tk.StringVar(value=data['save_format']) + self.wav_type_set_var = tk.StringVar(value=data['wav_type_set'])# + self.device_set_var = tk.StringVar(value=data['device_set'])# + self.user_code_var = tk.StringVar(value=data['user_code']) + self.is_gpu_conversion_var = tk.BooleanVar(value=data['is_gpu_conversion']) + self.is_primary_stem_only_var = tk.BooleanVar(value=data['is_primary_stem_only']) + self.is_secondary_stem_only_var = tk.BooleanVar(value=data['is_secondary_stem_only']) + self.is_testing_audio_var = tk.BooleanVar(value=data['is_testing_audio'])# + self.is_auto_update_model_params_var = tk.BooleanVar(value=True)# + self.is_auto_update_model_params = data['is_auto_update_model_params'] + self.is_add_model_name_var = tk.BooleanVar(value=data['is_add_model_name']) + self.is_accept_any_input_var = tk.BooleanVar(value=data['is_accept_any_input']) + self.is_task_complete_var = 
tk.BooleanVar(value=data['is_task_complete']) + self.is_normalization_var = tk.BooleanVar(value=data['is_normalization'])# + self.is_use_opencl_var = tk.BooleanVar(value=False)#True if is_opencl_only else data['is_use_opencl'])# + self.is_wav_ensemble_var = tk.BooleanVar(value=data['is_wav_ensemble'])# + self.is_create_model_folder_var = tk.BooleanVar(value=data['is_create_model_folder']) + self.help_hints_var = tk.BooleanVar(value=data['help_hints_var']) + self.model_sample_mode_var = tk.BooleanVar(value=data['model_sample_mode']) + self.model_sample_mode_duration_var = tk.StringVar(value=data['model_sample_mode_duration']) + self.model_sample_mode_duration_checkbox_var = tk.StringVar(value=SAMPLE_MODE_CHECKBOX(self.model_sample_mode_duration_var.get())) + self.model_sample_mode_duration_label_var = tk.StringVar(value=f'{self.model_sample_mode_duration_var.get()} Seconds') + self.set_vocal_splitter_var = tk.StringVar(value=data['set_vocal_splitter']) + self.is_set_vocal_splitter_var = tk.BooleanVar(value=data['is_set_vocal_splitter'])# + self.is_save_inst_set_vocal_splitter_var = tk.BooleanVar(value=data['is_save_inst_set_vocal_splitter'])# + + #Path Vars + self.export_path_var = tk.StringVar(value=data['export_path']) + self.inputPaths = data['input_paths'] + self.lastDir = data['lastDir'] + + #DualPaths-Align + self.time_window_var = tk.StringVar(value=data['time_window'])# + self.intro_analysis_var = tk.StringVar(value=data['intro_analysis']) + self.db_analysis_var = tk.StringVar(value=data['db_analysis']) + + self.fileOneEntry_var = tk.StringVar(value=data['fileOneEntry']) + self.fileOneEntry_Full_var = tk.StringVar(value=data['fileOneEntry_Full']) + self.fileTwoEntry_var = tk.StringVar(value=data['fileTwoEntry']) + self.fileTwoEntry_Full_var = tk.StringVar(value=data['fileTwoEntry_Full']) + self.DualBatch_inputPaths = data['DualBatch_inputPaths'] + + def load_saved_settings(self, loaded_setting: dict, process_method=None, is_default_reset=False): + """Loads user saved application settings or resets to default""" + + for key, value in DEFAULT_DATA.items(): + if not key in loaded_setting.keys(): + loaded_setting = {**loaded_setting, **{key:value}} + loaded_setting['batch_size'] = DEF_OPT + + is_default_reset = True if process_method == ENSEMBLE_MODE or is_default_reset else False + + if process_method == VR_ARCH_PM or is_default_reset: + self.vr_model_var.set(loaded_setting['vr_model']) + self.aggression_setting_var.set(loaded_setting['aggression_setting']) + self.window_size_var.set(loaded_setting['window_size']) + self.mdx_segment_size_var.set(loaded_setting['mdx_segment_size']) + self.batch_size_var.set(loaded_setting['batch_size']) + self.crop_size_var.set(loaded_setting['crop_size']) + self.is_tta_var.set(loaded_setting['is_tta']) + self.is_output_image_var.set(loaded_setting['is_output_image']) + self.is_post_process_var.set(loaded_setting['is_post_process']) + self.is_high_end_process_var.set(loaded_setting['is_high_end_process']) + self.post_process_threshold_var.set(loaded_setting['post_process_threshold']) + self.vr_voc_inst_secondary_model_var.set(loaded_setting['vr_voc_inst_secondary_model']) + self.vr_other_secondary_model_var.set(loaded_setting['vr_other_secondary_model']) + self.vr_bass_secondary_model_var.set(loaded_setting['vr_bass_secondary_model']) + self.vr_drums_secondary_model_var.set(loaded_setting['vr_drums_secondary_model']) + self.vr_is_secondary_model_activate_var.set(loaded_setting['vr_is_secondary_model_activate']) + 
self.vr_voc_inst_secondary_model_scale_var.set(loaded_setting['vr_voc_inst_secondary_model_scale']) + self.vr_other_secondary_model_scale_var.set(loaded_setting['vr_other_secondary_model_scale']) + self.vr_bass_secondary_model_scale_var.set(loaded_setting['vr_bass_secondary_model_scale']) + self.vr_drums_secondary_model_scale_var.set(loaded_setting['vr_drums_secondary_model_scale']) + + if process_method == DEMUCS_ARCH_TYPE or is_default_reset: + self.demucs_model_var.set(loaded_setting['demucs_model']) + self.segment_var.set(loaded_setting['segment']) + self.overlap_var.set(loaded_setting['overlap']) + self.shifts_var.set(loaded_setting['shifts']) + self.chunks_demucs_var.set(loaded_setting['chunks_demucs']) + self.margin_demucs_var.set(loaded_setting['margin_demucs']) + self.is_chunk_demucs_var.set(loaded_setting['is_chunk_demucs']) + self.is_chunk_mdxnet_var.set(loaded_setting['is_chunk_mdxnet']) + self.is_primary_stem_only_Demucs_var.set(loaded_setting['is_primary_stem_only_Demucs']) + self.is_secondary_stem_only_Demucs_var.set(loaded_setting['is_secondary_stem_only_Demucs']) + self.is_split_mode_var.set(loaded_setting['is_split_mode']) + self.is_demucs_combine_stems_var.set(loaded_setting['is_demucs_combine_stems'])# + self.is_mdx23_combine_stems_var.set(loaded_setting['is_mdx23_combine_stems'])# + self.demucs_voc_inst_secondary_model_var.set(loaded_setting['demucs_voc_inst_secondary_model']) + self.demucs_other_secondary_model_var.set(loaded_setting['demucs_other_secondary_model']) + self.demucs_bass_secondary_model_var.set(loaded_setting['demucs_bass_secondary_model']) + self.demucs_drums_secondary_model_var.set(loaded_setting['demucs_drums_secondary_model']) + self.demucs_is_secondary_model_activate_var.set(loaded_setting['demucs_is_secondary_model_activate']) + self.demucs_voc_inst_secondary_model_scale_var.set(loaded_setting['demucs_voc_inst_secondary_model_scale']) + self.demucs_other_secondary_model_scale_var.set(loaded_setting['demucs_other_secondary_model_scale']) + self.demucs_bass_secondary_model_scale_var.set(loaded_setting['demucs_bass_secondary_model_scale']) + self.demucs_drums_secondary_model_scale_var.set(loaded_setting['demucs_drums_secondary_model_scale']) + self.demucs_stems_var.set(loaded_setting['demucs_stems']) + self.mdxnet_stems_var.set(loaded_setting['mdx_stems']) + self.update_stem_checkbox_labels(self.demucs_stems_var.get(), demucs=True) + self.demucs_pre_proc_model_var.set(loaded_setting['demucs_pre_proc_model']) + self.is_demucs_pre_proc_model_activate_var.set(loaded_setting['is_demucs_pre_proc_model_activate']) + self.is_demucs_pre_proc_model_inst_mix_var.set(loaded_setting['is_demucs_pre_proc_model_inst_mix']) + + if process_method == MDX_ARCH_TYPE or is_default_reset: + self.mdx_net_model_var.set(loaded_setting['mdx_net_model']) + self.chunks_var.set(loaded_setting['chunks']) + self.margin_var.set(loaded_setting['margin']) + self.compensate_var.set(loaded_setting['compensate']) + self.denoise_option_var.set(loaded_setting['denoise_option']) + self.is_match_frequency_pitch_var.set(loaded_setting['is_match_frequency_pitch'])# + self.overlap_mdx_var.set(loaded_setting['overlap_mdx']) + self.overlap_mdx23_var.set(loaded_setting['overlap_mdx23']) + self.is_mdx_c_seg_def_var.set(loaded_setting['is_mdx_c_seg_def'])# + self.is_invert_spec_var.set(loaded_setting['is_invert_spec'])# + self.is_mixer_mode_var.set(loaded_setting['is_mixer_mode']) + self.mdx_batch_size_var.set(loaded_setting['mdx_batch_size']) + 
self.mdx_voc_inst_secondary_model_var.set(loaded_setting['mdx_voc_inst_secondary_model']) + self.mdx_other_secondary_model_var.set(loaded_setting['mdx_other_secondary_model']) + self.mdx_bass_secondary_model_var.set(loaded_setting['mdx_bass_secondary_model']) + self.mdx_drums_secondary_model_var.set(loaded_setting['mdx_drums_secondary_model']) + self.mdx_is_secondary_model_activate_var.set(loaded_setting['mdx_is_secondary_model_activate']) + self.mdx_voc_inst_secondary_model_scale_var.set(loaded_setting['mdx_voc_inst_secondary_model_scale']) + self.mdx_other_secondary_model_scale_var.set(loaded_setting['mdx_other_secondary_model_scale']) + self.mdx_bass_secondary_model_scale_var.set(loaded_setting['mdx_bass_secondary_model_scale']) + self.mdx_drums_secondary_model_scale_var.set(loaded_setting['mdx_drums_secondary_model_scale']) + + if is_default_reset: + self.is_save_all_outputs_ensemble_var.set(loaded_setting['is_save_all_outputs_ensemble']) + self.is_append_ensemble_name_var.set(loaded_setting['is_append_ensemble_name']) + self.choose_algorithm_var.set(loaded_setting['choose_algorithm']) + self.time_stretch_rate_var.set(loaded_setting['time_stretch_rate']) + self.pitch_rate_var.set(loaded_setting['pitch_rate'])# + self.is_time_correction_var.set(loaded_setting['is_time_correction'])# + self.is_primary_stem_only_var.set(loaded_setting['is_primary_stem_only']) + self.is_secondary_stem_only_var.set(loaded_setting['is_secondary_stem_only']) + self.is_testing_audio_var.set(loaded_setting['is_testing_audio'])# + self.is_auto_update_model_params_var.set(loaded_setting['is_auto_update_model_params']) + self.is_add_model_name_var.set(loaded_setting['is_add_model_name']) + self.is_accept_any_input_var.set(loaded_setting["is_accept_any_input"]) + self.is_task_complete_var.set(loaded_setting['is_task_complete']) + self.is_create_model_folder_var.set(loaded_setting['is_create_model_folder']) + self.mp3_bit_set_var.set(loaded_setting['mp3_bit_set']) + self.semitone_shift_var.set(loaded_setting['semitone_shift'])# + self.save_format_var.set(loaded_setting['save_format']) + self.wav_type_set_var.set(loaded_setting['wav_type_set'])# + self.device_set_var.set(loaded_setting['device_set'])# + self.user_code_var.set(loaded_setting['user_code']) + self.phase_option_var.set(loaded_setting['phase_option'])# + self.phase_shifts_var.set(loaded_setting['phase_shifts'])# + self.is_save_align_var.set(loaded_setting['is_save_align'])#i + self.time_window_var.set(loaded_setting['time_window'])# + self.is_match_silence_var.set(loaded_setting['is_match_silence'])# + self.is_spec_match_var.set(loaded_setting['is_spec_match'])# + self.intro_analysis_var.set(loaded_setting['intro_analysis'])# + self.db_analysis_var.set(loaded_setting['db_analysis'])# + self.fileOneEntry_var.set(loaded_setting['fileOneEntry'])# + self.fileOneEntry_Full_var.set(loaded_setting['fileOneEntry_Full'])# + self.fileTwoEntry_var.set(loaded_setting['fileTwoEntry'])# + self.fileTwoEntry_Full_var.set(loaded_setting['fileTwoEntry_Full'])# + self.DualBatch_inputPaths = [] + + self.is_gpu_conversion_var.set(loaded_setting['is_gpu_conversion']) + self.is_normalization_var.set(loaded_setting['is_normalization'])# + self.is_use_opencl_var.set(False)#True if is_opencl_only else loaded_setting['is_use_opencl'])# + self.is_wav_ensemble_var.set(loaded_setting['is_wav_ensemble'])# + self.help_hints_var.set(loaded_setting['help_hints_var']) + self.is_wav_ensemble_var.set(loaded_setting['is_wav_ensemble']) + 
self.set_vocal_splitter_var.set(loaded_setting['set_vocal_splitter']) + self.is_set_vocal_splitter_var.set(loaded_setting['is_set_vocal_splitter'])# + self.is_save_inst_set_vocal_splitter_var.set(loaded_setting['is_save_inst_set_vocal_splitter'])# + self.deverb_vocal_opt_var.set(loaded_setting['deverb_vocal_opt'])# + self.voc_split_save_opt_var.set(loaded_setting['voc_split_save_opt'])# + self.is_deverb_vocals_var.set(loaded_setting['is_deverb_vocals'])# + + self.model_sample_mode_var.set(loaded_setting['model_sample_mode']) + self.model_sample_mode_duration_var.set(loaded_setting['model_sample_mode_duration']) + self.model_sample_mode_duration_checkbox_var.set(SAMPLE_MODE_CHECKBOX(self.model_sample_mode_duration_var.get())) + self.model_sample_mode_duration_label_var.set(f'{self.model_sample_mode_duration_var.get()} Seconds') + + def save_values(self, app_close=True, is_restart=False, is_auto_save=False): + """Saves application data""" + + # -Save Data- + main_settings={ + 'vr_model': self.vr_model_var.get(), + 'aggression_setting': self.aggression_setting_var.get(), + 'window_size': self.window_size_var.get(), + 'mdx_segment_size': self.mdx_segment_size_var.get(), + 'batch_size': self.batch_size_var.get(), + 'crop_size': self.crop_size_var.get(), + 'is_tta': self.is_tta_var.get(), + 'is_output_image': self.is_output_image_var.get(), + 'is_post_process': self.is_post_process_var.get(), + 'is_high_end_process': self.is_high_end_process_var.get(), + 'post_process_threshold': self.post_process_threshold_var.get(), + 'vr_voc_inst_secondary_model': self.vr_voc_inst_secondary_model_var.get(), + 'vr_other_secondary_model': self.vr_other_secondary_model_var.get(), + 'vr_bass_secondary_model': self.vr_bass_secondary_model_var.get(), + 'vr_drums_secondary_model': self.vr_drums_secondary_model_var.get(), + 'vr_is_secondary_model_activate': self.vr_is_secondary_model_activate_var.get(), + 'vr_voc_inst_secondary_model_scale': self.vr_voc_inst_secondary_model_scale_var.get(), + 'vr_other_secondary_model_scale': self.vr_other_secondary_model_scale_var.get(), + 'vr_bass_secondary_model_scale': self.vr_bass_secondary_model_scale_var.get(), + 'vr_drums_secondary_model_scale': self.vr_drums_secondary_model_scale_var.get(), + 'demucs_model': self.demucs_model_var.get(), + 'segment': self.segment_var.get(), + 'overlap': self.overlap_var.get(), + 'overlap_mdx': self.overlap_mdx_var.get(), + 'overlap_mdx23': self.overlap_mdx23_var.get(), + 'shifts': self.shifts_var.get(), + 'chunks_demucs': self.chunks_demucs_var.get(), + 'margin_demucs': self.margin_demucs_var.get(), + 'is_chunk_demucs': self.is_chunk_demucs_var.get(), + 'is_chunk_mdxnet': self.is_chunk_mdxnet_var.get(), + 'is_primary_stem_only_Demucs': self.is_primary_stem_only_Demucs_var.get(), + 'is_secondary_stem_only_Demucs': self.is_secondary_stem_only_Demucs_var.get(), + 'is_split_mode': self.is_split_mode_var.get(), + 'is_demucs_combine_stems': self.is_demucs_combine_stems_var.get(),# + 'is_mdx23_combine_stems': self.is_mdx23_combine_stems_var.get(),# + 'demucs_voc_inst_secondary_model': self.demucs_voc_inst_secondary_model_var.get(), + 'demucs_other_secondary_model': self.demucs_other_secondary_model_var.get(), + 'demucs_bass_secondary_model': self.demucs_bass_secondary_model_var.get(), + 'demucs_drums_secondary_model': self.demucs_drums_secondary_model_var.get(), + 'demucs_is_secondary_model_activate': self.demucs_is_secondary_model_activate_var.get(), + 'demucs_voc_inst_secondary_model_scale': self.demucs_voc_inst_secondary_model_scale_var.get(), + 
'demucs_other_secondary_model_scale': self.demucs_other_secondary_model_scale_var.get(), + 'demucs_bass_secondary_model_scale': self.demucs_bass_secondary_model_scale_var.get(), + 'demucs_drums_secondary_model_scale': self.demucs_drums_secondary_model_scale_var.get(), + 'demucs_pre_proc_model': self.demucs_pre_proc_model_var.get(), + 'is_demucs_pre_proc_model_activate': self.is_demucs_pre_proc_model_activate_var.get(), + 'is_demucs_pre_proc_model_inst_mix': self.is_demucs_pre_proc_model_inst_mix_var.get(), + 'mdx_net_model': self.mdx_net_model_var.get(), + 'chunks': self.chunks_var.get(), + 'margin': self.margin_var.get(), + 'compensate': self.compensate_var.get(), + 'denoise_option': self.denoise_option_var.get(),# + 'is_match_frequency_pitch': self.is_match_frequency_pitch_var.get(),# + 'phase_option': self.phase_option_var.get(),# + 'phase_shifts': self.phase_shifts_var.get(),# + 'is_save_align': self.is_save_align_var.get(),# + 'is_match_silence': self.is_match_silence_var.get(),# + 'is_spec_match': self.is_spec_match_var.get(),# + 'is_mdx_c_seg_def': self.is_mdx_c_seg_def_var.get(),# + 'is_invert_spec': self.is_invert_spec_var.get(),# + 'is_deverb_vocals': self.is_deverb_vocals_var.get(),##, + 'deverb_vocal_opt': self.deverb_vocal_opt_var.get(),# + 'voc_split_save_opt': self.voc_split_save_opt_var.get(),##, + 'is_mixer_mode': self.is_mixer_mode_var.get(), + 'mdx_batch_size':self.mdx_batch_size_var.get(), + 'mdx_voc_inst_secondary_model': self.mdx_voc_inst_secondary_model_var.get(), + 'mdx_other_secondary_model': self.mdx_other_secondary_model_var.get(), + 'mdx_bass_secondary_model': self.mdx_bass_secondary_model_var.get(), + 'mdx_drums_secondary_model': self.mdx_drums_secondary_model_var.get(), + 'mdx_is_secondary_model_activate': self.mdx_is_secondary_model_activate_var.get(), + 'mdx_voc_inst_secondary_model_scale': self.mdx_voc_inst_secondary_model_scale_var.get(), + 'mdx_other_secondary_model_scale': self.mdx_other_secondary_model_scale_var.get(), + 'mdx_bass_secondary_model_scale': self.mdx_bass_secondary_model_scale_var.get(), + 'mdx_drums_secondary_model_scale': self.mdx_drums_secondary_model_scale_var.get(), + 'is_save_all_outputs_ensemble': self.is_save_all_outputs_ensemble_var.get(), + 'is_append_ensemble_name': self.is_append_ensemble_name_var.get(), + 'chosen_audio_tool': self.chosen_audio_tool_var.get(), + 'choose_algorithm': self.choose_algorithm_var.get(), + 'time_stretch_rate': self.time_stretch_rate_var.get(), + 'pitch_rate': self.pitch_rate_var.get(),# + 'is_time_correction': self.is_time_correction_var.get(),# + 'is_gpu_conversion': self.is_gpu_conversion_var.get(), + 'is_primary_stem_only': self.is_primary_stem_only_var.get(), + 'is_secondary_stem_only': self.is_secondary_stem_only_var.get(), + 'is_testing_audio': self.is_testing_audio_var.get(),# + 'is_auto_update_model_params': self.is_auto_update_model_params_var.get(), + 'is_add_model_name': self.is_add_model_name_var.get(), + 'is_accept_any_input': self.is_accept_any_input_var.get(), + 'is_task_complete': self.is_task_complete_var.get(), + 'is_normalization': self.is_normalization_var.get(),# + 'is_use_opencl': self.is_use_opencl_var.get(),# + 'is_wav_ensemble': self.is_wav_ensemble_var.get(),# + 'is_create_model_folder': self.is_create_model_folder_var.get(), + 'mp3_bit_set': self.mp3_bit_set_var.get(), + 'semitone_shift': self.semitone_shift_var.get(),# + 'save_format': self.save_format_var.get(), + 'wav_type_set': self.wav_type_set_var.get(),# + 'device_set': self.device_set_var.get(),# + 'user_code': 
self.user_code_var.get(), + 'help_hints_var': self.help_hints_var.get(), + 'set_vocal_splitter': self.set_vocal_splitter_var.get(), + 'is_set_vocal_splitter': self.is_set_vocal_splitter_var.get(),# + 'is_save_inst_set_vocal_splitter': self.is_save_inst_set_vocal_splitter_var.get(),# + 'model_sample_mode': self.model_sample_mode_var.get(), + 'model_sample_mode_duration': self.model_sample_mode_duration_var.get() + } + + other_data = { + 'chosen_process_method': self.chosen_process_method_var.get(), + 'input_paths': self.inputPaths, + 'lastDir': self.lastDir, + 'export_path': self.export_path_var.get(), + 'time_window': self.time_window_var.get(), + 'intro_analysis': self.intro_analysis_var.get(), + 'db_analysis': self.db_analysis_var.get(), + 'fileOneEntry': self.fileOneEntry_var.get(), + 'fileOneEntry_Full': self.fileOneEntry_Full_var.get(), + 'fileTwoEntry': self.fileTwoEntry_var.get(), + 'fileTwoEntry_Full': self.fileTwoEntry_Full_var.get(), + 'DualBatch_inputPaths': self.DualBatch_inputPaths, + #'model_hash_table': model_hash_table, + } + + user_saved_extras = { + 'demucs_stems': self.demucs_stems_var.get(), + 'mdx_stems': self.mdxnet_stems_var.get()} + + if app_close: + save_data(data={**main_settings, **other_data}) + + if self.thread_check(self.active_download_thread): + self.error_dialoge(EXIT_DOWNLOAD_ERROR) + return + + if self.thread_check(self.active_processing_thread): + if self.is_process_stopped: + self.error_dialoge(EXIT_HALTED_PROCESS_ERROR) + else: + self.error_dialoge(EXIT_PROCESS_ERROR) + return + + remove_temps(ENSEMBLE_TEMP_PATH) + remove_temps(SAMPLE_CLIP_PATH) + self.delete_temps() + + if is_restart: + try: + subprocess.Popen(f'UVR_Launcher.exe') + except Exception: + subprocess.Popen(f'python "{__file__}"', shell=True) + + self.destroy() + + elif is_auto_save: + save_data(data={**main_settings, **other_data}) + else: + return {**main_settings, **user_saved_extras} + + def get_settings_list(self): + + settings_dict = self.save_values(app_close=False) + settings_list = '\n'.join(''.join(f"{key}: {value}") for key, value in settings_dict.items() if not key == 'user_code') + + return f"\n{FULL_APP_SET_TEXT}:\n\n{settings_list}" + +def read_bulliten_text_mac(path, data): + try: + with open(path, 'w') as f: + f.write(data) + + if os.path.isfile(path): + with open(path, 'r') as file : + data = file.read().replace("~", "•") + except Exception as e: + data = 'No information available.' 
+ + return data + +def open_link(event, link=None): + webbrowser.open(link) + +def auto_hyperlink(text_widget:tk.Text): + content = text_widget.get('1.0', tk.END) + + # Regular expression to identify URLs + urls = re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', content) + + for url in urls: + start_idx = content.find(url) + end_idx = start_idx + len(url) + + # Convert indices to tk.Text widget format + start_line = content.count('\n', 0, start_idx) + 1 + start_char = start_idx - content.rfind('\n', 0, start_idx) - 1 + end_line = content.count('\n', 0, end_idx) + 1 + end_char = end_idx - content.rfind('\n', 0, end_idx) - 1 + + start_tag = f"{start_line}.{start_char}" + end_tag = f"{end_line}.{end_char}" + + # Tag the hyperlink text and configure it + text_widget.tag_add(url, start_tag, end_tag) + text_widget.tag_configure(url, foreground=FG_COLOR, underline=True) + text_widget.tag_bind(url, "", lambda e, link=url: open_link(e, link)) + text_widget.tag_bind(url, "", lambda e: text_widget.config(cursor="hand2")) + text_widget.tag_bind(url, "", lambda e: text_widget.config(cursor="arrow")) + +def vip_downloads(password, link_type=VIP_REPO): + """Attempts to decrypt VIP model link with given input code""" + + try: + kdf = PBKDF2HMAC( + algorithm=hashes.SHA256(), + length=32, + salt=link_type[0], + iterations=390000,) + + key = base64.urlsafe_b64encode(kdf.derive(bytes(password, 'utf-8'))) + f = Fernet(key) + + return str(f.decrypt(link_type[1]), 'UTF-8') + except Exception: + return NO_CODE + +def extract_stems(audio_file_base, export_path): + + filenames = [file for file in os.listdir(export_path) if file.startswith(audio_file_base)] + + pattern = r'\(([^()]+)\)(?=[^()]*\.wav)' + stem_list = [] + + for filename in filenames: + match = re.search(pattern, filename) + if match: + stem_list.append(match.group(1)) + + counter = Counter(stem_list) + filtered_lst = [item for item in stem_list if counter[item] > 1] + + return list(set(filtered_lst)) + +if __name__ == "__main__": + + try: + windll.user32.SetThreadDpiAwarenessContext(wintypes.HANDLE(-1)) + except Exception as e: + if OPERATING_SYSTEM == 'Windows': + print(e) + + root = MainWindow() + root.update_checkbox_text() + root.is_root_defined_var.set(True) + root.is_check_splash = True + + root.update() if is_windows else root.update_idletasks() + root.deiconify() + root.configure(bg=BG_COLOR) + root.mainloop() diff --git a/__version__.py b/__version__.py new file mode 100644 index 0000000000000000000000000000000000000000..990c401b5ddec7a7416cbc5bac1d74eac3bf4430 --- /dev/null +++ b/__version__.py @@ -0,0 +1,4 @@ +VERSION = 'v5.6.0' +PATCH = 'UVR_Patch_9_29_23_1_39' +PATCH_MAC = 'UVR_Patch_9_29_23_1_39' +PATCH_LINUX = 'UVR_Patch_9_29_23_1_39' diff --git a/demucs/__init__.py b/demucs/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5656d59e07f3fa33dd3bad1a0f9279ff4b8a6128 --- /dev/null +++ b/demucs/__init__.py @@ -0,0 +1,5 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. diff --git a/demucs/__main__.py b/demucs/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..5de878f198029af74afa2db7e6c06b9b306bf99d --- /dev/null +++ b/demucs/__main__.py @@ -0,0 +1,272 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import json +import os +import sys +import time +from dataclasses import dataclass, field +from fractions import Fraction + +import torch as th +from torch import distributed, nn +from torch.nn.parallel.distributed import DistributedDataParallel + +from .augment import FlipChannels, FlipSign, Remix, Shift +from .compressed import StemsSet, build_musdb_metadata, get_musdb_tracks +from .model import Demucs +from .parser import get_name, get_parser +from .raw import Rawset +from .tasnet import ConvTasNet +from .test import evaluate +from .train import train_model, validate_model +from .utils import human_seconds, load_model, save_model, sizeof_fmt + + +@dataclass +class SavedState: + metrics: list = field(default_factory=list) + last_state: dict = None + best_state: dict = None + optimizer: dict = None + + +def main(): + parser = get_parser() + args = parser.parse_args() + name = get_name(parser, args) + print(f"Experiment {name}") + + if args.musdb is None and args.rank == 0: + print( + "You must provide the path to the MusDB dataset with the --musdb flag. " + "To download the MusDB dataset, see https://sigsep.github.io/datasets/musdb.html.", + file=sys.stderr) + sys.exit(1) + + eval_folder = args.evals / name + eval_folder.mkdir(exist_ok=True, parents=True) + args.logs.mkdir(exist_ok=True) + metrics_path = args.logs / f"{name}.json" + eval_folder.mkdir(exist_ok=True, parents=True) + args.checkpoints.mkdir(exist_ok=True, parents=True) + args.models.mkdir(exist_ok=True, parents=True) + + if args.device is None: + device = "cpu" + if th.cuda.is_available(): + device = "cuda" + else: + device = args.device + + th.manual_seed(args.seed) + # Prevents too many threads to be started when running `museval` as it can be quite + # inefficient on NUMA architectures. 
+ os.environ["OMP_NUM_THREADS"] = "1" + + if args.world_size > 1: + if device != "cuda" and args.rank == 0: + print("Error: distributed training is only available with cuda device", file=sys.stderr) + sys.exit(1) + th.cuda.set_device(args.rank % th.cuda.device_count()) + distributed.init_process_group(backend="nccl", + init_method="tcp://" + args.master, + rank=args.rank, + world_size=args.world_size) + + checkpoint = args.checkpoints / f"{name}.th" + checkpoint_tmp = args.checkpoints / f"{name}.th.tmp" + if args.restart and checkpoint.exists(): + checkpoint.unlink() + + if args.test: + args.epochs = 1 + args.repeat = 0 + model = load_model(args.models / args.test) + elif args.tasnet: + model = ConvTasNet(audio_channels=args.audio_channels, samplerate=args.samplerate, X=args.X) + else: + model = Demucs( + audio_channels=args.audio_channels, + channels=args.channels, + context=args.context, + depth=args.depth, + glu=args.glu, + growth=args.growth, + kernel_size=args.kernel_size, + lstm_layers=args.lstm_layers, + rescale=args.rescale, + rewrite=args.rewrite, + sources=4, + stride=args.conv_stride, + upsample=args.upsample, + samplerate=args.samplerate + ) + model.to(device) + if args.show: + print(model) + size = sizeof_fmt(4 * sum(p.numel() for p in model.parameters())) + print(f"Model size {size}") + return + + optimizer = th.optim.Adam(model.parameters(), lr=args.lr) + + try: + saved = th.load(checkpoint, map_location='cpu') + except IOError: + saved = SavedState() + else: + model.load_state_dict(saved.last_state) + optimizer.load_state_dict(saved.optimizer) + + if args.save_model: + if args.rank == 0: + model.to("cpu") + model.load_state_dict(saved.best_state) + save_model(model, args.models / f"{name}.th") + return + + if args.rank == 0: + done = args.logs / f"{name}.done" + if done.exists(): + done.unlink() + + if args.augment: + augment = nn.Sequential(FlipSign(), FlipChannels(), Shift(args.data_stride), + Remix(group_size=args.remix_group_size)).to(device) + else: + augment = Shift(args.data_stride) + + if args.mse: + criterion = nn.MSELoss() + else: + criterion = nn.L1Loss() + + # Setting number of samples so that all convolution windows are full. + # Prevents hard to debug mistake with the prediction being shifted compared + # to the input mixture. 
+ samples = model.valid_length(args.samples) + print(f"Number of training samples adjusted to {samples}") + + if args.raw: + train_set = Rawset(args.raw / "train", + samples=samples + args.data_stride, + channels=args.audio_channels, + streams=[0, 1, 2, 3, 4], + stride=args.data_stride) + + valid_set = Rawset(args.raw / "valid", channels=args.audio_channels) + else: + if not args.metadata.is_file() and args.rank == 0: + build_musdb_metadata(args.metadata, args.musdb, args.workers) + if args.world_size > 1: + distributed.barrier() + metadata = json.load(open(args.metadata)) + duration = Fraction(samples + args.data_stride, args.samplerate) + stride = Fraction(args.data_stride, args.samplerate) + train_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="train"), + metadata, + duration=duration, + stride=stride, + samplerate=args.samplerate, + channels=args.audio_channels) + valid_set = StemsSet(get_musdb_tracks(args.musdb, subsets=["train"], split="valid"), + metadata, + samplerate=args.samplerate, + channels=args.audio_channels) + + best_loss = float("inf") + for epoch, metrics in enumerate(saved.metrics): + print(f"Epoch {epoch:03d}: " + f"train={metrics['train']:.8f} " + f"valid={metrics['valid']:.8f} " + f"best={metrics['best']:.4f} " + f"duration={human_seconds(metrics['duration'])}") + best_loss = metrics['best'] + + if args.world_size > 1: + dmodel = DistributedDataParallel(model, + device_ids=[th.cuda.current_device()], + output_device=th.cuda.current_device()) + else: + dmodel = model + + for epoch in range(len(saved.metrics), args.epochs): + begin = time.time() + model.train() + train_loss = train_model(epoch, + train_set, + dmodel, + criterion, + optimizer, + augment, + batch_size=args.batch_size, + device=device, + repeat=args.repeat, + seed=args.seed, + workers=args.workers, + world_size=args.world_size) + model.eval() + valid_loss = validate_model(epoch, + valid_set, + model, + criterion, + device=device, + rank=args.rank, + split=args.split_valid, + world_size=args.world_size) + + duration = time.time() - begin + if valid_loss < best_loss: + best_loss = valid_loss + saved.best_state = { + key: value.to("cpu").clone() + for key, value in model.state_dict().items() + } + saved.metrics.append({ + "train": train_loss, + "valid": valid_loss, + "best": best_loss, + "duration": duration + }) + if args.rank == 0: + json.dump(saved.metrics, open(metrics_path, "w")) + + saved.last_state = model.state_dict() + saved.optimizer = optimizer.state_dict() + if args.rank == 0 and not args.test: + th.save(saved, checkpoint_tmp) + checkpoint_tmp.rename(checkpoint) + + print(f"Epoch {epoch:03d}: " + f"train={train_loss:.8f} valid={valid_loss:.8f} best={best_loss:.4f} " + f"duration={human_seconds(duration)}") + + del dmodel + model.load_state_dict(saved.best_state) + if args.eval_cpu: + device = "cpu" + model.to(device) + model.eval() + evaluate(model, + args.musdb, + eval_folder, + rank=args.rank, + world_size=args.world_size, + device=device, + save=args.save, + split=args.split_valid, + shifts=args.shifts, + workers=args.eval_workers) + model.to("cpu") + save_model(model, args.models / f"{name}.th") + if args.rank == 0: + print("done") + done.write_text("done") + + +if __name__ == "__main__": + main() diff --git a/demucs/__pycache__/__init__.cpython-310.pyc b/demucs/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da65465c6d44f9d9a571da1383d2142cacb9ec2a Binary files /dev/null and 
b/demucs/__pycache__/__init__.cpython-310.pyc differ diff --git a/demucs/__pycache__/apply.cpython-310.pyc b/demucs/__pycache__/apply.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58817b431ab3ea0ae2e9d70275918741f2491e43 Binary files /dev/null and b/demucs/__pycache__/apply.cpython-310.pyc differ diff --git a/demucs/__pycache__/demucs.cpython-310.pyc b/demucs/__pycache__/demucs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b03484d9a3fbe3cacab08e04372b7b7a78c258a Binary files /dev/null and b/demucs/__pycache__/demucs.cpython-310.pyc differ diff --git a/demucs/__pycache__/filtering.cpython-310.pyc b/demucs/__pycache__/filtering.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..528cbfcf3c32534c08e5067ed3ecad0d7ff20850 Binary files /dev/null and b/demucs/__pycache__/filtering.cpython-310.pyc differ diff --git a/demucs/__pycache__/hdemucs.cpython-310.pyc b/demucs/__pycache__/hdemucs.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56eac0eb628e0469faab86d700fa770c1fcce6d7 Binary files /dev/null and b/demucs/__pycache__/hdemucs.cpython-310.pyc differ diff --git a/demucs/__pycache__/model.cpython-310.pyc b/demucs/__pycache__/model.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..edce319afd58c8de5da350e326479f9262ecc1d8 Binary files /dev/null and b/demucs/__pycache__/model.cpython-310.pyc differ diff --git a/demucs/__pycache__/model_v2.cpython-310.pyc b/demucs/__pycache__/model_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5ad2995e18cdd873767f6096f6e03bd68ff496f5 Binary files /dev/null and b/demucs/__pycache__/model_v2.cpython-310.pyc differ diff --git a/demucs/__pycache__/pretrained.cpython-310.pyc b/demucs/__pycache__/pretrained.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3189a84aeef960a4e0bd259e4c2b73437a3dbcfe Binary files /dev/null and b/demucs/__pycache__/pretrained.cpython-310.pyc differ diff --git a/demucs/__pycache__/repo.cpython-310.pyc b/demucs/__pycache__/repo.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4916cafc42d903201b7905e49f88a11a4c9051ba Binary files /dev/null and b/demucs/__pycache__/repo.cpython-310.pyc differ diff --git a/demucs/__pycache__/spec.cpython-310.pyc b/demucs/__pycache__/spec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7395e9429fcdae893d99a42e7f1930be6f49a143 Binary files /dev/null and b/demucs/__pycache__/spec.cpython-310.pyc differ diff --git a/demucs/__pycache__/states.cpython-310.pyc b/demucs/__pycache__/states.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c5e631702e3fd70bed38189c5e721ea1b194ef2c Binary files /dev/null and b/demucs/__pycache__/states.cpython-310.pyc differ diff --git a/demucs/__pycache__/tasnet_v2.cpython-310.pyc b/demucs/__pycache__/tasnet_v2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45152a6dbd3457cba0f4112a946bd68fd367b3f7 Binary files /dev/null and b/demucs/__pycache__/tasnet_v2.cpython-310.pyc differ diff --git a/demucs/__pycache__/utils.cpython-310.pyc b/demucs/__pycache__/utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8adb26b37eefe6546fb26d3ef002829393062818 Binary files /dev/null and b/demucs/__pycache__/utils.cpython-310.pyc differ diff --git a/demucs/apply.py 
b/demucs/apply.py new file mode 100644 index 0000000000000000000000000000000000000000..be6d930035a4748192b96d7baee6e80772a02f6e --- /dev/null +++ b/demucs/apply.py @@ -0,0 +1,305 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +""" +Code to apply a model to a mix. It will handle chunking with overlaps and +inteprolation between chunks, as well as the "shift trick". +""" +from concurrent.futures import ThreadPoolExecutor +import random +import typing as tp +from multiprocessing import Process,Queue,Pipe + +import torch as th +from torch import nn +from torch.nn import functional as F +import tqdm +import tkinter as tk + +from .demucs import Demucs +from .hdemucs import HDemucs +from .utils import center_trim, DummyPoolExecutor + +Model = tp.Union[Demucs, HDemucs] + +progress_bar_num = 0 + +class BagOfModels(nn.Module): + def __init__(self, models: tp.List[Model], + weights: tp.Optional[tp.List[tp.List[float]]] = None, + segment: tp.Optional[float] = None): + """ + Represents a bag of models with specific weights. + You should call `apply_model` rather than calling directly the forward here for + optimal performance. + + Args: + models (list[nn.Module]): list of Demucs/HDemucs models. + weights (list[list[float]]): list of weights. If None, assumed to + be all ones, otherwise it should be a list of N list (N number of models), + each containing S floats (S number of sources). + segment (None or float): overrides the `segment` attribute of each model + (this is performed inplace, be careful if you reuse the models passed). + """ + + super().__init__() + assert len(models) > 0 + first = models[0] + for other in models: + assert other.sources == first.sources + assert other.samplerate == first.samplerate + assert other.audio_channels == first.audio_channels + if segment is not None: + other.segment = segment + + self.audio_channels = first.audio_channels + self.samplerate = first.samplerate + self.sources = first.sources + self.models = nn.ModuleList(models) + + if weights is None: + weights = [[1. 
for _ in first.sources] for _ in models] + else: + assert len(weights) == len(models) + for weight in weights: + assert len(weight) == len(first.sources) + self.weights = weights + + def forward(self, x): + raise NotImplementedError("Call `apply_model` on this.") + +class TensorChunk: + def __init__(self, tensor, offset=0, length=None): + total_length = tensor.shape[-1] + assert offset >= 0 + assert offset < total_length + + if length is None: + length = total_length - offset + else: + length = min(total_length - offset, length) + + if isinstance(tensor, TensorChunk): + self.tensor = tensor.tensor + self.offset = offset + tensor.offset + else: + self.tensor = tensor + self.offset = offset + self.length = length + self.device = tensor.device + + @property + def shape(self): + shape = list(self.tensor.shape) + shape[-1] = self.length + return shape + + def padded(self, target_length): + delta = target_length - self.length + total_length = self.tensor.shape[-1] + assert delta >= 0 + + start = self.offset - delta // 2 + end = start + target_length + + correct_start = max(0, start) + correct_end = min(total_length, end) + + pad_left = correct_start - start + pad_right = end - correct_end + + out = F.pad(self.tensor[..., correct_start:correct_end], (pad_left, pad_right)) + assert out.shape[-1] == target_length + return out + +def tensor_chunk(tensor_or_chunk): + if isinstance(tensor_or_chunk, TensorChunk): + return tensor_or_chunk + else: + assert isinstance(tensor_or_chunk, th.Tensor) + return TensorChunk(tensor_or_chunk) + +def apply_model(model, + mix, + shifts=1, + split=True, + overlap=0.25, + transition_power=1., + static_shifts=1, + set_progress_bar=None, + device=None, + progress=False, + num_workers=0, + pool=None): + """ + Apply model to a given mixture. + + Args: + shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec + and apply the oppositve shift to the output. This is repeated `shifts` time and + all predictions are averaged. This effectively makes the model time equivariant + and improves SDR by up to 0.2 points. + split (bool): if True, the input will be broken down in 8 seconds extracts + and predictions will be performed individually on each and concatenated. + Useful for model with large memory footprint like Tasnet. + progress (bool): if True, show a progress bar (requires split=True) + device (torch.device, str, or None): if provided, device on which to + execute the computation, otherwise `mix.device` is assumed. + When `device` is different from `mix.device`, only local computations will + be on `device`, while the entire tracks will be stored on `mix.device`. + """ + + global fut_length + global bag_num + global prog_bar + + if device is None: + device = mix.device + else: + device = th.device(device) + if pool is None: + if num_workers > 0 and device.type == 'cpu': + pool = ThreadPoolExecutor(num_workers) + else: + pool = DummyPoolExecutor() + + kwargs = { + 'shifts': shifts, + 'split': split, + 'overlap': overlap, + 'transition_power': transition_power, + 'progress': progress, + 'device': device, + 'pool': pool, + 'set_progress_bar': set_progress_bar, + 'static_shifts': static_shifts, + } + + if isinstance(model, BagOfModels): + # Special treatment for bag of model. + # We explicitely apply multiple times `apply_model` so that the random shifts + # are different for each model. 
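        # Illustrative sketch (editor's addition, not in the original patch): the loop
        # below computes a per-source weighted mean over the bag. With two models and
        # weights [[1., 1., 1., 1.], [0.5, 1., 1., 2.]], source 3 ends up as
        #     (1.0 * out_model_0 + 2.0 * out_model_1) / (1.0 + 2.0)
        # because each output is scaled by its weight and `totals[k]` accumulates the
        # weights applied to source k before the final division.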
+ + estimates = 0 + totals = [0] * len(model.sources) + bag_num = len(model.models) + fut_length = 0 + prog_bar = 0 + current_model = 0 #(bag_num + 1) + for sub_model, weight in zip(model.models, model.weights): + original_model_device = next(iter(sub_model.parameters())).device + sub_model.to(device) + fut_length += fut_length + current_model += 1 + out = apply_model(sub_model, mix, **kwargs) + sub_model.to(original_model_device) + for k, inst_weight in enumerate(weight): + out[:, k, :, :] *= inst_weight + totals[k] += inst_weight + estimates += out + del out + + for k in range(estimates.shape[1]): + estimates[:, k, :, :] /= totals[k] + return estimates + + model.to(device) + model.eval() + assert transition_power >= 1, "transition_power < 1 leads to weird behavior." + batch, channels, length = mix.shape + + if shifts: + kwargs['shifts'] = 0 + max_shift = int(0.5 * model.samplerate) + mix = tensor_chunk(mix) + padded_mix = mix.padded(length + 2 * max_shift) + out = 0 + for _ in range(shifts): + offset = random.randint(0, max_shift) + shifted = TensorChunk(padded_mix, offset, length + max_shift - offset) + shifted_out = apply_model(model, shifted, **kwargs) + out += shifted_out[..., max_shift - offset:] + out /= shifts + return out + elif split: + kwargs['split'] = False + out = th.zeros(batch, len(model.sources), channels, length, device=mix.device) + sum_weight = th.zeros(length, device=mix.device) + segment = int(model.samplerate * model.segment) + stride = int((1 - overlap) * segment) + offsets = range(0, length, stride) + scale = float(format(stride / model.samplerate, ".2f")) + # We start from a triangle shaped weight, with maximal weight in the middle + # of the segment. Then we normalize and take to the power `transition_power`. + # Large values of transition power will lead to sharper transitions. + weight = th.cat([th.arange(1, segment // 2 + 1, device=device), + th.arange(segment - segment // 2, 0, -1, device=device)]) + assert len(weight) == segment + # If the overlap < 50%, this will translate to linear transition when + # transition_power is 1. 
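        # Illustrative sketch (editor's addition, not in the original patch): for
        # segment=8 the raw weight built above is [1, 2, 3, 4, 4, 3, 2, 1]; dividing by
        # its max and raising to transition_power keeps a triangular shape for power 1
        # (a linear cross-fade between overlapping chunks) and sharpens the transition
        # for larger powers. The later division by `sum_weight` renormalizes every
        # sample, so the weights only shape the cross-fade, not the overall gain.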
+ weight = (weight / weight.max())**transition_power + futures = [] + for offset in offsets: + chunk = TensorChunk(mix, offset, segment) + future = pool.submit(apply_model, model, chunk, **kwargs) + futures.append((future, offset)) + offset += segment + if progress: + futures = tqdm.tqdm(futures, unit_scale=scale, ncols=120, unit='seconds') + for future, offset in futures: + if set_progress_bar: + fut_length = (len(futures) * bag_num * static_shifts) + prog_bar += 1 + set_progress_bar(0.1, (0.8/fut_length*prog_bar)) + chunk_out = future.result() + chunk_length = chunk_out.shape[-1] + out[..., offset:offset + segment] += (weight[:chunk_length] * chunk_out).to(mix.device) + sum_weight[offset:offset + segment] += weight[:chunk_length].to(mix.device) + assert sum_weight.min() > 0 + out /= sum_weight + return out + else: + if hasattr(model, 'valid_length'): + valid_length = model.valid_length(length) + else: + valid_length = length + mix = tensor_chunk(mix) + padded_mix = mix.padded(valid_length).to(device) + with th.no_grad(): + out = model(padded_mix) + return center_trim(out, length) + +def demucs_segments(demucs_segment, demucs_model): + + if demucs_segment == 'Default': + segment = None + if isinstance(demucs_model, BagOfModels): + if segment is not None: + for sub in demucs_model.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment + else: + try: + segment = int(demucs_segment) + if isinstance(demucs_model, BagOfModels): + if segment is not None: + for sub in demucs_model.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment + except: + segment = None + if isinstance(demucs_model, BagOfModels): + if segment is not None: + for sub in demucs_model.models: + sub.segment = segment + else: + if segment is not None: + sub.segment = segment + + return demucs_model \ No newline at end of file diff --git a/demucs/demucs.py b/demucs/demucs.py new file mode 100644 index 0000000000000000000000000000000000000000..d2c08e73d65de3031a1e1be545b68afd5554f7a5 --- /dev/null +++ b/demucs/demucs.py @@ -0,0 +1,459 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math +import typing as tp + +import julius +import torch +from torch import nn +from torch.nn import functional as F + +from .states import capture_init +from .utils import center_trim, unfold + + +class BLSTM(nn.Module): + """ + BiLSTM with same hidden units as input dim. + If `max_steps` is not None, input will be splitting in overlapping + chunks and the LSTM applied separately on each chunk. 
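
    Illustrative example (editor's addition; the sizes are assumptions, not taken
    from the original code)::

        lstm = BLSTM(dim=64, max_steps=200, skip=True)
        y = lstm(x)   # x: (batch, 64, time) -> y keeps the same shape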
+ """ + def __init__(self, dim, layers=1, max_steps=None, skip=False): + super().__init__() + assert max_steps is None or max_steps % 4 == 0 + self.max_steps = max_steps + self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim) + self.linear = nn.Linear(2 * dim, dim) + self.skip = skip + + def forward(self, x): + B, C, T = x.shape + y = x + framed = False + if self.max_steps is not None and T > self.max_steps: + width = self.max_steps + stride = width // 2 + frames = unfold(x, width, stride) + nframes = frames.shape[2] + framed = True + x = frames.permute(0, 2, 1, 3).reshape(-1, C, width) + + x = x.permute(2, 0, 1) + + x = self.lstm(x)[0] + x = self.linear(x) + x = x.permute(1, 2, 0) + if framed: + out = [] + frames = x.reshape(B, -1, C, width) + limit = stride // 2 + for k in range(nframes): + if k == 0: + out.append(frames[:, k, :, :-limit]) + elif k == nframes - 1: + out.append(frames[:, k, :, limit:]) + else: + out.append(frames[:, k, :, limit:-limit]) + out = torch.cat(out, -1) + out = out[..., :T] + x = out + if self.skip: + x = x + y + return x + + +def rescale_conv(conv, reference): + """Rescale initial weight scale. It is unclear why it helps but it certainly does. + """ + std = conv.weight.std().detach() + scale = (std / reference)**0.5 + conv.weight.data /= scale + if conv.bias is not None: + conv.bias.data /= scale + + +def rescale_module(module, reference): + for sub in module.modules(): + if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d, nn.Conv2d, nn.ConvTranspose2d)): + rescale_conv(sub, reference) + + +class LayerScale(nn.Module): + """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). + This rescales diagonaly residual outputs close to 0 initially, then learnt. + """ + def __init__(self, channels: int, init: float = 0): + super().__init__() + self.scale = nn.Parameter(torch.zeros(channels, requires_grad=True)) + self.scale.data[:] = init + + def forward(self, x): + return self.scale[:, None] * x + + +class DConv(nn.Module): + """ + New residual branches in each encoder layer. + This alternates dilated convolutions, potentially with LSTMs and attention. + Also before entering each residual branch, dimension is projected on a smaller subspace, + e.g. of dim `channels // compress`. + """ + def __init__(self, channels: int, compress: float = 4, depth: int = 2, init: float = 1e-4, + norm=True, attn=False, heads=4, ndecay=4, lstm=False, gelu=True, + kernel=3, dilate=True): + """ + Args: + channels: input/output channels for residual branch. + compress: amount of channel compression inside the branch. + depth: number of layers in the residual branch. Each layer has its own + projection, and potentially LSTM and attention. + init: initial scale for LayerNorm. + norm: use GroupNorm. + attn: use LocalAttention. + heads: number of heads for the LocalAttention. + ndecay: number of decay controls in the LocalAttention. + lstm: use LSTM. + gelu: Use GELU activation. + kernel: kernel size for the (dilated) convolutions. + dilate: if true, use dilation, increasing with the depth. 
+ """ + + super().__init__() + assert kernel % 2 == 1 + self.channels = channels + self.compress = compress + self.depth = abs(depth) + dilate = depth > 0 + + norm_fn: tp.Callable[[int], nn.Module] + norm_fn = lambda d: nn.Identity() # noqa + if norm: + norm_fn = lambda d: nn.GroupNorm(1, d) # noqa + + hidden = int(channels / compress) + + act: tp.Type[nn.Module] + if gelu: + act = nn.GELU + else: + act = nn.ReLU + + self.layers = nn.ModuleList([]) + for d in range(self.depth): + dilation = 2 ** d if dilate else 1 + padding = dilation * (kernel // 2) + mods = [ + nn.Conv1d(channels, hidden, kernel, dilation=dilation, padding=padding), + norm_fn(hidden), act(), + nn.Conv1d(hidden, 2 * channels, 1), + norm_fn(2 * channels), nn.GLU(1), + LayerScale(channels, init), + ] + if attn: + mods.insert(3, LocalState(hidden, heads=heads, ndecay=ndecay)) + if lstm: + mods.insert(3, BLSTM(hidden, layers=2, max_steps=200, skip=True)) + layer = nn.Sequential(*mods) + self.layers.append(layer) + + def forward(self, x): + for layer in self.layers: + x = x + layer(x) + return x + + +class LocalState(nn.Module): + """Local state allows to have attention based only on data (no positional embedding), + but while setting a constraint on the time window (e.g. decaying penalty term). + + Also a failed experiments with trying to provide some frequency based attention. + """ + def __init__(self, channels: int, heads: int = 4, nfreqs: int = 0, ndecay: int = 4): + super().__init__() + assert channels % heads == 0, (channels, heads) + self.heads = heads + self.nfreqs = nfreqs + self.ndecay = ndecay + self.content = nn.Conv1d(channels, channels, 1) + self.query = nn.Conv1d(channels, channels, 1) + self.key = nn.Conv1d(channels, channels, 1) + if nfreqs: + self.query_freqs = nn.Conv1d(channels, heads * nfreqs, 1) + if ndecay: + self.query_decay = nn.Conv1d(channels, heads * ndecay, 1) + # Initialize decay close to zero (there is a sigmoid), for maximum initial window. + self.query_decay.weight.data *= 0.01 + assert self.query_decay.bias is not None # stupid type checker + self.query_decay.bias.data[:] = -2 + self.proj = nn.Conv1d(channels + heads * nfreqs, channels, 1) + + def forward(self, x): + B, C, T = x.shape + heads = self.heads + indexes = torch.arange(T, device=x.device, dtype=x.dtype) + # left index are keys, right index are queries + delta = indexes[:, None] - indexes[None, :] + + queries = self.query(x).view(B, heads, -1, T) + keys = self.key(x).view(B, heads, -1, T) + # t are keys, s are queries + dots = torch.einsum("bhct,bhcs->bhts", keys, queries) + dots /= keys.shape[2]**0.5 + if self.nfreqs: + periods = torch.arange(1, self.nfreqs + 1, device=x.device, dtype=x.dtype) + freq_kernel = torch.cos(2 * math.pi * delta / periods.view(-1, 1, 1)) + freq_q = self.query_freqs(x).view(B, heads, -1, T) / self.nfreqs ** 0.5 + dots += torch.einsum("fts,bhfs->bhts", freq_kernel, freq_q) + if self.ndecay: + decays = torch.arange(1, self.ndecay + 1, device=x.device, dtype=x.dtype) + decay_q = self.query_decay(x).view(B, heads, -1, T) + decay_q = torch.sigmoid(decay_q) / 2 + decay_kernel = - decays.view(-1, 1, 1) * delta.abs() / self.ndecay**0.5 + dots += torch.einsum("fts,bhfs->bhts", decay_kernel, decay_q) + + # Kill self reference. 
+ dots.masked_fill_(torch.eye(T, device=dots.device, dtype=torch.bool), -100) + weights = torch.softmax(dots, dim=2) + + content = self.content(x).view(B, heads, -1, T) + result = torch.einsum("bhts,bhct->bhcs", weights, content) + if self.nfreqs: + time_sig = torch.einsum("bhts,fts->bhfs", weights, freq_kernel) + result = torch.cat([result, time_sig], 2) + result = result.reshape(B, -1, T) + return x + self.proj(result) + + +class Demucs(nn.Module): + @capture_init + def __init__(self, + sources, + # Channels + audio_channels=2, + channels=64, + growth=2., + # Main structure + depth=6, + rewrite=True, + lstm_layers=0, + # Convolutions + kernel_size=8, + stride=4, + context=1, + # Activations + gelu=True, + glu=True, + # Normalization + norm_starts=4, + norm_groups=4, + # DConv residual branch + dconv_mode=1, + dconv_depth=2, + dconv_comp=4, + dconv_attn=4, + dconv_lstm=4, + dconv_init=1e-4, + # Pre/post processing + normalize=True, + resample=True, + # Weight init + rescale=0.1, + # Metadata + samplerate=44100, + segment=4 * 10): + """ + Args: + sources (list[str]): list of source names + audio_channels (int): stereo or mono + channels (int): first convolution channels + depth (int): number of encoder/decoder layers + growth (float): multiply (resp divide) number of channels by that + for each layer of the encoder (resp decoder) + depth (int): number of layers in the encoder and in the decoder. + rewrite (bool): add 1x1 convolution to each layer. + lstm_layers (int): number of lstm layers, 0 = no lstm. Deactivated + by default, as this is now replaced by the smaller and faster small LSTMs + in the DConv branches. + kernel_size (int): kernel size for convolutions + stride (int): stride for convolutions + context (int): kernel size of the convolution in the + decoder before the transposed convolution. If > 1, + will provide some context from neighboring time steps. + gelu: use GELU activation function. + glu (bool): use glu instead of ReLU for the 1x1 rewrite conv. + norm_starts: layer at which group norm starts being used. + decoder layers are numbered in reverse order. + norm_groups: number of groups for group norm. + dconv_mode: if 1: dconv in encoder only, 2: decoder only, 3: both. + dconv_depth: depth of residual DConv branch. + dconv_comp: compression of DConv branch. + dconv_attn: adds attention layers in DConv branch starting at this layer. + dconv_lstm: adds a LSTM layer in DConv branch starting at this layer. + dconv_init: initial scale for the DConv branch LayerScale. + normalize (bool): normalizes the input audio on the fly, and scales back + the output by the same amount. + resample (bool): upsample x2 the input and downsample /2 the output. + rescale (int): rescale initial weights of convolutions + to get their standard deviation closer to `rescale`. + samplerate (int): stored as meta information for easing + future evaluations of the model. + segment (float): duration of the chunks of audio to ideally evaluate the model on. + This is used by `demucs.apply.apply_model`. 
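
        Illustrative example (editor's addition; the argument values are assumptions)::

            model = Demucs(sources=["drums", "bass", "other", "vocals"])
            out = model(mix)   # mix: (batch, 2, time) -> out: (batch, 4, 2, time)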
+ """ + + super().__init__() + self.audio_channels = audio_channels + self.sources = sources + self.kernel_size = kernel_size + self.context = context + self.stride = stride + self.depth = depth + self.resample = resample + self.channels = channels + self.normalize = normalize + self.samplerate = samplerate + self.segment = segment + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + self.skip_scales = nn.ModuleList() + + if glu: + activation = nn.GLU(dim=1) + ch_scale = 2 + else: + activation = nn.ReLU() + ch_scale = 1 + if gelu: + act2 = nn.GELU + else: + act2 = nn.ReLU + + in_channels = audio_channels + padding = 0 + for index in range(depth): + norm_fn = lambda d: nn.Identity() # noqa + if index >= norm_starts: + norm_fn = lambda d: nn.GroupNorm(norm_groups, d) # noqa + + encode = [] + encode += [ + nn.Conv1d(in_channels, channels, kernel_size, stride), + norm_fn(channels), + act2(), + ] + attn = index >= dconv_attn + lstm = index >= dconv_lstm + if dconv_mode & 1: + encode += [DConv(channels, depth=dconv_depth, init=dconv_init, + compress=dconv_comp, attn=attn, lstm=lstm)] + if rewrite: + encode += [ + nn.Conv1d(channels, ch_scale * channels, 1), + norm_fn(ch_scale * channels), activation] + self.encoder.append(nn.Sequential(*encode)) + + decode = [] + if index > 0: + out_channels = in_channels + else: + out_channels = len(self.sources) * audio_channels + if rewrite: + decode += [ + nn.Conv1d(channels, ch_scale * channels, 2 * context + 1, padding=context), + norm_fn(ch_scale * channels), activation] + if dconv_mode & 2: + decode += [DConv(channels, depth=dconv_depth, init=dconv_init, + compress=dconv_comp, attn=attn, lstm=lstm)] + decode += [nn.ConvTranspose1d(channels, out_channels, + kernel_size, stride, padding=padding)] + if index > 0: + decode += [norm_fn(out_channels), act2()] + self.decoder.insert(0, nn.Sequential(*decode)) + in_channels = channels + channels = int(growth * channels) + + channels = in_channels + if lstm_layers: + self.lstm = BLSTM(channels, lstm_layers) + else: + self.lstm = None + + if rescale: + rescale_module(self, reference=rescale) + + def valid_length(self, length): + """ + Return the nearest valid length to use with the model so that + there is no time steps left over in a convolution, e.g. for all + layers, size of the input - kernel_size % stride = 0. + + Note that input are automatically padded if necessary to ensure that the output + has the same length as the input. 
+ """ + if self.resample: + length *= 2 + + for _ in range(self.depth): + length = math.ceil((length - self.kernel_size) / self.stride) + 1 + length = max(1, length) + + for idx in range(self.depth): + length = (length - 1) * self.stride + self.kernel_size + + if self.resample: + length = math.ceil(length / 2) + return int(length) + + def forward(self, mix): + x = mix + length = x.shape[-1] + + if self.normalize: + mono = mix.mean(dim=1, keepdim=True) + mean = mono.mean(dim=-1, keepdim=True) + std = mono.std(dim=-1, keepdim=True) + x = (x - mean) / (1e-5 + std) + else: + mean = 0 + std = 1 + + delta = self.valid_length(length) - length + x = F.pad(x, (delta // 2, delta - delta // 2)) + + if self.resample: + x = julius.resample_frac(x, 1, 2) + + saved = [] + for encode in self.encoder: + x = encode(x) + saved.append(x) + + if self.lstm: + x = self.lstm(x) + + for decode in self.decoder: + skip = saved.pop(-1) + skip = center_trim(skip, x) + x = decode(x + skip) + + if self.resample: + x = julius.resample_frac(x, 2, 1) + x = x * std + mean + x = center_trim(x, length) + x = x.view(x.size(0), len(self.sources), self.audio_channels, x.size(-1)) + return x + + def load_state_dict(self, state, strict=True): + # fix a mismatch with previous generation Demucs models. + for idx in range(self.depth): + for a in ['encoder', 'decoder']: + for b in ['bias', 'weight']: + new = f'{a}.{idx}.3.{b}' + old = f'{a}.{idx}.2.{b}' + if old in state and new not in state: + state[new] = state.pop(old) + super().load_state_dict(state, strict=strict) diff --git a/demucs/filtering.py b/demucs/filtering.py new file mode 100644 index 0000000000000000000000000000000000000000..08a2c17deae2dd4ace033eaa14619821e2a70b03 --- /dev/null +++ b/demucs/filtering.py @@ -0,0 +1,502 @@ +from typing import Optional +import torch +import torch.nn as nn +from torch import Tensor +from torch.utils.data import DataLoader + +def atan2(y, x): + r"""Element-wise arctangent function of y/x. + Returns a new tensor with signed angles in radians. + It is an alternative implementation of torch.atan2 + + Args: + y (Tensor): First input tensor + x (Tensor): Second input tensor [shape=y.shape] + + Returns: + Tensor: [shape=y.shape]. + """ + pi = 2 * torch.asin(torch.tensor(1.0)) + x += ((x == 0) & (y == 0)) * 1.0 + out = torch.atan(y / x) + out += ((y >= 0) & (x < 0)) * pi + out -= ((y < 0) & (x < 0)) * pi + out *= 1 - ((y > 0) & (x == 0)) * 1.0 + out += ((y > 0) & (x == 0)) * (pi / 2) + out *= 1 - ((y < 0) & (x == 0)) * 1.0 + out += ((y < 0) & (x == 0)) * (-pi / 2) + return out + + +# Define basic complex operations on torch.Tensor objects whose last dimension +# consists in the concatenation of the real and imaginary parts. + + +def _norm(x: torch.Tensor) -> torch.Tensor: + r"""Computes the norm value of a torch Tensor, assuming that it + comes as real and imaginary part in its last dimension. + + Args: + x (Tensor): Input Tensor of shape [shape=(..., 2)] + + Returns: + Tensor: shape as x excluding the last dimension. + """ + return torch.abs(x[..., 0]) ** 2 + torch.abs(x[..., 1]) ** 2 + + +def _mul_add(a: torch.Tensor, b: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor: + """Element-wise multiplication of two complex Tensors described + through their real and imaginary parts. 
+ The result is added to the `out` tensor""" + + # check `out` and allocate it if needed + target_shape = torch.Size([max(sa, sb) for (sa, sb) in zip(a.shape, b.shape)]) + if out is None or out.shape != target_shape: + out = torch.zeros(target_shape, dtype=a.dtype, device=a.device) + if out is a: + real_a = a[..., 0] + out[..., 0] = out[..., 0] + (real_a * b[..., 0] - a[..., 1] * b[..., 1]) + out[..., 1] = out[..., 1] + (real_a * b[..., 1] + a[..., 1] * b[..., 0]) + else: + out[..., 0] = out[..., 0] + (a[..., 0] * b[..., 0] - a[..., 1] * b[..., 1]) + out[..., 1] = out[..., 1] + (a[..., 0] * b[..., 1] + a[..., 1] * b[..., 0]) + return out + + +def _mul(a: torch.Tensor, b: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor: + """Element-wise multiplication of two complex Tensors described + through their real and imaginary parts + can work in place in case out is a only""" + target_shape = torch.Size([max(sa, sb) for (sa, sb) in zip(a.shape, b.shape)]) + if out is None or out.shape != target_shape: + out = torch.zeros(target_shape, dtype=a.dtype, device=a.device) + if out is a: + real_a = a[..., 0] + out[..., 0] = real_a * b[..., 0] - a[..., 1] * b[..., 1] + out[..., 1] = real_a * b[..., 1] + a[..., 1] * b[..., 0] + else: + out[..., 0] = a[..., 0] * b[..., 0] - a[..., 1] * b[..., 1] + out[..., 1] = a[..., 0] * b[..., 1] + a[..., 1] * b[..., 0] + return out + + +def _inv(z: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor: + """Element-wise multiplicative inverse of a Tensor with complex + entries described through their real and imaginary parts. + can work in place in case out is z""" + ez = _norm(z) + if out is None or out.shape != z.shape: + out = torch.zeros_like(z) + out[..., 0] = z[..., 0] / ez + out[..., 1] = -z[..., 1] / ez + return out + + +def _conj(z, out: Optional[torch.Tensor] = None) -> torch.Tensor: + """Element-wise complex conjugate of a Tensor with complex entries + described through their real and imaginary parts. + can work in place in case out is z""" + if out is None or out.shape != z.shape: + out = torch.zeros_like(z) + out[..., 0] = z[..., 0] + out[..., 1] = -z[..., 1] + return out + + +def _invert(M: torch.Tensor, out: Optional[torch.Tensor] = None) -> torch.Tensor: + """ + Invert 1x1 or 2x2 matrices + + Will generate errors if the matrices are singular: user must handle this + through his own regularization schemes. 
+ + Args: + M (Tensor): [shape=(..., nb_channels, nb_channels, 2)] + matrices to invert: must be square along dimensions -3 and -2 + + Returns: + invM (Tensor): [shape=M.shape] + inverses of M + """ + nb_channels = M.shape[-2] + + if out is None or out.shape != M.shape: + out = torch.empty_like(M) + + if nb_channels == 1: + # scalar case + out = _inv(M, out) + elif nb_channels == 2: + # two channels case: analytical expression + + # first compute the determinent + det = _mul(M[..., 0, 0, :], M[..., 1, 1, :]) + det = det - _mul(M[..., 0, 1, :], M[..., 1, 0, :]) + # invert it + invDet = _inv(det) + + # then fill out the matrix with the inverse + out[..., 0, 0, :] = _mul(invDet, M[..., 1, 1, :], out[..., 0, 0, :]) + out[..., 1, 0, :] = _mul(-invDet, M[..., 1, 0, :], out[..., 1, 0, :]) + out[..., 0, 1, :] = _mul(-invDet, M[..., 0, 1, :], out[..., 0, 1, :]) + out[..., 1, 1, :] = _mul(invDet, M[..., 0, 0, :], out[..., 1, 1, :]) + else: + raise Exception("Only 2 channels are supported for the torch version.") + return out + + +# Now define the signal-processing low-level functions used by the Separator + + +def expectation_maximization( + y: torch.Tensor, + x: torch.Tensor, + iterations: int = 2, + eps: float = 1e-10, + batch_size: int = 200, +): + r"""Expectation maximization algorithm, for refining source separation + estimates. + + This algorithm allows to make source separation results better by + enforcing multichannel consistency for the estimates. This usually means + a better perceptual quality in terms of spatial artifacts. + + The implementation follows the details presented in [1]_, taking + inspiration from the original EM algorithm proposed in [2]_ and its + weighted refinement proposed in [3]_, [4]_. + It works by iteratively: + + * Re-estimate source parameters (power spectral densities and spatial + covariance matrices) through :func:`get_local_gaussian_model`. + + * Separate again the mixture with the new parameters by first computing + the new modelled mixture covariance matrices with :func:`get_mix_model`, + prepare the Wiener filters through :func:`wiener_gain` and apply them + with :func:`apply_filter``. + + References + ---------- + .. [1] S. Uhlich and M. Porcu and F. Giron and M. Enenkl and T. Kemp and + N. Takahashi and Y. Mitsufuji, "Improving music source separation based + on deep neural networks through data augmentation and network + blending." 2017 IEEE International Conference on Acoustics, Speech + and Signal Processing (ICASSP). IEEE, 2017. + + .. [2] N.Q. Duong and E. Vincent and R.Gribonval. "Under-determined + reverberant audio source separation using a full-rank spatial + covariance model." IEEE Transactions on Audio, Speech, and Language + Processing 18.7 (2010): 1830-1840. + + .. [3] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel audio source + separation with deep neural networks." IEEE/ACM Transactions on Audio, + Speech, and Language Processing 24.9 (2016): 1652-1664. + + .. [4] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel music + separation with deep neural networks." 2016 24th European Signal + Processing Conference (EUSIPCO). IEEE, 2016. + + .. [5] A. Liutkus and R. Badeau and G. Richard "Kernel additive models for + source separation." IEEE Transactions on Signal Processing + 62.16 (2014): 4298-4310. 
+ + Args: + y (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2, nb_sources)] + initial estimates for the sources + x (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2)] + complex STFT of the mixture signal + iterations (int): [scalar] + number of iterations for the EM algorithm. + eps (float or None): [scalar] + The epsilon value to use for regularization and filters. + + Returns: + y (Tensor): [shape=(nb_frames, nb_bins, nb_channels, 2, nb_sources)] + estimated sources after iterations + v (Tensor): [shape=(nb_frames, nb_bins, nb_sources)] + estimated power spectral densities + R (Tensor): [shape=(nb_bins, nb_channels, nb_channels, 2, nb_sources)] + estimated spatial covariance matrices + + Notes: + * You need an initial estimate for the sources to apply this + algorithm. This is precisely what the :func:`wiener` function does. + * This algorithm *is not* an implementation of the "exact" EM + proposed in [1]_. In particular, it does compute the posterior + covariance matrices the same (exact) way. Instead, it uses the + simplified approximate scheme initially proposed in [5]_ and further + refined in [3]_, [4]_, that boils down to just take the empirical + covariance of the recent source estimates, followed by a weighted + average for the update of the spatial covariance matrix. It has been + empirically demonstrated that this simplified algorithm is more + robust for music separation. + + Warning: + It is *very* important to make sure `x.dtype` is `torch.float64` + if you want double precision, because this function will **not** + do such conversion for you from `torch.complex32`, in case you want the + smaller RAM usage on purpose. + + It is usually always better in terms of quality to have double + precision, by e.g. calling :func:`expectation_maximization` + with ``x.to(torch.float64)``. + """ + # dimensions + (nb_frames, nb_bins, nb_channels) = x.shape[:-1] + nb_sources = y.shape[-1] + + regularization = torch.cat( + ( + torch.eye(nb_channels, dtype=x.dtype, device=x.device)[..., None], + torch.zeros((nb_channels, nb_channels, 1), dtype=x.dtype, device=x.device), + ), + dim=2, + ) + regularization = torch.sqrt(torch.as_tensor(eps)) * ( + regularization[None, None, ...].expand((-1, nb_bins, -1, -1, -1)) + ) + + # allocate the spatial covariance matrices + R = [ + torch.zeros((nb_bins, nb_channels, nb_channels, 2), dtype=x.dtype, device=x.device) + for j in range(nb_sources) + ] + weight: torch.Tensor = torch.zeros((nb_bins,), dtype=x.dtype, device=x.device) + + v: torch.Tensor = torch.zeros((nb_frames, nb_bins, nb_sources), dtype=x.dtype, device=x.device) + for it in range(iterations): + # constructing the mixture covariance matrix. 
Doing it with a loop + # to avoid storing anytime in RAM the whole 6D tensor + + # update the PSD as the average spectrogram over channels + v = torch.mean(torch.abs(y[..., 0, :]) ** 2 + torch.abs(y[..., 1, :]) ** 2, dim=-2) + + # update spatial covariance matrices (weighted update) + for j in range(nb_sources): + R[j] = torch.tensor(0.0, device=x.device) + weight = torch.tensor(eps, device=x.device) + pos: int = 0 + batch_size = batch_size if batch_size else nb_frames + while pos < nb_frames: + t = torch.arange(pos, min(nb_frames, pos + batch_size)) + pos = int(t[-1]) + 1 + + R[j] = R[j] + torch.sum(_covariance(y[t, ..., j]), dim=0) + weight = weight + torch.sum(v[t, ..., j], dim=0) + R[j] = R[j] / weight[..., None, None, None] + weight = torch.zeros_like(weight) + + # cloning y if we track gradient, because we're going to update it + if y.requires_grad: + y = y.clone() + + pos = 0 + while pos < nb_frames: + t = torch.arange(pos, min(nb_frames, pos + batch_size)) + pos = int(t[-1]) + 1 + + y[t, ...] = torch.tensor(0.0, device=x.device, dtype=x.dtype) + + # compute mix covariance matrix + Cxx = regularization + for j in range(nb_sources): + Cxx = Cxx + (v[t, ..., j, None, None, None] * R[j][None, ...].clone()) + + # invert it + inv_Cxx = _invert(Cxx) + + # separate the sources + for j in range(nb_sources): + + # create a wiener gain for this source + gain = torch.zeros_like(inv_Cxx) + + # computes multichannel Wiener gain as v_j R_j inv_Cxx + indices = torch.cartesian_prod( + torch.arange(nb_channels), + torch.arange(nb_channels), + torch.arange(nb_channels), + ) + for index in indices: + gain[:, :, index[0], index[1], :] = _mul_add( + R[j][None, :, index[0], index[2], :].clone(), + inv_Cxx[:, :, index[2], index[1], :], + gain[:, :, index[0], index[1], :], + ) + gain = gain * v[t, ..., None, None, None, j] + + # apply it to the mixture + for i in range(nb_channels): + y[t, ..., j] = _mul_add(gain[..., i, :], x[t, ..., i, None, :], y[t, ..., j]) + + return y, v, R + + +def wiener( + targets_spectrograms: torch.Tensor, + mix_stft: torch.Tensor, + iterations: int = 1, + softmask: bool = False, + residual: bool = False, + scale_factor: float = 10.0, + eps: float = 1e-10, +): + """Wiener-based separation for multichannel audio. + + The method uses the (possibly multichannel) spectrograms of the + sources to separate the (complex) Short Term Fourier Transform of the + mix. Separation is done in a sequential way by: + + * Getting an initial estimate. This can be done in two ways: either by + directly using the spectrograms with the mixture phase, or + by using a softmasking strategy. This initial phase is controlled + by the `softmask` flag. + + * If required, adding an additional residual target as the mix minus + all targets. + + * Refinining these initial estimates through a call to + :func:`expectation_maximization` if the number of iterations is nonzero. + + This implementation also allows to specify the epsilon value used for + regularization. It is based on [1]_, [2]_, [3]_, [4]_. + + References + ---------- + .. [1] S. Uhlich and M. Porcu and F. Giron and M. Enenkl and T. Kemp and + N. Takahashi and Y. Mitsufuji, "Improving music source separation based + on deep neural networks through data augmentation and network + blending." 2017 IEEE International Conference on Acoustics, Speech + and Signal Processing (ICASSP). IEEE, 2017. + + .. [2] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel audio source + separation with deep neural networks." 
IEEE/ACM Transactions on Audio, + Speech, and Language Processing 24.9 (2016): 1652-1664. + + .. [3] A. Nugraha and A. Liutkus and E. Vincent. "Multichannel music + separation with deep neural networks." 2016 24th European Signal + Processing Conference (EUSIPCO). IEEE, 2016. + + .. [4] A. Liutkus and R. Badeau and G. Richard "Kernel additive models for + source separation." IEEE Transactions on Signal Processing + 62.16 (2014): 4298-4310. + + Args: + targets_spectrograms (Tensor): spectrograms of the sources + [shape=(nb_frames, nb_bins, nb_channels, nb_sources)]. + This is a nonnegative tensor that is + usually the output of the actual separation method of the user. The + spectrograms may be mono, but they need to be 4-dimensional in all + cases. + mix_stft (Tensor): [shape=(nb_frames, nb_bins, nb_channels, complex=2)] + STFT of the mixture signal. + iterations (int): [scalar] + number of iterations for the EM algorithm + softmask (bool): Describes how the initial estimates are obtained. + * if `False`, then the mixture phase will directly be used with the + spectrogram as initial estimates. + * if `True`, initial estimates are obtained by multiplying the + complex mix element-wise with the ratio of each target spectrogram + with the sum of them all. This strategy is better if the model are + not really good, and worse otherwise. + residual (bool): if `True`, an additional target is created, which is + equal to the mixture minus the other targets, before application of + expectation maximization + eps (float): Epsilon value to use for computing the separations. + This is used whenever division with a model energy is + performed, i.e. when softmasking and when iterating the EM. + It can be understood as the energy of the additional white noise + that is taken out when separating. + + Returns: + Tensor: shape=(nb_frames, nb_bins, nb_channels, complex=2, nb_sources) + STFT of estimated sources + + Notes: + * Be careful that you need *magnitude spectrogram estimates* for the + case `softmask==False`. + * `softmask=False` is recommended + * The epsilon value will have a huge impact on performance. If it's + large, only the parts of the signal with a significant energy will + be kept in the sources. This epsilon then directly controls the + energy of the reconstruction error. + + Warning: + As in :func:`expectation_maximization`, we recommend converting the + mixture `x` to double precision `torch.float64` *before* calling + :func:`wiener`. + """ + if softmask: + # if we use softmask, we compute the ratio mask for all targets and + # multiply by the mix stft + y = ( + mix_stft[..., None] + * ( + targets_spectrograms + / (eps + torch.sum(targets_spectrograms, dim=-1, keepdim=True).to(mix_stft.dtype)) + )[..., None, :] + ) + else: + # otherwise, we just multiply the targets spectrograms with mix phase + # we tacitly assume that we have magnitude estimates. + angle = atan2(mix_stft[..., 1], mix_stft[..., 0])[..., None] + nb_sources = targets_spectrograms.shape[-1] + y = torch.zeros( + mix_stft.shape + (nb_sources,), dtype=mix_stft.dtype, device=mix_stft.device + ) + y[..., 0, :] = targets_spectrograms * torch.cos(angle) + y[..., 1, :] = targets_spectrograms * torch.sin(angle) + + if residual: + # if required, adding an additional target as the mix minus + # available targets + y = torch.cat([y, mix_stft[..., None] - y.sum(dim=-1, keepdim=True)], dim=-1) + + if iterations == 0: + return y + + # we need to refine the estimates. 
Scales down the estimates for + # numerical stability + max_abs = torch.max( + torch.as_tensor(1.0, dtype=mix_stft.dtype, device=mix_stft.device), + torch.sqrt(_norm(mix_stft)).max() / scale_factor, + ) + + mix_stft = mix_stft / max_abs + y = y / max_abs + + # call expectation maximization + y = expectation_maximization(y, mix_stft, iterations, eps=eps)[0] + + # scale estimates up again + y = y * max_abs + return y + + +def _covariance(y_j): + """ + Compute the empirical covariance for a source. + + Args: + y_j (Tensor): complex stft of the source. + [shape=(nb_frames, nb_bins, nb_channels, 2)]. + + Returns: + Cj (Tensor): [shape=(nb_frames, nb_bins, nb_channels, nb_channels, 2)] + just y_j * conj(y_j.T): empirical covariance for each TF bin. + """ + (nb_frames, nb_bins, nb_channels) = y_j.shape[:-1] + Cj = torch.zeros( + (nb_frames, nb_bins, nb_channels, nb_channels, 2), + dtype=y_j.dtype, + device=y_j.device, + ) + indices = torch.cartesian_prod(torch.arange(nb_channels), torch.arange(nb_channels)) + for index in indices: + Cj[:, :, index[0], index[1], :] = _mul_add( + y_j[:, :, index[0], :], + _conj(y_j[:, :, index[1], :]), + Cj[:, :, index[0], index[1], :], + ) + return Cj diff --git a/demucs/hdemucs.py b/demucs/hdemucs.py new file mode 100644 index 0000000000000000000000000000000000000000..ea4e652e4749a1888ec1ed8e55e8a03c07616e26 --- /dev/null +++ b/demucs/hdemucs.py @@ -0,0 +1,796 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +""" +This code contains the spectrogram and Hybrid version of Demucs. +""" +from copy import deepcopy +import math +import typing as tp +import torch +from torch import nn +from torch.nn import functional as F +from .filtering import wiener +from .demucs import DConv, rescale_module +from .states import capture_init +from .spec import spectro, ispectro + +def pad1d(x: torch.Tensor, paddings: tp.Tuple[int, int], mode: str = 'constant', value: float = 0.): + """Tiny wrapper around F.pad, just to allow for reflect padding on small input. + If this is the case, we insert extra 0 padding to the right before the reflection happen.""" + x0 = x + length = x.shape[-1] + padding_left, padding_right = paddings + if mode == 'reflect': + max_pad = max(padding_left, padding_right) + if length <= max_pad: + extra_pad = max_pad - length + 1 + extra_pad_right = min(padding_right, extra_pad) + extra_pad_left = extra_pad - extra_pad_right + paddings = (padding_left - extra_pad_left, padding_right - extra_pad_right) + x = F.pad(x, (extra_pad_left, extra_pad_right)) + out = F.pad(x, paddings, mode, value) + assert out.shape[-1] == length + padding_left + padding_right + assert (out[..., padding_left: padding_left + length] == x0).all() + return out + +class ScaledEmbedding(nn.Module): + """ + Boost learning rate for embeddings (with `scale`). + Also, can make embeddings continuous with `smooth`. + """ + def __init__(self, num_embeddings: int, embedding_dim: int, + scale: float = 10., smooth=False): + super().__init__() + self.embedding = nn.Embedding(num_embeddings, embedding_dim) + if smooth: + weight = torch.cumsum(self.embedding.weight.data, dim=0) + # when summing gaussian, overscale raises as sqrt(n), so we nornalize by that. 
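            # Editor's note (illustrative, not in the original patch): row n of the
            # cumulative sum adds up n independently initialized rows, so its scale
            # grows roughly like sqrt(n); the division by sqrt(n) below keeps every
            # smoothed embedding near the original standard deviation.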
+ weight = weight / torch.arange(1, num_embeddings + 1).to(weight).sqrt()[:, None] + self.embedding.weight.data[:] = weight + self.embedding.weight.data /= scale + self.scale = scale + + @property + def weight(self): + return self.embedding.weight * self.scale + + def forward(self, x): + out = self.embedding(x) * self.scale + return out + + +class HEncLayer(nn.Module): + def __init__(self, chin, chout, kernel_size=8, stride=4, norm_groups=1, empty=False, + freq=True, dconv=True, norm=True, context=0, dconv_kw={}, pad=True, + rewrite=True): + """Encoder layer. This used both by the time and the frequency branch. + + Args: + chin: number of input channels. + chout: number of output channels. + norm_groups: number of groups for group norm. + empty: used to make a layer with just the first conv. this is used + before merging the time and freq. branches. + freq: this is acting on frequencies. + dconv: insert DConv residual branches. + norm: use GroupNorm. + context: context size for the 1x1 conv. + dconv_kw: list of kwargs for the DConv class. + pad: pad the input. Padding is done so that the output size is + always the input size / stride. + rewrite: add 1x1 conv at the end of the layer. + """ + super().__init__() + norm_fn = lambda d: nn.Identity() # noqa + if norm: + norm_fn = lambda d: nn.GroupNorm(norm_groups, d) # noqa + if pad: + pad = kernel_size // 4 + else: + pad = 0 + klass = nn.Conv1d + self.freq = freq + self.kernel_size = kernel_size + self.stride = stride + self.empty = empty + self.norm = norm + self.pad = pad + if freq: + kernel_size = [kernel_size, 1] + stride = [stride, 1] + pad = [pad, 0] + klass = nn.Conv2d + self.conv = klass(chin, chout, kernel_size, stride, pad) + if self.empty: + return + self.norm1 = norm_fn(chout) + self.rewrite = None + if rewrite: + self.rewrite = klass(chout, 2 * chout, 1 + 2 * context, 1, context) + self.norm2 = norm_fn(2 * chout) + + self.dconv = None + if dconv: + self.dconv = DConv(chout, **dconv_kw) + + def forward(self, x, inject=None): + """ + `inject` is used to inject the result from the time branch into the frequency branch, + when both have the same stride. + """ + if not self.freq and x.dim() == 4: + B, C, Fr, T = x.shape + x = x.view(B, -1, T) + + if not self.freq: + le = x.shape[-1] + if not le % self.stride == 0: + x = F.pad(x, (0, self.stride - (le % self.stride))) + y = self.conv(x) + if self.empty: + return y + if inject is not None: + assert inject.shape[-1] == y.shape[-1], (inject.shape, y.shape) + if inject.dim() == 3 and y.dim() == 4: + inject = inject[:, :, None] + y = y + inject + y = F.gelu(self.norm1(y)) + if self.dconv: + if self.freq: + B, C, Fr, T = y.shape + y = y.permute(0, 2, 1, 3).reshape(-1, C, T) + y = self.dconv(y) + if self.freq: + y = y.view(B, Fr, C, T).permute(0, 2, 1, 3) + if self.rewrite: + z = self.norm2(self.rewrite(y)) + z = F.glu(z, dim=1) + else: + z = y + return z + + +class MultiWrap(nn.Module): + """ + Takes one layer and replicate it N times. each replica will act + on a frequency band. All is done so that if the N replica have the same weights, + then this is exactly equivalent to applying the original module on all frequencies. + + This is a bit over-engineered to avoid edge artifacts when splitting + the frequency bands, but it is possible the naive implementation would work as well... + """ + def __init__(self, layer, split_ratios): + """ + Args: + layer: module to clone, must be either HEncLayer or HDecLayer. + split_ratios: list of float indicating which ratio to keep for each band. 
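+                For instance (illustrative value only), `split_ratios=[0.5]`
+                creates two replicas of `layer`: one acting on the lower half of
+                the frequency bins and one on the upper half.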
+ """ + super().__init__() + self.split_ratios = split_ratios + self.layers = nn.ModuleList() + self.conv = isinstance(layer, HEncLayer) + assert not layer.norm + assert layer.freq + assert layer.pad + if not self.conv: + assert not layer.context_freq + for k in range(len(split_ratios) + 1): + lay = deepcopy(layer) + if self.conv: + lay.conv.padding = (0, 0) + else: + lay.pad = False + for m in lay.modules(): + if hasattr(m, 'reset_parameters'): + m.reset_parameters() + self.layers.append(lay) + + def forward(self, x, skip=None, length=None): + B, C, Fr, T = x.shape + + ratios = list(self.split_ratios) + [1] + start = 0 + outs = [] + for ratio, layer in zip(ratios, self.layers): + if self.conv: + pad = layer.kernel_size // 4 + if ratio == 1: + limit = Fr + frames = -1 + else: + limit = int(round(Fr * ratio)) + le = limit - start + if start == 0: + le += pad + frames = round((le - layer.kernel_size) / layer.stride + 1) + limit = start + (frames - 1) * layer.stride + layer.kernel_size + if start == 0: + limit -= pad + assert limit - start > 0, (limit, start) + assert limit <= Fr, (limit, Fr) + y = x[:, :, start:limit, :] + if start == 0: + y = F.pad(y, (0, 0, pad, 0)) + if ratio == 1: + y = F.pad(y, (0, 0, 0, pad)) + outs.append(layer(y)) + start = limit - layer.kernel_size + layer.stride + else: + if ratio == 1: + limit = Fr + else: + limit = int(round(Fr * ratio)) + last = layer.last + layer.last = True + + y = x[:, :, start:limit] + s = skip[:, :, start:limit] + out, _ = layer(y, s, None) + if outs: + outs[-1][:, :, -layer.stride:] += ( + out[:, :, :layer.stride] - layer.conv_tr.bias.view(1, -1, 1, 1)) + out = out[:, :, layer.stride:] + if ratio == 1: + out = out[:, :, :-layer.stride // 2, :] + if start == 0: + out = out[:, :, layer.stride // 2:, :] + outs.append(out) + layer.last = last + start = limit + out = torch.cat(outs, dim=2) + if not self.conv and not last: + out = F.gelu(out) + if self.conv: + return out + else: + return out, None + + +class HDecLayer(nn.Module): + def __init__(self, chin, chout, last=False, kernel_size=8, stride=4, norm_groups=1, empty=False, + freq=True, dconv=True, norm=True, context=1, dconv_kw={}, pad=True, + context_freq=True, rewrite=True): + """ + Same as HEncLayer but for decoder. See `HEncLayer` for documentation. 
+ """ + super().__init__() + norm_fn = lambda d: nn.Identity() # noqa + if norm: + norm_fn = lambda d: nn.GroupNorm(norm_groups, d) # noqa + if pad: + pad = kernel_size // 4 + else: + pad = 0 + self.pad = pad + self.last = last + self.freq = freq + self.chin = chin + self.empty = empty + self.stride = stride + self.kernel_size = kernel_size + self.norm = norm + self.context_freq = context_freq + klass = nn.Conv1d + klass_tr = nn.ConvTranspose1d + if freq: + kernel_size = [kernel_size, 1] + stride = [stride, 1] + klass = nn.Conv2d + klass_tr = nn.ConvTranspose2d + self.conv_tr = klass_tr(chin, chout, kernel_size, stride) + self.norm2 = norm_fn(chout) + if self.empty: + return + self.rewrite = None + if rewrite: + if context_freq: + self.rewrite = klass(chin, 2 * chin, 1 + 2 * context, 1, context) + else: + self.rewrite = klass(chin, 2 * chin, [1, 1 + 2 * context], 1, + [0, context]) + self.norm1 = norm_fn(2 * chin) + + self.dconv = None + if dconv: + self.dconv = DConv(chin, **dconv_kw) + + def forward(self, x, skip, length): + if self.freq and x.dim() == 3: + B, C, T = x.shape + x = x.view(B, self.chin, -1, T) + + if not self.empty: + x = x + skip + + if self.rewrite: + y = F.glu(self.norm1(self.rewrite(x)), dim=1) + else: + y = x + if self.dconv: + if self.freq: + B, C, Fr, T = y.shape + y = y.permute(0, 2, 1, 3).reshape(-1, C, T) + y = self.dconv(y) + if self.freq: + y = y.view(B, Fr, C, T).permute(0, 2, 1, 3) + else: + y = x + assert skip is None + z = self.norm2(self.conv_tr(y)) + if self.freq: + if self.pad: + z = z[..., self.pad:-self.pad, :] + else: + z = z[..., self.pad:self.pad + length] + assert z.shape[-1] == length, (z.shape[-1], length) + if not self.last: + z = F.gelu(z) + return z, y + + +class HDemucs(nn.Module): + """ + Spectrogram and hybrid Demucs model. + The spectrogram model has the same structure as Demucs, except the first few layers are over the + frequency axis, until there is only 1 frequency, and then it moves to time convolutions. + Frequency layers can still access information across time steps thanks to the DConv residual. + + Hybrid model have a parallel time branch. At some layer, the time branch has the same stride + as the frequency branch and then the two are combined. The opposite happens in the decoder. + + Models can either use naive iSTFT from masking, Wiener filtering ([Ulhih et al. 2017]), + or complex as channels (CaC) [Choi et al. 2020]. Wiener filtering is based on + Open Unmix implementation [Stoter et al. 2019]. + + The loss is always on the temporal domain, by backpropagating through the above + output methods and iSTFT. This allows to define hybrid models nicely. However, this breaks + a bit Wiener filtering, as doing more iteration at test time will change the spectrogram + contribution, without changing the one from the waveform, which will lead to worse performance. + I tried using the residual option in OpenUnmix Wiener implementation, but it didn't improve. + CaC on the other hand provides similar performance for hybrid, and works naturally with + hybrid models. + + This model also uses frequency embeddings are used to improve efficiency on convolutions + over the freq. axis, following [Isik et al. 2020] (https://arxiv.org/pdf/2008.04470.pdf). + + Unlike classic Demucs, there is no resampling here, and normalization is always applied. 
+ """ + @capture_init + def __init__(self, + sources, + # Channels + audio_channels=2, + channels=48, + channels_time=None, + growth=2, + # STFT + nfft=4096, + wiener_iters=0, + end_iters=0, + wiener_residual=False, + cac=True, + # Main structure + depth=6, + rewrite=True, + hybrid=True, + hybrid_old=False, + # Frequency branch + multi_freqs=None, + multi_freqs_depth=2, + freq_emb=0.2, + emb_scale=10, + emb_smooth=True, + # Convolutions + kernel_size=8, + time_stride=2, + stride=4, + context=1, + context_enc=0, + # Normalization + norm_starts=4, + norm_groups=4, + # DConv residual branch + dconv_mode=1, + dconv_depth=2, + dconv_comp=4, + dconv_attn=4, + dconv_lstm=4, + dconv_init=1e-4, + # Weight init + rescale=0.1, + # Metadata + samplerate=44100, + segment=4 * 10): + + """ + Args: + sources (list[str]): list of source names. + audio_channels (int): input/output audio channels. + channels (int): initial number of hidden channels. + channels_time: if not None, use a different `channels` value for the time branch. + growth: increase the number of hidden channels by this factor at each layer. + nfft: number of fft bins. Note that changing this require careful computation of + various shape parameters and will not work out of the box for hybrid models. + wiener_iters: when using Wiener filtering, number of iterations at test time. + end_iters: same but at train time. For a hybrid model, must be equal to `wiener_iters`. + wiener_residual: add residual source before wiener filtering. + cac: uses complex as channels, i.e. complex numbers are 2 channels each + in input and output. no further processing is done before ISTFT. + depth (int): number of layers in the encoder and in the decoder. + rewrite (bool): add 1x1 convolution to each layer. + hybrid (bool): make a hybrid time/frequency domain, otherwise frequency only. + hybrid_old: some models trained for MDX had a padding bug. This replicates + this bug to avoid retraining them. + multi_freqs: list of frequency ratios for splitting frequency bands with `MultiWrap`. + multi_freqs_depth: how many layers to wrap with `MultiWrap`. Only the outermost + layers will be wrapped. + freq_emb: add frequency embedding after the first frequency layer if > 0, + the actual value controls the weight of the embedding. + emb_scale: equivalent to scaling the embedding learning rate + emb_smooth: initialize the embedding with a smooth one (with respect to frequencies). + kernel_size: kernel_size for encoder and decoder layers. + stride: stride for encoder and decoder layers. + time_stride: stride for the final time layer, after the merge. + context: context for 1x1 conv in the decoder. + context_enc: context for 1x1 conv in the encoder. + norm_starts: layer at which group norm starts being used. + decoder layers are numbered in reverse order. + norm_groups: number of groups for group norm. + dconv_mode: if 1: dconv in encoder only, 2: decoder only, 3: both. + dconv_depth: depth of residual DConv branch. + dconv_comp: compression of DConv branch. + dconv_attn: adds attention layers in DConv branch starting at this layer. + dconv_lstm: adds a LSTM layer in DConv branch starting at this layer. + dconv_init: initial scale for the DConv branch LayerScale. 
+ rescale: weight recaling trick + + """ + super().__init__() + + self.cac = cac + self.wiener_residual = wiener_residual + self.audio_channels = audio_channels + self.sources = sources + self.kernel_size = kernel_size + self.context = context + self.stride = stride + self.depth = depth + self.channels = channels + self.samplerate = samplerate + self.segment = segment + + self.nfft = nfft + self.hop_length = nfft // 4 + self.wiener_iters = wiener_iters + self.end_iters = end_iters + self.freq_emb = None + self.hybrid = hybrid + self.hybrid_old = hybrid_old + if hybrid_old: + assert hybrid, "hybrid_old must come with hybrid=True" + if hybrid: + assert wiener_iters == end_iters + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + if hybrid: + self.tencoder = nn.ModuleList() + self.tdecoder = nn.ModuleList() + + chin = audio_channels + chin_z = chin # number of channels for the freq branch + if self.cac: + chin_z *= 2 + chout = channels_time or channels + chout_z = channels + freqs = nfft // 2 + + for index in range(depth): + lstm = index >= dconv_lstm + attn = index >= dconv_attn + norm = index >= norm_starts + freq = freqs > 1 + stri = stride + ker = kernel_size + if not freq: + assert freqs == 1 + ker = time_stride * 2 + stri = time_stride + + pad = True + last_freq = False + if freq and freqs <= kernel_size: + ker = freqs + pad = False + last_freq = True + + kw = { + 'kernel_size': ker, + 'stride': stri, + 'freq': freq, + 'pad': pad, + 'norm': norm, + 'rewrite': rewrite, + 'norm_groups': norm_groups, + 'dconv_kw': { + 'lstm': lstm, + 'attn': attn, + 'depth': dconv_depth, + 'compress': dconv_comp, + 'init': dconv_init, + 'gelu': True, + } + } + kwt = dict(kw) + kwt['freq'] = 0 + kwt['kernel_size'] = kernel_size + kwt['stride'] = stride + kwt['pad'] = True + kw_dec = dict(kw) + multi = False + if multi_freqs and index < multi_freqs_depth: + multi = True + kw_dec['context_freq'] = False + + if last_freq: + chout_z = max(chout, chout_z) + chout = chout_z + + enc = HEncLayer(chin_z, chout_z, + dconv=dconv_mode & 1, context=context_enc, **kw) + if hybrid and freq: + tenc = HEncLayer(chin, chout, dconv=dconv_mode & 1, context=context_enc, + empty=last_freq, **kwt) + self.tencoder.append(tenc) + + if multi: + enc = MultiWrap(enc, multi_freqs) + self.encoder.append(enc) + if index == 0: + chin = self.audio_channels * len(self.sources) + chin_z = chin + if self.cac: + chin_z *= 2 + dec = HDecLayer(chout_z, chin_z, dconv=dconv_mode & 2, + last=index == 0, context=context, **kw_dec) + if multi: + dec = MultiWrap(dec, multi_freqs) + if hybrid and freq: + tdec = HDecLayer(chout, chin, dconv=dconv_mode & 2, empty=last_freq, + last=index == 0, context=context, **kwt) + self.tdecoder.insert(0, tdec) + self.decoder.insert(0, dec) + + chin = chout + chin_z = chout_z + chout = int(growth * chout) + chout_z = int(growth * chout_z) + if freq: + if freqs <= kernel_size: + freqs = 1 + else: + freqs //= stride + if index == 0 and freq_emb: + self.freq_emb = ScaledEmbedding( + freqs, chin_z, smooth=emb_smooth, scale=emb_scale) + self.freq_emb_scale = freq_emb + + if rescale: + rescale_module(self, reference=rescale) + + def _spec(self, x): + hl = self.hop_length + nfft = self.nfft + x0 = x # noqa + + if self.hybrid: + # We re-pad the signal in order to keep the property + # that the size of the output is exactly the size of the input + # divided by the stride (here hop_length), when divisible. + # This is achieved by padding by 1/4th of the kernel size (here nfft). 
+ # which is not supported by torch.stft. + # Having all convolution operations follow this convention allow to easily + # align the time and frequency branches later on. + assert hl == nfft // 4 + le = int(math.ceil(x.shape[-1] / hl)) + pad = hl // 2 * 3 + if not self.hybrid_old: + x = pad1d(x, (pad, pad + le * hl - x.shape[-1]), mode='reflect') + else: + x = pad1d(x, (pad, pad + le * hl - x.shape[-1])) + + z = spectro(x, nfft, hl)[..., :-1, :] + if self.hybrid: + assert z.shape[-1] == le + 4, (z.shape, x.shape, le) + z = z[..., 2:2+le] + return z + + def _ispec(self, z, length=None, scale=0): + hl = self.hop_length // (4 ** scale) + z = F.pad(z, (0, 0, 0, 1)) + if self.hybrid: + z = F.pad(z, (2, 2)) + pad = hl // 2 * 3 + if not self.hybrid_old: + le = hl * int(math.ceil(length / hl)) + 2 * pad + else: + le = hl * int(math.ceil(length / hl)) + x = ispectro(z, hl, length=le) + if not self.hybrid_old: + x = x[..., pad:pad + length] + else: + x = x[..., :length] + else: + x = ispectro(z, hl, length) + return x + + def _magnitude(self, z): + # return the magnitude of the spectrogram, except when cac is True, + # in which case we just move the complex dimension to the channel one. + if self.cac: + B, C, Fr, T = z.shape + m = torch.view_as_real(z).permute(0, 1, 4, 2, 3) + m = m.reshape(B, C * 2, Fr, T) + else: + m = z.abs() + return m + + def _mask(self, z, m): + # Apply masking given the mixture spectrogram `z` and the estimated mask `m`. + # If `cac` is True, `m` is actually a full spectrogram and `z` is ignored. + niters = self.wiener_iters + if self.cac: + B, S, C, Fr, T = m.shape + out = m.view(B, S, -1, 2, Fr, T).permute(0, 1, 2, 4, 5, 3) + out = torch.view_as_complex(out.contiguous()) + return out + if self.training: + niters = self.end_iters + if niters < 0: + z = z[:, None] + return z / (1e-8 + z.abs()) * m + else: + return self._wiener(m, z, niters) + + def _wiener(self, mag_out, mix_stft, niters): + # apply wiener filtering from OpenUnmix. + init = mix_stft.dtype + wiener_win_len = 300 + residual = self.wiener_residual + + B, S, C, Fq, T = mag_out.shape + mag_out = mag_out.permute(0, 4, 3, 2, 1) + mix_stft = torch.view_as_real(mix_stft.permute(0, 3, 2, 1)) + + outs = [] + for sample in range(B): + pos = 0 + out = [] + for pos in range(0, T, wiener_win_len): + frame = slice(pos, pos + wiener_win_len) + z_out = wiener( + mag_out[sample, frame], mix_stft[sample, frame], niters, + residual=residual) + out.append(z_out.transpose(-1, -2)) + outs.append(torch.cat(out, dim=0)) + out = torch.view_as_complex(torch.stack(outs, 0)) + out = out.permute(0, 4, 3, 2, 1).contiguous() + if residual: + out = out[:, :-1] + assert list(out.shape) == [B, S, C, Fq, T] + return out.to(init) + + def forward(self, mix): + x = mix + length = x.shape[-1] + + z = self._spec(mix) + mag = self._magnitude(z).to(mix.device) + x = mag + + B, C, Fq, T = x.shape + + # unlike previous Demucs, we always normalize because it is easier. + mean = x.mean(dim=(1, 2, 3), keepdim=True) + std = x.std(dim=(1, 2, 3), keepdim=True) + x = (x - mean) / (1e-5 + std) + # x will be the freq. branch input. + + if self.hybrid: + # Prepare the time branch input. + xt = mix + meant = xt.mean(dim=(1, 2), keepdim=True) + stdt = xt.std(dim=(1, 2), keepdim=True) + xt = (xt - meant) / (1e-5 + stdt) + + # okay, this is a giant mess I know... + saved = [] # skip connections, freq. + saved_t = [] # skip connections, time. + lengths = [] # saved lengths to properly remove padding, freq branch. + lengths_t = [] # saved lengths for time branch. 
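+        # Encoder pass: the time branch runs in parallel with the frequency
+        # branch until both reach the same stride; the final (empty) time layer
+        # is injected into the frequency branch instead of being saved as a skip.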
+ for idx, encode in enumerate(self.encoder): + lengths.append(x.shape[-1]) + inject = None + if self.hybrid and idx < len(self.tencoder): + # we have not yet merged branches. + lengths_t.append(xt.shape[-1]) + tenc = self.tencoder[idx] + xt = tenc(xt) + if not tenc.empty: + # save for skip connection + saved_t.append(xt) + else: + # tenc contains just the first conv., so that now time and freq. + # branches have the same shape and can be merged. + inject = xt + x = encode(x, inject) + if idx == 0 and self.freq_emb is not None: + # add frequency embedding to allow for non equivariant convolutions + # over the frequency axis. + frs = torch.arange(x.shape[-2], device=x.device) + emb = self.freq_emb(frs).t()[None, :, :, None].expand_as(x) + x = x + self.freq_emb_scale * emb + + saved.append(x) + + x = torch.zeros_like(x) + if self.hybrid: + xt = torch.zeros_like(x) + # initialize everything to zero (signal will go through u-net skips). + + for idx, decode in enumerate(self.decoder): + skip = saved.pop(-1) + x, pre = decode(x, skip, lengths.pop(-1)) + # `pre` contains the output just before final transposed convolution, + # which is used when the freq. and time branch separate. + + if self.hybrid: + offset = self.depth - len(self.tdecoder) + if self.hybrid and idx >= offset: + tdec = self.tdecoder[idx - offset] + length_t = lengths_t.pop(-1) + if tdec.empty: + assert pre.shape[2] == 1, pre.shape + pre = pre[:, :, 0] + xt, _ = tdec(pre, None, length_t) + else: + skip = saved_t.pop(-1) + xt, _ = tdec(xt, skip, length_t) + + # Let's make sure we used all stored skip connections. + assert len(saved) == 0 + assert len(lengths_t) == 0 + assert len(saved_t) == 0 + + S = len(self.sources) + x = x.view(B, S, -1, Fq, T) + x = x * std[:, None] + mean[:, None] + + # to cpu as non-cuda GPUs don't support complex numbers + # demucs issue #435 ##432 + # NOTE: in this case z already is on cpu + # TODO: remove this when mps supports complex numbers + + device_type = x.device.type + device_load = f"{device_type}:{x.device.index}" if not device_type == 'mps' else device_type + x_is_other_gpu = not device_type in ["cuda", "cpu"] + + if x_is_other_gpu: + x = x.cpu() + + zout = self._mask(z, x) + x = self._ispec(zout, length) + + # back to other device + if x_is_other_gpu: + x = x.to(device_load) + + if self.hybrid: + xt = xt.view(B, S, -1, length) + xt = xt * stdt[:, None] + meant[:, None] + x = xt + x + return x \ No newline at end of file diff --git a/demucs/htdemucs.py b/demucs/htdemucs.py new file mode 100644 index 0000000000000000000000000000000000000000..bad6b113102c959c72d4c47387c2cc2f48a17e85 --- /dev/null +++ b/demucs/htdemucs.py @@ -0,0 +1,664 @@ +# Copyright (c) Meta, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# First author is Simon Rouard. +""" +This code contains the spectrogram and Hybrid version of Demucs. +""" +import math + +from .filtering import wiener +import torch +from torch import nn +from torch.nn import functional as F +from fractions import Fraction +from einops import rearrange + +from .transformer import CrossTransformerEncoder + +from .demucs import rescale_module +from .states import capture_init +from .spec import spectro, ispectro +from .hdemucs import pad1d, ScaledEmbedding, HEncLayer, MultiWrap, HDecLayer + + +class HTDemucs(nn.Module): + """ + Spectrogram and hybrid Demucs model. 
+ The spectrogram model has the same structure as Demucs, except the first few layers are over the + frequency axis, until there is only 1 frequency, and then it moves to time convolutions. + Frequency layers can still access information across time steps thanks to the DConv residual. + + Hybrid model have a parallel time branch. At some layer, the time branch has the same stride + as the frequency branch and then the two are combined. The opposite happens in the decoder. + + Models can either use naive iSTFT from masking, Wiener filtering ([Ulhih et al. 2017]), + or complex as channels (CaC) [Choi et al. 2020]. Wiener filtering is based on + Open Unmix implementation [Stoter et al. 2019]. + + The loss is always on the temporal domain, by backpropagating through the above + output methods and iSTFT. This allows to define hybrid models nicely. However, this breaks + a bit Wiener filtering, as doing more iteration at test time will change the spectrogram + contribution, without changing the one from the waveform, which will lead to worse performance. + I tried using the residual option in OpenUnmix Wiener implementation, but it didn't improve. + CaC on the other hand provides similar performance for hybrid, and works naturally with + hybrid models. + + This model also uses frequency embeddings are used to improve efficiency on convolutions + over the freq. axis, following [Isik et al. 2020] (https://arxiv.org/pdf/2008.04470.pdf). + + Unlike classic Demucs, there is no resampling here, and normalization is always applied. + """ + + @capture_init + def __init__( + self, + sources, + # Channels + audio_channels=2, + channels=48, + channels_time=None, + growth=2, + # STFT + nfft=4096, + wiener_iters=0, + end_iters=0, + wiener_residual=False, + cac=True, + # Main structure + depth=4, + rewrite=True, + # Frequency branch + multi_freqs=None, + multi_freqs_depth=3, + freq_emb=0.2, + emb_scale=10, + emb_smooth=True, + # Convolutions + kernel_size=8, + time_stride=2, + stride=4, + context=1, + context_enc=0, + # Normalization + norm_starts=4, + norm_groups=4, + # DConv residual branch + dconv_mode=1, + dconv_depth=2, + dconv_comp=8, + dconv_init=1e-3, + # Before the Transformer + bottom_channels=0, + # Transformer + t_layers=5, + t_emb="sin", + t_hidden_scale=4.0, + t_heads=8, + t_dropout=0.0, + t_max_positions=10000, + t_norm_in=True, + t_norm_in_group=False, + t_group_norm=False, + t_norm_first=True, + t_norm_out=True, + t_max_period=10000.0, + t_weight_decay=0.0, + t_lr=None, + t_layer_scale=True, + t_gelu=True, + t_weight_pos_embed=1.0, + t_sin_random_shift=0, + t_cape_mean_normalize=True, + t_cape_augment=True, + t_cape_glob_loc_scale=[5000.0, 1.0, 1.4], + t_sparse_self_attn=False, + t_sparse_cross_attn=False, + t_mask_type="diag", + t_mask_random_seed=42, + t_sparse_attn_window=500, + t_global_window=100, + t_sparsity=0.95, + t_auto_sparsity=False, + # ------ Particuliar parameters + t_cross_first=False, + # Weight init + rescale=0.1, + # Metadata + samplerate=44100, + segment=10, + use_train_segment=True, + ): + """ + Args: + sources (list[str]): list of source names. + audio_channels (int): input/output audio channels. + channels (int): initial number of hidden channels. + channels_time: if not None, use a different `channels` value for the time branch. + growth: increase the number of hidden channels by this factor at each layer. + nfft: number of fft bins. Note that changing this require careful computation of + various shape parameters and will not work out of the box for hybrid models. 
+ wiener_iters: when using Wiener filtering, number of iterations at test time. + end_iters: same but at train time. For a hybrid model, must be equal to `wiener_iters`. + wiener_residual: add residual source before wiener filtering. + cac: uses complex as channels, i.e. complex numbers are 2 channels each + in input and output. no further processing is done before ISTFT. + depth (int): number of layers in the encoder and in the decoder. + rewrite (bool): add 1x1 convolution to each layer. + multi_freqs: list of frequency ratios for splitting frequency bands with `MultiWrap`. + multi_freqs_depth: how many layers to wrap with `MultiWrap`. Only the outermost + layers will be wrapped. + freq_emb: add frequency embedding after the first frequency layer if > 0, + the actual value controls the weight of the embedding. + emb_scale: equivalent to scaling the embedding learning rate + emb_smooth: initialize the embedding with a smooth one (with respect to frequencies). + kernel_size: kernel_size for encoder and decoder layers. + stride: stride for encoder and decoder layers. + time_stride: stride for the final time layer, after the merge. + context: context for 1x1 conv in the decoder. + context_enc: context for 1x1 conv in the encoder. + norm_starts: layer at which group norm starts being used. + decoder layers are numbered in reverse order. + norm_groups: number of groups for group norm. + dconv_mode: if 1: dconv in encoder only, 2: decoder only, 3: both. + dconv_depth: depth of residual DConv branch. + dconv_comp: compression of DConv branch. + dconv_attn: adds attention layers in DConv branch starting at this layer. + dconv_lstm: adds a LSTM layer in DConv branch starting at this layer. + dconv_init: initial scale for the DConv branch LayerScale. + bottom_channels: if >0 it adds a linear layer (1x1 Conv) before and after the + transformer in order to change the number of channels + t_layers: number of layers in each branch (waveform and spec) of the transformer + t_emb: "sin", "cape" or "scaled" + t_hidden_scale: the hidden scale of the Feedforward parts of the transformer + for instance if C = 384 (the number of channels in the transformer) and + t_hidden_scale = 4.0 then the intermediate layer of the FFN has dimension + 384 * 4 = 1536 + t_heads: number of heads for the transformer + t_dropout: dropout in the transformer + t_max_positions: max_positions for the "scaled" positional embedding, only + useful if t_emb="scaled" + t_norm_in: (bool) norm before addinf positional embedding and getting into the + transformer layers + t_norm_in_group: (bool) if True while t_norm_in=True, the norm is on all the + timesteps (GroupNorm with group=1) + t_group_norm: (bool) if True, the norms of the Encoder Layers are on all the + timesteps (GroupNorm with group=1) + t_norm_first: (bool) if True the norm is before the attention and before the FFN + t_norm_out: (bool) if True, there is a GroupNorm (group=1) at the end of each layer + t_max_period: (float) denominator in the sinusoidal embedding expression + t_weight_decay: (float) weight decay for the transformer + t_lr: (float) specific learning rate for the transformer + t_layer_scale: (bool) Layer Scale for the transformer + t_gelu: (bool) activations of the transformer are GeLU if True, ReLU else + t_weight_pos_embed: (float) weighting of the positional embedding + t_cape_mean_normalize: (bool) if t_emb="cape", normalisation of positional embeddings + see: https://arxiv.org/abs/2106.03143 + t_cape_augment: (bool) if t_emb="cape", must be True during 
training and False + during the inference, see: https://arxiv.org/abs/2106.03143 + t_cape_glob_loc_scale: (list of 3 floats) if t_emb="cape", CAPE parameters + see: https://arxiv.org/abs/2106.03143 + t_sparse_self_attn: (bool) if True, the self attentions are sparse + t_sparse_cross_attn: (bool) if True, the cross-attentions are sparse (don't use it + unless you designed really specific masks) + t_mask_type: (str) can be "diag", "jmask", "random", "global" or any combination + with '_' between: i.e. "diag_jmask_random" (note that this is permutation + invariant i.e. "diag_jmask_random" is equivalent to "jmask_random_diag") + t_mask_random_seed: (int) if "random" is in t_mask_type, controls the seed + that generated the random part of the mask + t_sparse_attn_window: (int) if "diag" is in t_mask_type, for a query (i), and + a key (j), the mask is True id |i-j|<=t_sparse_attn_window + t_global_window: (int) if "global" is in t_mask_type, mask[:t_global_window, :] + and mask[:, :t_global_window] will be True + t_sparsity: (float) if "random" is in t_mask_type, t_sparsity is the sparsity + level of the random part of the mask. + t_cross_first: (bool) if True cross attention is the first layer of the + transformer (False seems to be better) + rescale: weight rescaling trick + use_train_segment: (bool) if True, the actual size that is used during the + training is used during inference. + """ + super().__init__() + self.cac = cac + self.wiener_residual = wiener_residual + self.audio_channels = audio_channels + self.sources = sources + self.kernel_size = kernel_size + self.context = context + self.stride = stride + self.depth = depth + self.bottom_channels = bottom_channels + self.channels = channels + self.samplerate = samplerate + self.segment = segment + self.use_train_segment = use_train_segment + self.nfft = nfft + self.hop_length = nfft // 4 + self.wiener_iters = wiener_iters + self.end_iters = end_iters + self.freq_emb = None + assert wiener_iters == end_iters + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + self.tencoder = nn.ModuleList() + self.tdecoder = nn.ModuleList() + + chin = audio_channels + chin_z = chin # number of channels for the freq branch + if self.cac: + chin_z *= 2 + chout = channels_time or channels + chout_z = channels + freqs = nfft // 2 + + for index in range(depth): + norm = index >= norm_starts + freq = freqs > 1 + stri = stride + ker = kernel_size + if not freq: + assert freqs == 1 + ker = time_stride * 2 + stri = time_stride + + pad = True + last_freq = False + if freq and freqs <= kernel_size: + ker = freqs + pad = False + last_freq = True + + kw = { + "kernel_size": ker, + "stride": stri, + "freq": freq, + "pad": pad, + "norm": norm, + "rewrite": rewrite, + "norm_groups": norm_groups, + "dconv_kw": { + "depth": dconv_depth, + "compress": dconv_comp, + "init": dconv_init, + "gelu": True, + }, + } + kwt = dict(kw) + kwt["freq"] = 0 + kwt["kernel_size"] = kernel_size + kwt["stride"] = stride + kwt["pad"] = True + kw_dec = dict(kw) + multi = False + if multi_freqs and index < multi_freqs_depth: + multi = True + kw_dec["context_freq"] = False + + if last_freq: + chout_z = max(chout, chout_z) + chout = chout_z + + enc = HEncLayer( + chin_z, chout_z, dconv=dconv_mode & 1, context=context_enc, **kw + ) + if freq: + tenc = HEncLayer( + chin, + chout, + dconv=dconv_mode & 1, + context=context_enc, + empty=last_freq, + **kwt + ) + self.tencoder.append(tenc) + + if multi: + enc = MultiWrap(enc, multi_freqs) + self.encoder.append(enc) + if index == 0: 
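+                # The innermost decoder (index 0) must map back to one set of
+                # audio channels per source (doubled again when `cac` packs the
+                # real and imaginary parts as channels).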
+ chin = self.audio_channels * len(self.sources) + chin_z = chin + if self.cac: + chin_z *= 2 + dec = HDecLayer( + chout_z, + chin_z, + dconv=dconv_mode & 2, + last=index == 0, + context=context, + **kw_dec + ) + if multi: + dec = MultiWrap(dec, multi_freqs) + if freq: + tdec = HDecLayer( + chout, + chin, + dconv=dconv_mode & 2, + empty=last_freq, + last=index == 0, + context=context, + **kwt + ) + self.tdecoder.insert(0, tdec) + self.decoder.insert(0, dec) + + chin = chout + chin_z = chout_z + chout = int(growth * chout) + chout_z = int(growth * chout_z) + if freq: + if freqs <= kernel_size: + freqs = 1 + else: + freqs //= stride + if index == 0 and freq_emb: + self.freq_emb = ScaledEmbedding( + freqs, chin_z, smooth=emb_smooth, scale=emb_scale + ) + self.freq_emb_scale = freq_emb + + if rescale: + rescale_module(self, reference=rescale) + + transformer_channels = channels * growth ** (depth - 1) + if bottom_channels: + self.channel_upsampler = nn.Conv1d(transformer_channels, bottom_channels, 1) + self.channel_downsampler = nn.Conv1d( + bottom_channels, transformer_channels, 1 + ) + self.channel_upsampler_t = nn.Conv1d( + transformer_channels, bottom_channels, 1 + ) + self.channel_downsampler_t = nn.Conv1d( + bottom_channels, transformer_channels, 1 + ) + + transformer_channels = bottom_channels + + if t_layers > 0: + self.crosstransformer = CrossTransformerEncoder( + dim=transformer_channels, + emb=t_emb, + hidden_scale=t_hidden_scale, + num_heads=t_heads, + num_layers=t_layers, + cross_first=t_cross_first, + dropout=t_dropout, + max_positions=t_max_positions, + norm_in=t_norm_in, + norm_in_group=t_norm_in_group, + group_norm=t_group_norm, + norm_first=t_norm_first, + norm_out=t_norm_out, + max_period=t_max_period, + weight_decay=t_weight_decay, + lr=t_lr, + layer_scale=t_layer_scale, + gelu=t_gelu, + sin_random_shift=t_sin_random_shift, + weight_pos_embed=t_weight_pos_embed, + cape_mean_normalize=t_cape_mean_normalize, + cape_augment=t_cape_augment, + cape_glob_loc_scale=t_cape_glob_loc_scale, + sparse_self_attn=t_sparse_self_attn, + sparse_cross_attn=t_sparse_cross_attn, + mask_type=t_mask_type, + mask_random_seed=t_mask_random_seed, + sparse_attn_window=t_sparse_attn_window, + global_window=t_global_window, + sparsity=t_sparsity, + auto_sparsity=t_auto_sparsity, + ) + else: + self.crosstransformer = None + + def _spec(self, x): + hl = self.hop_length + nfft = self.nfft + x0 = x # noqa + + # We re-pad the signal in order to keep the property + # that the size of the output is exactly the size of the input + # divided by the stride (here hop_length), when divisible. + # This is achieved by padding by 1/4th of the kernel size (here nfft). + # which is not supported by torch.stft. + # Having all convolution operations follow this convention allow to easily + # align the time and frequency branches later on. 
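+        # Illustrative numbers: with nfft=4096 (hl=1024), a 10 s / 44.1 kHz input
+        # (441000 samples) gives le = ceil(441000 / 1024) = 431 frames; padding by
+        # pad = 3 * hl // 2 on the left and by pad + le * hl - 441000 on the right
+        # makes the STFT return le + 4 frames, of which 2 are trimmed on each side.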
+ assert hl == nfft // 4 + le = int(math.ceil(x.shape[-1] / hl)) + pad = hl // 2 * 3 + x = pad1d(x, (pad, pad + le * hl - x.shape[-1]), mode="reflect") + + z = spectro(x, nfft, hl)[..., :-1, :] + assert z.shape[-1] == le + 4, (z.shape, x.shape, le) + z = z[..., 2: 2 + le] + return z + + def _ispec(self, z, length=None, scale=0): + hl = self.hop_length // (4**scale) + z = F.pad(z, (0, 0, 0, 1)) + z = F.pad(z, (2, 2)) + pad = hl // 2 * 3 + le = hl * int(math.ceil(length / hl)) + 2 * pad + x = ispectro(z, hl, length=le) + x = x[..., pad: pad + length] + return x + + def _magnitude(self, z): + # return the magnitude of the spectrogram, except when cac is True, + # in which case we just move the complex dimension to the channel one. + if self.cac: + B, C, Fr, T = z.shape + m = torch.view_as_real(z).permute(0, 1, 4, 2, 3) + m = m.reshape(B, C * 2, Fr, T) + else: + m = z.abs() + return m + + def _mask(self, z, m): + # Apply masking given the mixture spectrogram `z` and the estimated mask `m`. + # If `cac` is True, `m` is actually a full spectrogram and `z` is ignored. + niters = self.wiener_iters + if self.cac: + B, S, C, Fr, T = m.shape + out = m.view(B, S, -1, 2, Fr, T).permute(0, 1, 2, 4, 5, 3) + out = torch.view_as_complex(out.contiguous()) + return out + if self.training: + niters = self.end_iters + if niters < 0: + z = z[:, None] + return z / (1e-8 + z.abs()) * m + else: + return self._wiener(m, z, niters) + + def _wiener(self, mag_out, mix_stft, niters): + # apply wiener filtering from OpenUnmix. + init = mix_stft.dtype + wiener_win_len = 300 + residual = self.wiener_residual + + B, S, C, Fq, T = mag_out.shape + mag_out = mag_out.permute(0, 4, 3, 2, 1) + mix_stft = torch.view_as_real(mix_stft.permute(0, 3, 2, 1)) + + outs = [] + for sample in range(B): + pos = 0 + out = [] + for pos in range(0, T, wiener_win_len): + frame = slice(pos, pos + wiener_win_len) + z_out = wiener( + mag_out[sample, frame], + mix_stft[sample, frame], + niters, + residual=residual, + ) + out.append(z_out.transpose(-1, -2)) + outs.append(torch.cat(out, dim=0)) + out = torch.view_as_complex(torch.stack(outs, 0)) + out = out.permute(0, 4, 3, 2, 1).contiguous() + if residual: + out = out[:, :-1] + assert list(out.shape) == [B, S, C, Fq, T] + return out.to(init) + + def valid_length(self, length: int): + """ + Return a length that is appropriate for evaluation. + In our case, always return the training length, unless + it is smaller than the given length, in which case this + raises an error. + """ + if not self.use_train_segment: + return length + training_length = int(self.segment * self.samplerate) + if training_length < length: + raise ValueError( + f"Given length {length} is longer than " + f"training length {training_length}") + return training_length + + def forward(self, mix): + length = mix.shape[-1] + length_pre_pad = None + if self.use_train_segment: + if self.training: + self.segment = Fraction(mix.shape[-1], self.samplerate) + else: + training_length = int(self.segment * self.samplerate) + if mix.shape[-1] < training_length: + length_pre_pad = mix.shape[-1] + mix = F.pad(mix, (0, training_length - length_pre_pad)) + z = self._spec(mix) + mag = self._magnitude(z).to(mix.device) + x = mag + + B, C, Fq, T = x.shape + + # unlike previous Demucs, we always normalize because it is easier. + mean = x.mean(dim=(1, 2, 3), keepdim=True) + std = x.std(dim=(1, 2, 3), keepdim=True) + x = (x - mean) / (1e-5 + std) + # x will be the freq. branch input. + + # Prepare the time branch input. 
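+        # The waveform branch gets its own normalization statistics (meant/stdt);
+        # each branch is de-normalized with its own statistics after decoding.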
+ xt = mix + meant = xt.mean(dim=(1, 2), keepdim=True) + stdt = xt.std(dim=(1, 2), keepdim=True) + xt = (xt - meant) / (1e-5 + stdt) + + # okay, this is a giant mess I know... + saved = [] # skip connections, freq. + saved_t = [] # skip connections, time. + lengths = [] # saved lengths to properly remove padding, freq branch. + lengths_t = [] # saved lengths for time branch. + for idx, encode in enumerate(self.encoder): + lengths.append(x.shape[-1]) + inject = None + if idx < len(self.tencoder): + # we have not yet merged branches. + lengths_t.append(xt.shape[-1]) + tenc = self.tencoder[idx] + xt = tenc(xt) + if not tenc.empty: + # save for skip connection + saved_t.append(xt) + else: + # tenc contains just the first conv., so that now time and freq. + # branches have the same shape and can be merged. + inject = xt + x = encode(x, inject) + if idx == 0 and self.freq_emb is not None: + # add frequency embedding to allow for non equivariant convolutions + # over the frequency axis. + frs = torch.arange(x.shape[-2], device=x.device) + emb = self.freq_emb(frs).t()[None, :, :, None].expand_as(x) + x = x + self.freq_emb_scale * emb + + saved.append(x) + if self.crosstransformer: + if self.bottom_channels: + b, c, f, t = x.shape + x = rearrange(x, "b c f t-> b c (f t)") + x = self.channel_upsampler(x) + x = rearrange(x, "b c (f t)-> b c f t", f=f) + xt = self.channel_upsampler_t(xt) + + x, xt = self.crosstransformer(x, xt) + + if self.bottom_channels: + x = rearrange(x, "b c f t-> b c (f t)") + x = self.channel_downsampler(x) + x = rearrange(x, "b c (f t)-> b c f t", f=f) + xt = self.channel_downsampler_t(xt) + + for idx, decode in enumerate(self.decoder): + skip = saved.pop(-1) + x, pre = decode(x, skip, lengths.pop(-1)) + # `pre` contains the output just before final transposed convolution, + # which is used when the freq. and time branch separate. + + offset = self.depth - len(self.tdecoder) + if idx >= offset: + tdec = self.tdecoder[idx - offset] + length_t = lengths_t.pop(-1) + if tdec.empty: + assert pre.shape[2] == 1, pre.shape + pre = pre[:, :, 0] + xt, _ = tdec(pre, None, length_t) + else: + skip = saved_t.pop(-1) + xt, _ = tdec(xt, skip, length_t) + + # Let's make sure we used all stored skip connections. 
+ assert len(saved) == 0 + assert len(lengths_t) == 0 + assert len(saved_t) == 0 + + S = len(self.sources) + x = x.view(B, S, -1, Fq, T) + x = x * std[:, None] + mean[:, None] + + # to cpu as non-cuda GPUs don't support complex numbers + # demucs issue #435 ##432 + # NOTE: in this case z already is on cpu + # TODO: remove this when mps supports complex numbers + + device_type = x.device.type + device_load = f"{device_type}:{x.device.index}" if not device_type == 'mps' else device_type + x_is_other_gpu = not device_type in ["cuda", "cpu"] + + if x_is_other_gpu: + x = x.cpu() + + zout = self._mask(z, x) + if self.use_train_segment: + if self.training: + x = self._ispec(zout, length) + else: + x = self._ispec(zout, training_length) + else: + x = self._ispec(zout, length) + + # back to other device + if x_is_other_gpu: + x = x.to(device_load) + + if self.use_train_segment: + if self.training: + xt = xt.view(B, S, -1, length) + else: + xt = xt.view(B, S, -1, training_length) + else: + xt = xt.view(B, S, -1, length) + xt = xt * stdt[:, None] + meant[:, None] + x = xt + x + if length_pre_pad: + x = x[..., :length_pre_pad] + return x diff --git a/demucs/model.py b/demucs/model.py new file mode 100644 index 0000000000000000000000000000000000000000..e2745b8c75322a6fa58b7c163eda7667381abbe9 --- /dev/null +++ b/demucs/model.py @@ -0,0 +1,218 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math + +import torch as th +from torch import nn + +from .utils import capture_init, center_trim + + +class BLSTM(nn.Module): + def __init__(self, dim, layers=1): + super().__init__() + self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim) + self.linear = nn.Linear(2 * dim, dim) + + def forward(self, x): + x = x.permute(2, 0, 1) + x = self.lstm(x)[0] + x = self.linear(x) + x = x.permute(1, 2, 0) + return x + + +def rescale_conv(conv, reference): + std = conv.weight.std().detach() + scale = (std / reference)**0.5 + conv.weight.data /= scale + if conv.bias is not None: + conv.bias.data /= scale + + +def rescale_module(module, reference): + for sub in module.modules(): + if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)): + rescale_conv(sub, reference) + + +def upsample(x, stride): + """ + Linear upsampling, the output will be `stride` times longer. + """ + batch, channels, time = x.size() + weight = th.arange(stride, device=x.device, dtype=th.float) / stride + x = x.view(batch, channels, time, 1) + out = x[..., :-1, :] * (1 - weight) + x[..., 1:, :] * weight + return out.reshape(batch, channels, -1) + + +def downsample(x, stride): + """ + Downsample x by decimation. + """ + return x[:, :, ::stride] + + +class Demucs(nn.Module): + @capture_init + def __init__(self, + sources=4, + audio_channels=2, + channels=64, + depth=6, + rewrite=True, + glu=True, + upsample=False, + rescale=0.1, + kernel_size=8, + stride=4, + growth=2., + lstm_layers=2, + context=3, + samplerate=44100): + """ + Args: + sources (int): number of sources to separate + audio_channels (int): stereo or mono + channels (int): first convolution channels + depth (int): number of encoder/decoder layers + rewrite (bool): add 1x1 convolution to each encoder layer + and a convolution to each decoder layer. + For the decoder layer, `context` gives the kernel size. 
+ glu (bool): use glu instead of ReLU + upsample (bool): use linear upsampling with convolutions + Wave-U-Net style, instead of transposed convolutions + rescale (int): rescale initial weights of convolutions + to get their standard deviation closer to `rescale` + kernel_size (int): kernel size for convolutions + stride (int): stride for convolutions + growth (float): multiply (resp divide) number of channels by that + for each layer of the encoder (resp decoder) + lstm_layers (int): number of lstm layers, 0 = no lstm + context (int): kernel size of the convolution in the + decoder before the transposed convolution. If > 1, + will provide some context from neighboring time + steps. + """ + + super().__init__() + self.audio_channels = audio_channels + self.sources = sources + self.kernel_size = kernel_size + self.context = context + self.stride = stride + self.depth = depth + self.upsample = upsample + self.channels = channels + self.samplerate = samplerate + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + self.final = None + if upsample: + self.final = nn.Conv1d(channels + audio_channels, sources * audio_channels, 1) + stride = 1 + + if glu: + activation = nn.GLU(dim=1) + ch_scale = 2 + else: + activation = nn.ReLU() + ch_scale = 1 + in_channels = audio_channels + for index in range(depth): + encode = [] + encode += [nn.Conv1d(in_channels, channels, kernel_size, stride), nn.ReLU()] + if rewrite: + encode += [nn.Conv1d(channels, ch_scale * channels, 1), activation] + self.encoder.append(nn.Sequential(*encode)) + + decode = [] + if index > 0: + out_channels = in_channels + else: + if upsample: + out_channels = channels + else: + out_channels = sources * audio_channels + if rewrite: + decode += [nn.Conv1d(channels, ch_scale * channels, context), activation] + if upsample: + decode += [ + nn.Conv1d(channels, out_channels, kernel_size, stride=1), + ] + else: + decode += [nn.ConvTranspose1d(channels, out_channels, kernel_size, stride)] + if index > 0: + decode.append(nn.ReLU()) + self.decoder.insert(0, nn.Sequential(*decode)) + in_channels = channels + channels = int(growth * channels) + + channels = in_channels + + if lstm_layers: + self.lstm = BLSTM(channels, lstm_layers) + else: + self.lstm = None + + if rescale: + rescale_module(self, reference=rescale) + + def valid_length(self, length): + """ + Return the nearest valid length to use with the model so that + there is no time steps left over in a convolutions, e.g. for all + layers, size of the input - kernel_size % stride = 0. + + If the mixture has a valid length, the estimated sources + will have exactly the same length when context = 1. If context > 1, + the two signals can be center trimmed to match. + + For training, extracts should have a valid length.For evaluation + on full tracks we recommend passing `pad = True` to :method:`forward`. 
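+
+        For instance, with the defaults here (kernel_size=8, stride=4, depth=6,
+        context=3, upsample=False) the rounding above gives
+        ``valid_length(44100) == 58708`` (illustrative; the exact value depends
+        on the hyper-parameters).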
+ """ + for _ in range(self.depth): + if self.upsample: + length = math.ceil(length / self.stride) + self.kernel_size - 1 + else: + length = math.ceil((length - self.kernel_size) / self.stride) + 1 + length = max(1, length) + length += self.context - 1 + for _ in range(self.depth): + if self.upsample: + length = length * self.stride + self.kernel_size - 1 + else: + length = (length - 1) * self.stride + self.kernel_size + + return int(length) + + def forward(self, mix): + x = mix + saved = [x] + for encode in self.encoder: + x = encode(x) + saved.append(x) + if self.upsample: + x = downsample(x, self.stride) + if self.lstm: + x = self.lstm(x) + for decode in self.decoder: + if self.upsample: + x = upsample(x, stride=self.stride) + skip = center_trim(saved.pop(-1), x) + x = x + skip + x = decode(x) + if self.final: + skip = center_trim(saved.pop(-1), x) + x = th.cat([x, skip], dim=1) + x = self.final(x) + + x = x.view(x.size(0), self.sources, self.audio_channels, x.size(-1)) + return x diff --git a/demucs/model_v2.py b/demucs/model_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..db43fc5ebba683e85d9abfa8433e679ff648e216 --- /dev/null +++ b/demucs/model_v2.py @@ -0,0 +1,218 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +import math + +import julius +from torch import nn +from .tasnet_v2 import ConvTasNet + +from .utils import capture_init, center_trim + + +class BLSTM(nn.Module): + def __init__(self, dim, layers=1): + super().__init__() + self.lstm = nn.LSTM(bidirectional=True, num_layers=layers, hidden_size=dim, input_size=dim) + self.linear = nn.Linear(2 * dim, dim) + + def forward(self, x): + x = x.permute(2, 0, 1) + x = self.lstm(x)[0] + x = self.linear(x) + x = x.permute(1, 2, 0) + return x + + +def rescale_conv(conv, reference): + std = conv.weight.std().detach() + scale = (std / reference)**0.5 + conv.weight.data /= scale + if conv.bias is not None: + conv.bias.data /= scale + + +def rescale_module(module, reference): + for sub in module.modules(): + if isinstance(sub, (nn.Conv1d, nn.ConvTranspose1d)): + rescale_conv(sub, reference) + +def auto_load_demucs_model_v2(sources, demucs_model_name): + + if '48' in demucs_model_name: + channels=48 + elif 'unittest' in demucs_model_name: + channels=4 + else: + channels=64 + + if 'tasnet' in demucs_model_name: + init_demucs_model = ConvTasNet(sources, X=10) + else: + init_demucs_model = Demucs(sources, channels=channels) + + return init_demucs_model + +class Demucs(nn.Module): + @capture_init + def __init__(self, + sources, + audio_channels=2, + channels=64, + depth=6, + rewrite=True, + glu=True, + rescale=0.1, + resample=True, + kernel_size=8, + stride=4, + growth=2., + lstm_layers=2, + context=3, + normalize=False, + samplerate=44100, + segment_length=4 * 10 * 44100): + """ + Args: + sources (list[str]): list of source names + audio_channels (int): stereo or mono + channels (int): first convolution channels + depth (int): number of encoder/decoder layers + rewrite (bool): add 1x1 convolution to each encoder layer + and a convolution to each decoder layer. + For the decoder layer, `context` gives the kernel size. + glu (bool): use glu instead of ReLU + resample_input (bool): upsample x2 the input and downsample /2 the output. 
+ rescale (int): rescale initial weights of convolutions + to get their standard deviation closer to `rescale` + kernel_size (int): kernel size for convolutions + stride (int): stride for convolutions + growth (float): multiply (resp divide) number of channels by that + for each layer of the encoder (resp decoder) + lstm_layers (int): number of lstm layers, 0 = no lstm + context (int): kernel size of the convolution in the + decoder before the transposed convolution. If > 1, + will provide some context from neighboring time + steps. + samplerate (int): stored as meta information for easing + future evaluations of the model. + segment_length (int): stored as meta information for easing + future evaluations of the model. Length of the segments on which + the model was trained. + """ + + super().__init__() + self.audio_channels = audio_channels + self.sources = sources + self.kernel_size = kernel_size + self.context = context + self.stride = stride + self.depth = depth + self.resample = resample + self.channels = channels + self.normalize = normalize + self.samplerate = samplerate + self.segment_length = segment_length + + self.encoder = nn.ModuleList() + self.decoder = nn.ModuleList() + + if glu: + activation = nn.GLU(dim=1) + ch_scale = 2 + else: + activation = nn.ReLU() + ch_scale = 1 + in_channels = audio_channels + for index in range(depth): + encode = [] + encode += [nn.Conv1d(in_channels, channels, kernel_size, stride), nn.ReLU()] + if rewrite: + encode += [nn.Conv1d(channels, ch_scale * channels, 1), activation] + self.encoder.append(nn.Sequential(*encode)) + + decode = [] + if index > 0: + out_channels = in_channels + else: + out_channels = len(self.sources) * audio_channels + if rewrite: + decode += [nn.Conv1d(channels, ch_scale * channels, context), activation] + decode += [nn.ConvTranspose1d(channels, out_channels, kernel_size, stride)] + if index > 0: + decode.append(nn.ReLU()) + self.decoder.insert(0, nn.Sequential(*decode)) + in_channels = channels + channels = int(growth * channels) + + channels = in_channels + + if lstm_layers: + self.lstm = BLSTM(channels, lstm_layers) + else: + self.lstm = None + + if rescale: + rescale_module(self, reference=rescale) + + def valid_length(self, length): + """ + Return the nearest valid length to use with the model so that + there is no time steps left over in a convolutions, e.g. for all + layers, size of the input - kernel_size % stride = 0. + + If the mixture has a valid length, the estimated sources + will have exactly the same length when context = 1. If context > 1, + the two signals can be center trimmed to match. + + For training, extracts should have a valid length.For evaluation + on full tracks we recommend passing `pad = True` to :method:`forward`. 
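+
+        Note that with `resample=True` (the default) the length is doubled before
+        the per-layer rounding below and halved again at the end.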
+ """ + if self.resample: + length *= 2 + for _ in range(self.depth): + length = math.ceil((length - self.kernel_size) / self.stride) + 1 + length = max(1, length) + length += self.context - 1 + for _ in range(self.depth): + length = (length - 1) * self.stride + self.kernel_size + + if self.resample: + length = math.ceil(length / 2) + return int(length) + + def forward(self, mix): + x = mix + + if self.normalize: + mono = mix.mean(dim=1, keepdim=True) + mean = mono.mean(dim=-1, keepdim=True) + std = mono.std(dim=-1, keepdim=True) + else: + mean = 0 + std = 1 + + x = (x - mean) / (1e-5 + std) + + if self.resample: + x = julius.resample_frac(x, 1, 2) + + saved = [] + for encode in self.encoder: + x = encode(x) + saved.append(x) + if self.lstm: + x = self.lstm(x) + for decode in self.decoder: + skip = center_trim(saved.pop(-1), x) + x = x + skip + x = decode(x) + + if self.resample: + x = julius.resample_frac(x, 2, 1) + x = x * std + mean + x = x.view(x.size(0), len(self.sources), self.audio_channels, x.size(-1)) + return x diff --git a/demucs/pretrained.py b/demucs/pretrained.py new file mode 100644 index 0000000000000000000000000000000000000000..25aa6856eab03e17b6addb6d2ae5e367f49355f9 --- /dev/null +++ b/demucs/pretrained.py @@ -0,0 +1,180 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Loading pretrained models. +""" + +import logging +from pathlib import Path +import typing as tp + +#from dora.log import fatal + +import logging + +from diffq import DiffQuantizer +import torch.hub + +from .model import Demucs +from .tasnet_v2 import ConvTasNet +from .utils import set_state + +from .hdemucs import HDemucs +from .repo import RemoteRepo, LocalRepo, ModelOnlyRepo, BagOnlyRepo, AnyModelRepo, ModelLoadingError # noqa + +logger = logging.getLogger(__name__) +ROOT_URL = "https://dl.fbaipublicfiles.com/demucs/mdx_final/" +REMOTE_ROOT = Path(__file__).parent / 'remote' + +SOURCES = ["drums", "bass", "other", "vocals"] + + +def demucs_unittest(): + model = HDemucs(channels=4, sources=SOURCES) + return model + + +def add_model_flags(parser): + group = parser.add_mutually_exclusive_group(required=False) + group.add_argument("-s", "--sig", help="Locally trained XP signature.") + group.add_argument("-n", "--name", default="mdx_extra_q", + help="Pretrained model name or signature. Default is mdx_extra_q.") + parser.add_argument("--repo", type=Path, + help="Folder containing all pre-trained models for use with -n.") + + +def _parse_remote_files(remote_file_list) -> tp.Dict[str, str]: + root: str = '' + models: tp.Dict[str, str] = {} + for line in remote_file_list.read_text().split('\n'): + line = line.strip() + if line.startswith('#'): + continue + elif line.startswith('root:'): + root = line.split(':', 1)[1].strip() + else: + sig = line.split('-', 1)[0] + assert sig not in models + models[sig] = ROOT_URL + root + line + return models + +def get_model(name: str, + repo: tp.Optional[Path] = None): + """`name` must be a bag of models name or a pretrained signature + from the remote AWS model repo or the specified local repo if `repo` is not None. 
+ """ + if name == 'demucs_unittest': + return demucs_unittest() + model_repo: ModelOnlyRepo + if repo is None: + models = _parse_remote_files(REMOTE_ROOT / 'files.txt') + model_repo = RemoteRepo(models) + bag_repo = BagOnlyRepo(REMOTE_ROOT, model_repo) + else: + if not repo.is_dir(): + fatal(f"{repo} must exist and be a directory.") + model_repo = LocalRepo(repo) + bag_repo = BagOnlyRepo(repo, model_repo) + any_repo = AnyModelRepo(model_repo, bag_repo) + model = any_repo.get_model(name) + model.eval() + return model + +def get_model_from_args(args): + """ + Load local model package or pre-trained model. + """ + return get_model(name=args.name, repo=args.repo) + +logger = logging.getLogger(__name__) +ROOT = "https://dl.fbaipublicfiles.com/demucs/v3.0/" + +PRETRAINED_MODELS = { + 'demucs': 'e07c671f', + 'demucs48_hq': '28a1282c', + 'demucs_extra': '3646af93', + 'demucs_quantized': '07afea75', + 'tasnet': 'beb46fac', + 'tasnet_extra': 'df3777b2', + 'demucs_unittest': '09ebc15f', +} + +SOURCES = ["drums", "bass", "other", "vocals"] + + +def get_url(name): + sig = PRETRAINED_MODELS[name] + return ROOT + name + "-" + sig[:8] + ".th" + +def is_pretrained(name): + return name in PRETRAINED_MODELS + + +def load_pretrained(name): + if name == "demucs": + return demucs(pretrained=True) + elif name == "demucs48_hq": + return demucs(pretrained=True, hq=True, channels=48) + elif name == "demucs_extra": + return demucs(pretrained=True, extra=True) + elif name == "demucs_quantized": + return demucs(pretrained=True, quantized=True) + elif name == "demucs_unittest": + return demucs_unittest(pretrained=True) + elif name == "tasnet": + return tasnet(pretrained=True) + elif name == "tasnet_extra": + return tasnet(pretrained=True, extra=True) + else: + raise ValueError(f"Invalid pretrained name {name}") + + +def _load_state(name, model, quantizer=None): + url = get_url(name) + state = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True) + set_state(model, quantizer, state) + if quantizer: + quantizer.detach() + + +def demucs_unittest(pretrained=True): + model = Demucs(channels=4, sources=SOURCES) + if pretrained: + _load_state('demucs_unittest', model) + return model + + +def demucs(pretrained=True, extra=False, quantized=False, hq=False, channels=64): + if not pretrained and (extra or quantized or hq): + raise ValueError("if extra or quantized is True, pretrained must be True.") + model = Demucs(sources=SOURCES, channels=channels) + if pretrained: + name = 'demucs' + if channels != 64: + name += str(channels) + quantizer = None + if sum([extra, quantized, hq]) > 1: + raise ValueError("Only one of extra, quantized, hq, can be True.") + if quantized: + quantizer = DiffQuantizer(model, group_size=8, min_size=1) + name += '_quantized' + if extra: + name += '_extra' + if hq: + name += '_hq' + _load_state(name, model, quantizer) + return model + + +def tasnet(pretrained=True, extra=False): + if not pretrained and extra: + raise ValueError("if extra is True, pretrained must be True.") + model = ConvTasNet(X=10, sources=SOURCES) + if pretrained: + name = 'tasnet' + if extra: + name = 'tasnet_extra' + _load_state(name, model) + return model \ No newline at end of file diff --git a/demucs/repo.py b/demucs/repo.py new file mode 100644 index 0000000000000000000000000000000000000000..65ff6b33c7771b7743659d52151da67dc18082a8 --- /dev/null +++ b/demucs/repo.py @@ -0,0 +1,148 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. 
+# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Represents a model repository, including pre-trained models and bags of models. +A repo can either be the main remote repository stored in AWS, or a local repository +with your own models. +""" + +from hashlib import sha256 +from pathlib import Path +import typing as tp + +import torch +import yaml + +from .apply import BagOfModels, Model +from .states import load_model + + +AnyModel = tp.Union[Model, BagOfModels] + + +class ModelLoadingError(RuntimeError): + pass + + +def check_checksum(path: Path, checksum: str): + sha = sha256() + with open(path, 'rb') as file: + while True: + buf = file.read(2**20) + if not buf: + break + sha.update(buf) + actual_checksum = sha.hexdigest()[:len(checksum)] + if actual_checksum != checksum: + raise ModelLoadingError(f'Invalid checksum for file {path}, ' + f'expected {checksum} but got {actual_checksum}') + +class ModelOnlyRepo: + """Base class for all model only repos. + """ + def has_model(self, sig: str) -> bool: + raise NotImplementedError() + + def get_model(self, sig: str) -> Model: + raise NotImplementedError() + + +class RemoteRepo(ModelOnlyRepo): + def __init__(self, models: tp.Dict[str, str]): + self._models = models + + def has_model(self, sig: str) -> bool: + return sig in self._models + + def get_model(self, sig: str) -> Model: + try: + url = self._models[sig] + except KeyError: + raise ModelLoadingError(f'Could not find a pre-trained model with signature {sig}.') + pkg = torch.hub.load_state_dict_from_url(url, map_location='cpu', check_hash=True) + return load_model(pkg) + + +class LocalRepo(ModelOnlyRepo): + def __init__(self, root: Path): + self.root = root + self.scan() + + def scan(self): + self._models = {} + self._checksums = {} + for file in self.root.iterdir(): + if file.suffix == '.th': + if '-' in file.stem: + xp_sig, checksum = file.stem.split('-') + self._checksums[xp_sig] = checksum + else: + xp_sig = file.stem + if xp_sig in self._models: + print('Whats xp? ', xp_sig) + raise ModelLoadingError( + f'Duplicate pre-trained model exist for signature {xp_sig}. ' + 'Please delete all but one.') + self._models[xp_sig] = file + + def has_model(self, sig: str) -> bool: + return sig in self._models + + def get_model(self, sig: str) -> Model: + try: + file = self._models[sig] + except KeyError: + raise ModelLoadingError(f'Could not find pre-trained model with signature {sig}.') + if sig in self._checksums: + check_checksum(file, self._checksums[sig]) + return load_model(file) + + +class BagOnlyRepo: + """Handles only YAML files containing bag of models, leaving the actual + model loading to some Repo. 
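+
+    Expected YAML layout (sketch based on `get_model` below; only `models`
+    is required):
+
+        models: [sig1, sig2]   # signatures resolved through the model repo
+        weights: ...           # optional
+        segment: ...           # optional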
+ """ + def __init__(self, root: Path, model_repo: ModelOnlyRepo): + self.root = root + self.model_repo = model_repo + self.scan() + + def scan(self): + self._bags = {} + for file in self.root.iterdir(): + if file.suffix == '.yaml': + self._bags[file.stem] = file + + def has_model(self, name: str) -> bool: + return name in self._bags + + def get_model(self, name: str) -> BagOfModels: + try: + yaml_file = self._bags[name] + except KeyError: + raise ModelLoadingError(f'{name} is neither a single pre-trained model or ' + 'a bag of models.') + bag = yaml.safe_load(open(yaml_file)) + signatures = bag['models'] + models = [self.model_repo.get_model(sig) for sig in signatures] + weights = bag.get('weights') + segment = bag.get('segment') + return BagOfModels(models, weights, segment) + + +class AnyModelRepo: + def __init__(self, model_repo: ModelOnlyRepo, bag_repo: BagOnlyRepo): + self.model_repo = model_repo + self.bag_repo = bag_repo + + def has_model(self, name_or_sig: str) -> bool: + return self.model_repo.has_model(name_or_sig) or self.bag_repo.has_model(name_or_sig) + + def get_model(self, name_or_sig: str) -> AnyModel: + print('name_or_sig: ', name_or_sig) + if self.model_repo.has_model(name_or_sig): + return self.model_repo.get_model(name_or_sig) + else: + return self.bag_repo.get_model(name_or_sig) diff --git a/demucs/spec.py b/demucs/spec.py new file mode 100644 index 0000000000000000000000000000000000000000..9c79cb65c6531846f8a2085f3765ebb0fc459a23 --- /dev/null +++ b/demucs/spec.py @@ -0,0 +1,53 @@ +# Copyright (c) Meta Platforms, Inc. and affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +"""Conveniance wrapper to perform STFT and iSTFT""" + +import torch as th + + +def spectro(x, n_fft=512, hop_length=None, pad=0): + *other, length = x.shape + x = x.reshape(-1, length) + + device_type = x.device.type + is_other_gpu = not device_type in ["cuda", "cpu"] + + if is_other_gpu: + x = x.cpu() + z = th.stft(x, + n_fft * (1 + pad), + hop_length or n_fft // 4, + window=th.hann_window(n_fft).to(x), + win_length=n_fft, + normalized=True, + center=True, + return_complex=True, + pad_mode='reflect') + _, freqs, frame = z.shape + return z.view(*other, freqs, frame) + + +def ispectro(z, hop_length=None, length=None, pad=0): + *other, freqs, frames = z.shape + n_fft = 2 * freqs - 2 + z = z.view(-1, freqs, frames) + win_length = n_fft // (1 + pad) + + device_type = z.device.type + is_other_gpu = not device_type in ["cuda", "cpu"] + + if is_other_gpu: + z = z.cpu() + x = th.istft(z, + n_fft, + hop_length, + window=th.hann_window(win_length).to(z.real), + win_length=win_length, + normalized=True, + length=length, + center=True) + _, length = x.shape + return x.view(*other, length) diff --git a/demucs/states.py b/demucs/states.py new file mode 100644 index 0000000000000000000000000000000000000000..db17a182dc1b19f7934a1eccbe2f67437c2a78f1 --- /dev/null +++ b/demucs/states.py @@ -0,0 +1,148 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +""" +Utilities to save and load models. 
+""" +from contextlib import contextmanager + +import functools +import hashlib +import inspect +import io +from pathlib import Path +import warnings + +from omegaconf import OmegaConf +from diffq import DiffQuantizer, UniformQuantizer, restore_quantized_state +import torch + + +def get_quantizer(model, args, optimizer=None): + """Return the quantizer given the XP quantization args.""" + quantizer = None + if args.diffq: + quantizer = DiffQuantizer( + model, min_size=args.min_size, group_size=args.group_size) + if optimizer is not None: + quantizer.setup_optimizer(optimizer) + elif args.qat: + quantizer = UniformQuantizer( + model, bits=args.qat, min_size=args.min_size) + return quantizer + + +def load_model(path_or_package, strict=False): + """Load a model from the given serialized model, either given as a dict (already loaded) + or a path to a file on disk.""" + if isinstance(path_or_package, dict): + package = path_or_package + elif isinstance(path_or_package, (str, Path)): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + path = path_or_package + package = torch.load(path, 'cpu') + else: + raise ValueError(f"Invalid type for {path_or_package}.") + + klass = package["klass"] + args = package["args"] + kwargs = package["kwargs"] + + if strict: + model = klass(*args, **kwargs) + else: + sig = inspect.signature(klass) + for key in list(kwargs): + if key not in sig.parameters: + warnings.warn("Dropping inexistant parameter " + key) + del kwargs[key] + model = klass(*args, **kwargs) + + state = package["state"] + + set_state(model, state) + return model + + +def get_state(model, quantizer, half=False): + """Get the state from a model, potentially with quantization applied. + If `half` is True, model are stored as half precision, which shouldn't impact performance + but half the state size.""" + if quantizer is None: + dtype = torch.half if half else None + state = {k: p.data.to(device='cpu', dtype=dtype) for k, p in model.state_dict().items()} + else: + state = quantizer.get_quantized_state() + state['__quantized'] = True + return state + + +def set_state(model, state, quantizer=None): + """Set the state on a given model.""" + if state.get('__quantized'): + if quantizer is not None: + quantizer.restore_quantized_state(model, state['quantized']) + else: + restore_quantized_state(model, state) + else: + model.load_state_dict(state) + return state + + +def save_with_checksum(content, path): + """Save the given value on disk, along with a sha256 hash. 
+ Should be used with the output of either `serialize_model` or `get_state`.""" + buf = io.BytesIO() + torch.save(content, buf) + sig = hashlib.sha256(buf.getvalue()).hexdigest()[:8] + + path = path.parent / (path.stem + "-" + sig + path.suffix) + path.write_bytes(buf.getvalue()) + + +def serialize_model(model, training_args, quantizer=None, half=True): + args, kwargs = model._init_args_kwargs + klass = model.__class__ + + state = get_state(model, quantizer, half) + return { + 'klass': klass, + 'args': args, + 'kwargs': kwargs, + 'state': state, + 'training_args': OmegaConf.to_container(training_args, resolve=True), + } + + +def copy_state(state): + return {k: v.cpu().clone() for k, v in state.items()} + + +@contextmanager +def swap_state(model, state): + """ + Context manager that swaps the state of a model, e.g: + + # model is in old state + with swap_state(model, new_state): + # model in new state + # model back to old state + """ + old_state = copy_state(model.state_dict()) + model.load_state_dict(state, strict=False) + try: + yield + finally: + model.load_state_dict(old_state) + + +def capture_init(init): + @functools.wraps(init) + def __init__(self, *args, **kwargs): + self._init_args_kwargs = (args, kwargs) + init(self, *args, **kwargs) + + return __init__ diff --git a/demucs/tasnet.py b/demucs/tasnet.py new file mode 100644 index 0000000000000000000000000000000000000000..9cb7a957b44a21ca64cb580a02e2c33468f9cb8d --- /dev/null +++ b/demucs/tasnet.py @@ -0,0 +1,447 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# +# Created on 2018/12 +# Author: Kaituo XU +# Modified on 2019/11 by Alexandre Defossez, added support for multiple output channels +# Here is the original license: +# The MIT License (MIT) +# +# Copyright (c) 2018 Kaituo XU +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. 
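+#
+# Conv-TasNet in short: a learnt 1-D convolutional encoder, a temporal
+# convolutional network that estimates one mask per source, and a linear
+# decoder with overlap-and-add reconstruction (see the classes below).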
+ +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .utils import capture_init + +EPS = 1e-8 + + +def overlap_and_add(signal, frame_step): + outer_dimensions = signal.size()[:-2] + frames, frame_length = signal.size()[-2:] + + subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor + subframe_step = frame_step // subframe_length + subframes_per_frame = frame_length // subframe_length + output_size = frame_step * (frames - 1) + frame_length + output_subframes = output_size // subframe_length + + subframe_signal = signal.view(*outer_dimensions, -1, subframe_length) + + frame = torch.arange(0, output_subframes, + device=signal.device).unfold(0, subframes_per_frame, subframe_step) + frame = frame.long() # signal may in GPU or CPU + frame = frame.contiguous().view(-1) + + result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length) + result.index_add_(-2, frame, subframe_signal) + result = result.view(*outer_dimensions, -1) + return result + + +class ConvTasNet(nn.Module): + @capture_init + def __init__(self, + N=256, + L=20, + B=256, + H=512, + P=3, + X=8, + R=4, + C=4, + audio_channels=1, + samplerate=44100, + norm_type="gLN", + causal=False, + mask_nonlinear='relu'): + """ + Args: + N: Number of filters in autoencoder + L: Length of the filters (in samples) + B: Number of channels in bottleneck 1 × 1-conv block + H: Number of channels in convolutional blocks + P: Kernel size in convolutional blocks + X: Number of convolutional blocks in each repeat + R: Number of repeats + C: Number of speakers + norm_type: BN, gLN, cLN + causal: causal or non-causal + mask_nonlinear: use which non-linear function to generate mask + """ + super(ConvTasNet, self).__init__() + # Hyper-parameter + self.N, self.L, self.B, self.H, self.P, self.X, self.R, self.C = N, L, B, H, P, X, R, C + self.norm_type = norm_type + self.causal = causal + self.mask_nonlinear = mask_nonlinear + self.audio_channels = audio_channels + self.samplerate = samplerate + # Components + self.encoder = Encoder(L, N, audio_channels) + self.separator = TemporalConvNet(N, B, H, P, X, R, C, norm_type, causal, mask_nonlinear) + self.decoder = Decoder(N, L, audio_channels) + # init + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_normal_(p) + + def valid_length(self, length): + return length + + def forward(self, mixture): + """ + Args: + mixture: [M, T], M is batch size, T is #samples + Returns: + est_source: [M, C, T] + """ + mixture_w = self.encoder(mixture) + est_mask = self.separator(mixture_w) + est_source = self.decoder(mixture_w, est_mask) + + # T changed after conv1d in encoder, fix it here + T_origin = mixture.size(-1) + T_conv = est_source.size(-1) + est_source = F.pad(est_source, (0, T_origin - T_conv)) + return est_source + + +class Encoder(nn.Module): + """Estimation of the nonnegative mixture weight by a 1-D conv layer. 
+ """ + def __init__(self, L, N, audio_channels): + super(Encoder, self).__init__() + # Hyper-parameter + self.L, self.N = L, N + # Components + # 50% overlap + self.conv1d_U = nn.Conv1d(audio_channels, N, kernel_size=L, stride=L // 2, bias=False) + + def forward(self, mixture): + """ + Args: + mixture: [M, T], M is batch size, T is #samples + Returns: + mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1 + """ + mixture_w = F.relu(self.conv1d_U(mixture)) # [M, N, K] + return mixture_w + + +class Decoder(nn.Module): + def __init__(self, N, L, audio_channels): + super(Decoder, self).__init__() + # Hyper-parameter + self.N, self.L = N, L + self.audio_channels = audio_channels + # Components + self.basis_signals = nn.Linear(N, audio_channels * L, bias=False) + + def forward(self, mixture_w, est_mask): + """ + Args: + mixture_w: [M, N, K] + est_mask: [M, C, N, K] + Returns: + est_source: [M, C, T] + """ + # D = W * M + source_w = torch.unsqueeze(mixture_w, 1) * est_mask # [M, C, N, K] + source_w = torch.transpose(source_w, 2, 3) # [M, C, K, N] + # S = DV + est_source = self.basis_signals(source_w) # [M, C, K, ac * L] + m, c, k, _ = est_source.size() + est_source = est_source.view(m, c, k, self.audio_channels, -1).transpose(2, 3).contiguous() + est_source = overlap_and_add(est_source, self.L // 2) # M x C x ac x T + return est_source + + +class TemporalConvNet(nn.Module): + def __init__(self, N, B, H, P, X, R, C, norm_type="gLN", causal=False, mask_nonlinear='relu'): + """ + Args: + N: Number of filters in autoencoder + B: Number of channels in bottleneck 1 × 1-conv block + H: Number of channels in convolutional blocks + P: Kernel size in convolutional blocks + X: Number of convolutional blocks in each repeat + R: Number of repeats + C: Number of speakers + norm_type: BN, gLN, cLN + causal: causal or non-causal + mask_nonlinear: use which non-linear function to generate mask + """ + super(TemporalConvNet, self).__init__() + # Hyper-parameter + self.C = C + self.mask_nonlinear = mask_nonlinear + # Components + # [M, N, K] -> [M, N, K] + layer_norm = ChannelwiseLayerNorm(N) + # [M, N, K] -> [M, B, K] + bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False) + # [M, B, K] -> [M, B, K] + repeats = [] + for r in range(R): + blocks = [] + for x in range(X): + dilation = 2**x + padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2 + blocks += [ + TemporalBlock(B, + H, + P, + stride=1, + padding=padding, + dilation=dilation, + norm_type=norm_type, + causal=causal) + ] + repeats += [nn.Sequential(*blocks)] + temporal_conv_net = nn.Sequential(*repeats) + # [M, B, K] -> [M, C*N, K] + mask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False) + # Put together + self.network = nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net, + mask_conv1x1) + + def forward(self, mixture_w): + """ + Keep this API same with TasNet + Args: + mixture_w: [M, N, K], M is batch size + returns: + est_mask: [M, C, N, K] + """ + M, N, K = mixture_w.size() + score = self.network(mixture_w) # [M, N, K] -> [M, C*N, K] + score = score.view(M, self.C, N, K) # [M, C*N, K] -> [M, C, N, K] + if self.mask_nonlinear == 'softmax': + est_mask = F.softmax(score, dim=1) + elif self.mask_nonlinear == 'relu': + est_mask = F.relu(score) + else: + raise ValueError("Unsupported mask non-linear function") + return est_mask + + +class TemporalBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + norm_type="gLN", + causal=False): + super(TemporalBlock, self).__init__() + 
# [M, B, K] -> [M, H, K] + conv1x1 = nn.Conv1d(in_channels, out_channels, 1, bias=False) + prelu = nn.PReLU() + norm = chose_norm(norm_type, out_channels) + # [M, H, K] -> [M, B, K] + dsconv = DepthwiseSeparableConv(out_channels, in_channels, kernel_size, stride, padding, + dilation, norm_type, causal) + # Put together + self.net = nn.Sequential(conv1x1, prelu, norm, dsconv) + + def forward(self, x): + """ + Args: + x: [M, B, K] + Returns: + [M, B, K] + """ + residual = x + out = self.net(x) + # TODO: when P = 3 here works fine, but when P = 2 maybe need to pad? + return out + residual # look like w/o F.relu is better than w/ F.relu + # return F.relu(out + residual) + + +class DepthwiseSeparableConv(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + norm_type="gLN", + causal=False): + super(DepthwiseSeparableConv, self).__init__() + # Use `groups` option to implement depthwise convolution + # [M, H, K] -> [M, H, K] + depthwise_conv = nn.Conv1d(in_channels, + in_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=in_channels, + bias=False) + if causal: + chomp = Chomp1d(padding) + prelu = nn.PReLU() + norm = chose_norm(norm_type, in_channels) + # [M, H, K] -> [M, B, K] + pointwise_conv = nn.Conv1d(in_channels, out_channels, 1, bias=False) + # Put together + if causal: + self.net = nn.Sequential(depthwise_conv, chomp, prelu, norm, pointwise_conv) + else: + self.net = nn.Sequential(depthwise_conv, prelu, norm, pointwise_conv) + + def forward(self, x): + """ + Args: + x: [M, H, K] + Returns: + result: [M, B, K] + """ + return self.net(x) + + +class Chomp1d(nn.Module): + """To ensure the output length is the same as the input. + """ + def __init__(self, chomp_size): + super(Chomp1d, self).__init__() + self.chomp_size = chomp_size + + def forward(self, x): + """ + Args: + x: [M, H, Kpad] + Returns: + [M, H, K] + """ + return x[:, :, :-self.chomp_size].contiguous() + + +def chose_norm(norm_type, channel_size): + """The input of normlization will be (M, C, K), where M is batch size, + C is channel size and K is sequence length. + """ + if norm_type == "gLN": + return GlobalLayerNorm(channel_size) + elif norm_type == "cLN": + return ChannelwiseLayerNorm(channel_size) + elif norm_type == "id": + return nn.Identity() + else: # norm_type == "BN": + # Given input (M, C, K), nn.BatchNorm1d(C) will accumulate statics + # along M and K, so this BN usage is right. 
+ return nn.BatchNorm1d(channel_size) + + +# TODO: Use nn.LayerNorm to impl cLN to speed up +class ChannelwiseLayerNorm(nn.Module): + """Channel-wise Layer Normalization (cLN)""" + def __init__(self, channel_size): + super(ChannelwiseLayerNorm, self).__init__() + self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.reset_parameters() + + def reset_parameters(self): + self.gamma.data.fill_(1) + self.beta.data.zero_() + + def forward(self, y): + """ + Args: + y: [M, N, K], M is batch size, N is channel size, K is length + Returns: + cLN_y: [M, N, K] + """ + mean = torch.mean(y, dim=1, keepdim=True) # [M, 1, K] + var = torch.var(y, dim=1, keepdim=True, unbiased=False) # [M, 1, K] + cLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta + return cLN_y + + +class GlobalLayerNorm(nn.Module): + """Global Layer Normalization (gLN)""" + def __init__(self, channel_size): + super(GlobalLayerNorm, self).__init__() + self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.reset_parameters() + + def reset_parameters(self): + self.gamma.data.fill_(1) + self.beta.data.zero_() + + def forward(self, y): + """ + Args: + y: [M, N, K], M is batch size, N is channel size, K is length + Returns: + gLN_y: [M, N, K] + """ + # TODO: in torch 1.0, torch.mean() support dim list + mean = y.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) # [M, 1, 1] + var = (torch.pow(y - mean, 2)).mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) + gLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta + return gLN_y + + +if __name__ == "__main__": + torch.manual_seed(123) + M, N, L, T = 2, 3, 4, 12 + K = 2 * T // L - 1 + B, H, P, X, R, C, norm_type, causal = 2, 3, 3, 3, 2, 2, "gLN", False + mixture = torch.randint(3, (M, T)) + # test Encoder + encoder = Encoder(L, N) + encoder.conv1d_U.weight.data = torch.randint(2, encoder.conv1d_U.weight.size()) + mixture_w = encoder(mixture) + print('mixture', mixture) + print('U', encoder.conv1d_U.weight) + print('mixture_w', mixture_w) + print('mixture_w size', mixture_w.size()) + + # test TemporalConvNet + separator = TemporalConvNet(N, B, H, P, X, R, C, norm_type=norm_type, causal=causal) + est_mask = separator(mixture_w) + print('est_mask', est_mask) + + # test Decoder + decoder = Decoder(N, L) + est_mask = torch.randint(2, (B, K, C, N)) + est_source = decoder(mixture_w, est_mask) + print('est_source', est_source) + + # test Conv-TasNet + conv_tasnet = ConvTasNet(N, L, B, H, P, X, R, C, norm_type=norm_type) + est_source = conv_tasnet(mixture) + print('est_source', est_source) + print('est_source size', est_source.size()) diff --git a/demucs/tasnet_v2.py b/demucs/tasnet_v2.py new file mode 100644 index 0000000000000000000000000000000000000000..ecc1257925ea8f4fbe389ddd6d73ce9fdf45f6d4 --- /dev/null +++ b/demucs/tasnet_v2.py @@ -0,0 +1,452 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. 
+# +# Created on 2018/12 +# Author: Kaituo XU +# Modified on 2019/11 by Alexandre Defossez, added support for multiple output channels +# Here is the original license: +# The MIT License (MIT) +# +# Copyright (c) 2018 Kaituo XU +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +import math + +import torch +import torch.nn as nn +import torch.nn.functional as F + +from .utils import capture_init + +EPS = 1e-8 + + +def overlap_and_add(signal, frame_step): + outer_dimensions = signal.size()[:-2] + frames, frame_length = signal.size()[-2:] + + subframe_length = math.gcd(frame_length, frame_step) # gcd=Greatest Common Divisor + subframe_step = frame_step // subframe_length + subframes_per_frame = frame_length // subframe_length + output_size = frame_step * (frames - 1) + frame_length + output_subframes = output_size // subframe_length + + subframe_signal = signal.view(*outer_dimensions, -1, subframe_length) + + frame = torch.arange(0, output_subframes, + device=signal.device).unfold(0, subframes_per_frame, subframe_step) + frame = frame.long() # signal may in GPU or CPU + frame = frame.contiguous().view(-1) + + result = signal.new_zeros(*outer_dimensions, output_subframes, subframe_length) + result.index_add_(-2, frame, subframe_signal) + result = result.view(*outer_dimensions, -1) + return result + + +class ConvTasNet(nn.Module): + @capture_init + def __init__(self, + sources, + N=256, + L=20, + B=256, + H=512, + P=3, + X=8, + R=4, + audio_channels=2, + norm_type="gLN", + causal=False, + mask_nonlinear='relu', + samplerate=44100, + segment_length=44100 * 2 * 4): + """ + Args: + sources: list of sources + N: Number of filters in autoencoder + L: Length of the filters (in samples) + B: Number of channels in bottleneck 1 × 1-conv block + H: Number of channels in convolutional blocks + P: Kernel size in convolutional blocks + X: Number of convolutional blocks in each repeat + R: Number of repeats + norm_type: BN, gLN, cLN + causal: causal or non-causal + mask_nonlinear: use which non-linear function to generate mask + """ + super(ConvTasNet, self).__init__() + # Hyper-parameter + self.sources = sources + self.C = len(sources) + self.N, self.L, self.B, self.H, self.P, self.X, self.R = N, L, B, H, P, X, R + self.norm_type = norm_type + self.causal = causal + self.mask_nonlinear = mask_nonlinear + self.audio_channels = audio_channels + self.samplerate = samplerate + self.segment_length = segment_length + # Components + self.encoder = Encoder(L, N, audio_channels) + self.separator = 
TemporalConvNet( + N, B, H, P, X, R, self.C, norm_type, causal, mask_nonlinear) + self.decoder = Decoder(N, L, audio_channels) + # init + for p in self.parameters(): + if p.dim() > 1: + nn.init.xavier_normal_(p) + + def valid_length(self, length): + return length + + def forward(self, mixture): + """ + Args: + mixture: [M, T], M is batch size, T is #samples + Returns: + est_source: [M, C, T] + """ + mixture_w = self.encoder(mixture) + est_mask = self.separator(mixture_w) + est_source = self.decoder(mixture_w, est_mask) + + # T changed after conv1d in encoder, fix it here + T_origin = mixture.size(-1) + T_conv = est_source.size(-1) + est_source = F.pad(est_source, (0, T_origin - T_conv)) + return est_source + + +class Encoder(nn.Module): + """Estimation of the nonnegative mixture weight by a 1-D conv layer. + """ + def __init__(self, L, N, audio_channels): + super(Encoder, self).__init__() + # Hyper-parameter + self.L, self.N = L, N + # Components + # 50% overlap + self.conv1d_U = nn.Conv1d(audio_channels, N, kernel_size=L, stride=L // 2, bias=False) + + def forward(self, mixture): + """ + Args: + mixture: [M, T], M is batch size, T is #samples + Returns: + mixture_w: [M, N, K], where K = (T-L)/(L/2)+1 = 2T/L-1 + """ + mixture_w = F.relu(self.conv1d_U(mixture)) # [M, N, K] + return mixture_w + + +class Decoder(nn.Module): + def __init__(self, N, L, audio_channels): + super(Decoder, self).__init__() + # Hyper-parameter + self.N, self.L = N, L + self.audio_channels = audio_channels + # Components + self.basis_signals = nn.Linear(N, audio_channels * L, bias=False) + + def forward(self, mixture_w, est_mask): + """ + Args: + mixture_w: [M, N, K] + est_mask: [M, C, N, K] + Returns: + est_source: [M, C, T] + """ + # D = W * M + source_w = torch.unsqueeze(mixture_w, 1) * est_mask # [M, C, N, K] + source_w = torch.transpose(source_w, 2, 3) # [M, C, K, N] + # S = DV + est_source = self.basis_signals(source_w) # [M, C, K, ac * L] + m, c, k, _ = est_source.size() + est_source = est_source.view(m, c, k, self.audio_channels, -1).transpose(2, 3).contiguous() + est_source = overlap_and_add(est_source, self.L // 2) # M x C x ac x T + return est_source + + +class TemporalConvNet(nn.Module): + def __init__(self, N, B, H, P, X, R, C, norm_type="gLN", causal=False, mask_nonlinear='relu'): + """ + Args: + N: Number of filters in autoencoder + B: Number of channels in bottleneck 1 × 1-conv block + H: Number of channels in convolutional blocks + P: Kernel size in convolutional blocks + X: Number of convolutional blocks in each repeat + R: Number of repeats + C: Number of speakers + norm_type: BN, gLN, cLN + causal: causal or non-causal + mask_nonlinear: use which non-linear function to generate mask + """ + super(TemporalConvNet, self).__init__() + # Hyper-parameter + self.C = C + self.mask_nonlinear = mask_nonlinear + # Components + # [M, N, K] -> [M, N, K] + layer_norm = ChannelwiseLayerNorm(N) + # [M, N, K] -> [M, B, K] + bottleneck_conv1x1 = nn.Conv1d(N, B, 1, bias=False) + # [M, B, K] -> [M, B, K] + repeats = [] + for r in range(R): + blocks = [] + for x in range(X): + dilation = 2**x + padding = (P - 1) * dilation if causal else (P - 1) * dilation // 2 + blocks += [ + TemporalBlock(B, + H, + P, + stride=1, + padding=padding, + dilation=dilation, + norm_type=norm_type, + causal=causal) + ] + repeats += [nn.Sequential(*blocks)] + temporal_conv_net = nn.Sequential(*repeats) + # [M, B, K] -> [M, C*N, K] + mask_conv1x1 = nn.Conv1d(B, C * N, 1, bias=False) + # Put together + self.network = 
nn.Sequential(layer_norm, bottleneck_conv1x1, temporal_conv_net, + mask_conv1x1) + + def forward(self, mixture_w): + """ + Keep this API same with TasNet + Args: + mixture_w: [M, N, K], M is batch size + returns: + est_mask: [M, C, N, K] + """ + M, N, K = mixture_w.size() + score = self.network(mixture_w) # [M, N, K] -> [M, C*N, K] + score = score.view(M, self.C, N, K) # [M, C*N, K] -> [M, C, N, K] + if self.mask_nonlinear == 'softmax': + est_mask = F.softmax(score, dim=1) + elif self.mask_nonlinear == 'relu': + est_mask = F.relu(score) + else: + raise ValueError("Unsupported mask non-linear function") + return est_mask + + +class TemporalBlock(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + norm_type="gLN", + causal=False): + super(TemporalBlock, self).__init__() + # [M, B, K] -> [M, H, K] + conv1x1 = nn.Conv1d(in_channels, out_channels, 1, bias=False) + prelu = nn.PReLU() + norm = chose_norm(norm_type, out_channels) + # [M, H, K] -> [M, B, K] + dsconv = DepthwiseSeparableConv(out_channels, in_channels, kernel_size, stride, padding, + dilation, norm_type, causal) + # Put together + self.net = nn.Sequential(conv1x1, prelu, norm, dsconv) + + def forward(self, x): + """ + Args: + x: [M, B, K] + Returns: + [M, B, K] + """ + residual = x + out = self.net(x) + # TODO: when P = 3 here works fine, but when P = 2 maybe need to pad? + return out + residual # look like w/o F.relu is better than w/ F.relu + # return F.relu(out + residual) + + +class DepthwiseSeparableConv(nn.Module): + def __init__(self, + in_channels, + out_channels, + kernel_size, + stride, + padding, + dilation, + norm_type="gLN", + causal=False): + super(DepthwiseSeparableConv, self).__init__() + # Use `groups` option to implement depthwise convolution + # [M, H, K] -> [M, H, K] + depthwise_conv = nn.Conv1d(in_channels, + in_channels, + kernel_size, + stride=stride, + padding=padding, + dilation=dilation, + groups=in_channels, + bias=False) + if causal: + chomp = Chomp1d(padding) + prelu = nn.PReLU() + norm = chose_norm(norm_type, in_channels) + # [M, H, K] -> [M, B, K] + pointwise_conv = nn.Conv1d(in_channels, out_channels, 1, bias=False) + # Put together + if causal: + self.net = nn.Sequential(depthwise_conv, chomp, prelu, norm, pointwise_conv) + else: + self.net = nn.Sequential(depthwise_conv, prelu, norm, pointwise_conv) + + def forward(self, x): + """ + Args: + x: [M, H, K] + Returns: + result: [M, B, K] + """ + return self.net(x) + + +class Chomp1d(nn.Module): + """To ensure the output length is the same as the input. + """ + def __init__(self, chomp_size): + super(Chomp1d, self).__init__() + self.chomp_size = chomp_size + + def forward(self, x): + """ + Args: + x: [M, H, Kpad] + Returns: + [M, H, K] + """ + return x[:, :, :-self.chomp_size].contiguous() + + +def chose_norm(norm_type, channel_size): + """The input of normlization will be (M, C, K), where M is batch size, + C is channel size and K is sequence length. + """ + if norm_type == "gLN": + return GlobalLayerNorm(channel_size) + elif norm_type == "cLN": + return ChannelwiseLayerNorm(channel_size) + elif norm_type == "id": + return nn.Identity() + else: # norm_type == "BN": + # Given input (M, C, K), nn.BatchNorm1d(C) will accumulate statics + # along M and K, so this BN usage is right. 
+ return nn.BatchNorm1d(channel_size) + + +# TODO: Use nn.LayerNorm to impl cLN to speed up +class ChannelwiseLayerNorm(nn.Module): + """Channel-wise Layer Normalization (cLN)""" + def __init__(self, channel_size): + super(ChannelwiseLayerNorm, self).__init__() + self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.reset_parameters() + + def reset_parameters(self): + self.gamma.data.fill_(1) + self.beta.data.zero_() + + def forward(self, y): + """ + Args: + y: [M, N, K], M is batch size, N is channel size, K is length + Returns: + cLN_y: [M, N, K] + """ + mean = torch.mean(y, dim=1, keepdim=True) # [M, 1, K] + var = torch.var(y, dim=1, keepdim=True, unbiased=False) # [M, 1, K] + cLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta + return cLN_y + + +class GlobalLayerNorm(nn.Module): + """Global Layer Normalization (gLN)""" + def __init__(self, channel_size): + super(GlobalLayerNorm, self).__init__() + self.gamma = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.beta = nn.Parameter(torch.Tensor(1, channel_size, 1)) # [1, N, 1] + self.reset_parameters() + + def reset_parameters(self): + self.gamma.data.fill_(1) + self.beta.data.zero_() + + def forward(self, y): + """ + Args: + y: [M, N, K], M is batch size, N is channel size, K is length + Returns: + gLN_y: [M, N, K] + """ + # TODO: in torch 1.0, torch.mean() support dim list + mean = y.mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) # [M, 1, 1] + var = (torch.pow(y - mean, 2)).mean(dim=1, keepdim=True).mean(dim=2, keepdim=True) + gLN_y = self.gamma * (y - mean) / torch.pow(var + EPS, 0.5) + self.beta + return gLN_y + + +if __name__ == "__main__": + torch.manual_seed(123) + M, N, L, T = 2, 3, 4, 12 + K = 2 * T // L - 1 + B, H, P, X, R, C, norm_type, causal = 2, 3, 3, 3, 2, 2, "gLN", False + mixture = torch.randint(3, (M, T)) + # test Encoder + encoder = Encoder(L, N) + encoder.conv1d_U.weight.data = torch.randint(2, encoder.conv1d_U.weight.size()) + mixture_w = encoder(mixture) + print('mixture', mixture) + print('U', encoder.conv1d_U.weight) + print('mixture_w', mixture_w) + print('mixture_w size', mixture_w.size()) + + # test TemporalConvNet + separator = TemporalConvNet(N, B, H, P, X, R, C, norm_type=norm_type, causal=causal) + est_mask = separator(mixture_w) + print('est_mask', est_mask) + + # test Decoder + decoder = Decoder(N, L) + est_mask = torch.randint(2, (B, K, C, N)) + est_source = decoder(mixture_w, est_mask) + print('est_source', est_source) + + # test Conv-TasNet + conv_tasnet = ConvTasNet(N, L, B, H, P, X, R, C, norm_type=norm_type) + est_source = conv_tasnet(mixture) + print('est_source', est_source) + print('est_source size', est_source.size()) diff --git a/demucs/transformer.py b/demucs/transformer.py new file mode 100644 index 0000000000000000000000000000000000000000..56a465b861d7d018d0eca2779bbd392f07e411a9 --- /dev/null +++ b/demucs/transformer.py @@ -0,0 +1,839 @@ +# Copyright (c) 2019-present, Meta, Inc. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. +# First author is Simon Rouard. 
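+#
+# Building blocks for the cross-domain Transformer: sinusoidal / CAPE / scaled
+# positional embeddings, (optionally sparse) self-attention and cross-attention
+# layers, and a CrossTransformerEncoder that exchanges information between a
+# spectral branch (B, C, Fr, T) and a temporal branch (B, C, T).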
+ +import random +import typing as tp + +import torch +import torch.nn as nn +import torch.nn.functional as F +import numpy as np +import math +from einops import rearrange + + +def create_sin_embedding( + length: int, dim: int, shift: int = 0, device="cpu", max_period=10000 +): + # We aim for TBC format + assert dim % 2 == 0 + pos = shift + torch.arange(length, device=device).view(-1, 1, 1) + half_dim = dim // 2 + adim = torch.arange(dim // 2, device=device).view(1, 1, -1) + phase = pos / (max_period ** (adim / (half_dim - 1))) + return torch.cat( + [ + torch.cos(phase), + torch.sin(phase), + ], + dim=-1, + ) + + +def create_2d_sin_embedding(d_model, height, width, device="cpu", max_period=10000): + """ + :param d_model: dimension of the model + :param height: height of the positions + :param width: width of the positions + :return: d_model*height*width position matrix + """ + if d_model % 4 != 0: + raise ValueError( + "Cannot use sin/cos positional encoding with " + "odd dimension (got dim={:d})".format(d_model) + ) + pe = torch.zeros(d_model, height, width) + # Each dimension use half of d_model + d_model = int(d_model / 2) + div_term = torch.exp( + torch.arange(0.0, d_model, 2) * -(math.log(max_period) / d_model) + ) + pos_w = torch.arange(0.0, width).unsqueeze(1) + pos_h = torch.arange(0.0, height).unsqueeze(1) + pe[0:d_model:2, :, :] = ( + torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1) + ) + pe[1:d_model:2, :, :] = ( + torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1) + ) + pe[d_model::2, :, :] = ( + torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width) + ) + pe[d_model + 1:: 2, :, :] = ( + torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width) + ) + + return pe[None, :].to(device) + + +def create_sin_embedding_cape( + length: int, + dim: int, + batch_size: int, + mean_normalize: bool, + augment: bool, # True during training + max_global_shift: float = 0.0, # delta max + max_local_shift: float = 0.0, # epsilon max + max_scale: float = 1.0, + device: str = "cpu", + max_period: float = 10000.0, +): + # We aim for TBC format + assert dim % 2 == 0 + pos = 1.0 * torch.arange(length).view(-1, 1, 1) # (length, 1, 1) + pos = pos.repeat(1, batch_size, 1) # (length, batch_size, 1) + if mean_normalize: + pos -= torch.nanmean(pos, dim=0, keepdim=True) + + if augment: + delta = np.random.uniform( + -max_global_shift, +max_global_shift, size=[1, batch_size, 1] + ) + delta_local = np.random.uniform( + -max_local_shift, +max_local_shift, size=[length, batch_size, 1] + ) + log_lambdas = np.random.uniform( + -np.log(max_scale), +np.log(max_scale), size=[1, batch_size, 1] + ) + pos = (pos + delta + delta_local) * np.exp(log_lambdas) + + pos = pos.to(device) + + half_dim = dim // 2 + adim = torch.arange(dim // 2, device=device).view(1, 1, -1) + phase = pos / (max_period ** (adim / (half_dim - 1))) + return torch.cat( + [ + torch.cos(phase), + torch.sin(phase), + ], + dim=-1, + ).float() + + +def get_causal_mask(length): + pos = torch.arange(length) + return pos > pos[:, None] + + +def get_elementary_mask( + T1, + T2, + mask_type, + sparse_attn_window, + global_window, + mask_random_seed, + sparsity, + device, +): + """ + When the input of the Decoder has length T1 and the output T2 + The mask matrix has shape (T2, T1) + """ + assert mask_type in ["diag", "jmask", "random", "global"] + + if mask_type == "global": + mask = torch.zeros(T2, T1, dtype=torch.bool) + mask[:, :global_window] = True + 
line_window = int(global_window * T2 / T1) + mask[:line_window, :] = True + + if mask_type == "diag": + + mask = torch.zeros(T2, T1, dtype=torch.bool) + rows = torch.arange(T2)[:, None] + cols = ( + (T1 / T2 * rows + torch.arange(-sparse_attn_window, sparse_attn_window + 1)) + .long() + .clamp(0, T1 - 1) + ) + mask.scatter_(1, cols, torch.ones(1, dtype=torch.bool).expand_as(cols)) + + elif mask_type == "jmask": + mask = torch.zeros(T2 + 2, T1 + 2, dtype=torch.bool) + rows = torch.arange(T2 + 2)[:, None] + t = torch.arange(0, int((2 * T1) ** 0.5 + 1)) + t = (t * (t + 1) / 2).int() + t = torch.cat([-t.flip(0)[:-1], t]) + cols = (T1 / T2 * rows + t).long().clamp(0, T1 + 1) + mask.scatter_(1, cols, torch.ones(1, dtype=torch.bool).expand_as(cols)) + mask = mask[1:-1, 1:-1] + + elif mask_type == "random": + gene = torch.Generator(device=device) + gene.manual_seed(mask_random_seed) + mask = ( + torch.rand(T1 * T2, generator=gene, device=device).reshape(T2, T1) + > sparsity + ) + + mask = mask.to(device) + return mask + + +def get_mask( + T1, + T2, + mask_type, + sparse_attn_window, + global_window, + mask_random_seed, + sparsity, + device, +): + """ + Return a SparseCSRTensor mask that is a combination of elementary masks + mask_type can be a combination of multiple masks: for instance "diag_jmask_random" + """ + from xformers.sparse import SparseCSRTensor + # create a list + mask_types = mask_type.split("_") + + all_masks = [ + get_elementary_mask( + T1, + T2, + mask, + sparse_attn_window, + global_window, + mask_random_seed, + sparsity, + device, + ) + for mask in mask_types + ] + + final_mask = torch.stack(all_masks).sum(axis=0) > 0 + + return SparseCSRTensor.from_dense(final_mask[None]) + + +class ScaledEmbedding(nn.Module): + def __init__( + self, + num_embeddings: int, + embedding_dim: int, + scale: float = 1.0, + boost: float = 3.0, + ): + super().__init__() + self.embedding = nn.Embedding(num_embeddings, embedding_dim) + self.embedding.weight.data *= scale / boost + self.boost = boost + + @property + def weight(self): + return self.embedding.weight * self.boost + + def forward(self, x): + return self.embedding(x) * self.boost + + +class LayerScale(nn.Module): + """Layer scale from [Touvron et al 2021] (https://arxiv.org/pdf/2103.17239.pdf). + This rescales diagonaly residual outputs close to 0 initially, then learnt. 
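+    Concretely, the layer returns ``scale * x`` where ``scale`` is a learnt
+    per-channel vector initialised to ``init`` (0 by default).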
+ """ + + def __init__(self, channels: int, init: float = 0, channel_last=False): + """ + channel_last = False corresponds to (B, C, T) tensors + channel_last = True corresponds to (T, B, C) tensors + """ + super().__init__() + self.channel_last = channel_last + self.scale = nn.Parameter(torch.zeros(channels, requires_grad=True)) + self.scale.data[:] = init + + def forward(self, x): + if self.channel_last: + return self.scale * x + else: + return self.scale[:, None] * x + + +class MyGroupNorm(nn.GroupNorm): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + def forward(self, x): + """ + x: (B, T, C) + if num_groups=1: Normalisation on all T and C together for each B + """ + x = x.transpose(1, 2) + return super().forward(x).transpose(1, 2) + + +class MyTransformerEncoderLayer(nn.TransformerEncoderLayer): + def __init__( + self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation=F.relu, + group_norm=0, + norm_first=False, + norm_out=False, + layer_norm_eps=1e-5, + layer_scale=False, + init_values=1e-4, + device=None, + dtype=None, + sparse=False, + mask_type="diag", + mask_random_seed=42, + sparse_attn_window=500, + global_window=50, + auto_sparsity=False, + sparsity=0.95, + batch_first=False, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__( + d_model=d_model, + nhead=nhead, + dim_feedforward=dim_feedforward, + dropout=dropout, + activation=activation, + layer_norm_eps=layer_norm_eps, + batch_first=batch_first, + norm_first=norm_first, + device=device, + dtype=dtype, + ) + self.sparse = sparse + self.auto_sparsity = auto_sparsity + if sparse: + if not auto_sparsity: + self.mask_type = mask_type + self.sparse_attn_window = sparse_attn_window + self.global_window = global_window + self.sparsity = sparsity + if group_norm: + self.norm1 = MyGroupNorm(int(group_norm), d_model, eps=layer_norm_eps, **factory_kwargs) + self.norm2 = MyGroupNorm(int(group_norm), d_model, eps=layer_norm_eps, **factory_kwargs) + + self.norm_out = None + if self.norm_first & norm_out: + self.norm_out = MyGroupNorm(num_groups=int(norm_out), num_channels=d_model) + self.gamma_1 = ( + LayerScale(d_model, init_values, True) if layer_scale else nn.Identity() + ) + self.gamma_2 = ( + LayerScale(d_model, init_values, True) if layer_scale else nn.Identity() + ) + + if sparse: + self.self_attn = MultiheadAttention( + d_model, nhead, dropout=dropout, batch_first=batch_first, + auto_sparsity=sparsity if auto_sparsity else 0, + ) + self.__setattr__("src_mask", torch.zeros(1, 1)) + self.mask_random_seed = mask_random_seed + + def forward(self, src, src_mask=None, src_key_padding_mask=None): + """ + if batch_first = False, src shape is (T, B, C) + the case where batch_first=True is not covered + """ + device = src.device + x = src + T, B, C = x.shape + if self.sparse and not self.auto_sparsity: + assert src_mask is None + src_mask = self.src_mask + if src_mask.shape[-1] != T: + src_mask = get_mask( + T, + T, + self.mask_type, + self.sparse_attn_window, + self.global_window, + self.mask_random_seed, + self.sparsity, + device, + ) + self.__setattr__("src_mask", src_mask) + + if self.norm_first: + x = x + self.gamma_1( + self._sa_block(self.norm1(x), src_mask, src_key_padding_mask) + ) + x = x + self.gamma_2(self._ff_block(self.norm2(x))) + + if self.norm_out: + x = self.norm_out(x) + else: + x = self.norm1( + x + self.gamma_1(self._sa_block(x, src_mask, src_key_padding_mask)) + ) + x = self.norm2(x + self.gamma_2(self._ff_block(x))) + + return x + + +class 
CrossTransformerEncoderLayer(nn.Module): + def __init__( + self, + d_model: int, + nhead: int, + dim_feedforward: int = 2048, + dropout: float = 0.1, + activation=F.relu, + layer_norm_eps: float = 1e-5, + layer_scale: bool = False, + init_values: float = 1e-4, + norm_first: bool = False, + group_norm: bool = False, + norm_out: bool = False, + sparse=False, + mask_type="diag", + mask_random_seed=42, + sparse_attn_window=500, + global_window=50, + sparsity=0.95, + auto_sparsity=None, + device=None, + dtype=None, + batch_first=False, + ): + factory_kwargs = {"device": device, "dtype": dtype} + super().__init__() + + self.sparse = sparse + self.auto_sparsity = auto_sparsity + if sparse: + if not auto_sparsity: + self.mask_type = mask_type + self.sparse_attn_window = sparse_attn_window + self.global_window = global_window + self.sparsity = sparsity + + self.cross_attn: nn.Module + self.cross_attn = nn.MultiheadAttention( + d_model, nhead, dropout=dropout, batch_first=batch_first) + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward, **factory_kwargs) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model, **factory_kwargs) + + self.norm_first = norm_first + self.norm1: nn.Module + self.norm2: nn.Module + self.norm3: nn.Module + if group_norm: + self.norm1 = MyGroupNorm(int(group_norm), d_model, eps=layer_norm_eps, **factory_kwargs) + self.norm2 = MyGroupNorm(int(group_norm), d_model, eps=layer_norm_eps, **factory_kwargs) + self.norm3 = MyGroupNorm(int(group_norm), d_model, eps=layer_norm_eps, **factory_kwargs) + else: + self.norm1 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) + self.norm2 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) + self.norm3 = nn.LayerNorm(d_model, eps=layer_norm_eps, **factory_kwargs) + + self.norm_out = None + if self.norm_first & norm_out: + self.norm_out = MyGroupNorm(num_groups=int(norm_out), num_channels=d_model) + + self.gamma_1 = ( + LayerScale(d_model, init_values, True) if layer_scale else nn.Identity() + ) + self.gamma_2 = ( + LayerScale(d_model, init_values, True) if layer_scale else nn.Identity() + ) + + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + # Legacy string support for activation function. 
+ if isinstance(activation, str): + self.activation = self._get_activation_fn(activation) + else: + self.activation = activation + + if sparse: + self.cross_attn = MultiheadAttention( + d_model, nhead, dropout=dropout, batch_first=batch_first, + auto_sparsity=sparsity if auto_sparsity else 0) + if not auto_sparsity: + self.__setattr__("mask", torch.zeros(1, 1)) + self.mask_random_seed = mask_random_seed + + def forward(self, q, k, mask=None): + """ + Args: + q: tensor of shape (T, B, C) + k: tensor of shape (S, B, C) + mask: tensor of shape (T, S) + + """ + device = q.device + T, B, C = q.shape + S, B, C = k.shape + if self.sparse and not self.auto_sparsity: + assert mask is None + mask = self.mask + if mask.shape[-1] != S or mask.shape[-2] != T: + mask = get_mask( + S, + T, + self.mask_type, + self.sparse_attn_window, + self.global_window, + self.mask_random_seed, + self.sparsity, + device, + ) + self.__setattr__("mask", mask) + + if self.norm_first: + x = q + self.gamma_1(self._ca_block(self.norm1(q), self.norm2(k), mask)) + x = x + self.gamma_2(self._ff_block(self.norm3(x))) + if self.norm_out: + x = self.norm_out(x) + else: + x = self.norm1(q + self.gamma_1(self._ca_block(q, k, mask))) + x = self.norm2(x + self.gamma_2(self._ff_block(x))) + + return x + + # self-attention block + def _ca_block(self, q, k, attn_mask=None): + x = self.cross_attn(q, k, k, attn_mask=attn_mask, need_weights=False)[0] + return self.dropout1(x) + + # feed forward block + def _ff_block(self, x): + x = self.linear2(self.dropout(self.activation(self.linear1(x)))) + return self.dropout2(x) + + def _get_activation_fn(self, activation): + if activation == "relu": + return F.relu + elif activation == "gelu": + return F.gelu + + raise RuntimeError("activation should be relu/gelu, not {}".format(activation)) + + +# ----------------- MULTI-BLOCKS MODELS: ----------------------- + + +class CrossTransformerEncoder(nn.Module): + def __init__( + self, + dim: int, + emb: str = "sin", + hidden_scale: float = 4.0, + num_heads: int = 8, + num_layers: int = 6, + cross_first: bool = False, + dropout: float = 0.0, + max_positions: int = 1000, + norm_in: bool = True, + norm_in_group: bool = False, + group_norm: int = False, + norm_first: bool = False, + norm_out: bool = False, + max_period: float = 10000.0, + weight_decay: float = 0.0, + lr: tp.Optional[float] = None, + layer_scale: bool = False, + gelu: bool = True, + sin_random_shift: int = 0, + weight_pos_embed: float = 1.0, + cape_mean_normalize: bool = True, + cape_augment: bool = True, + cape_glob_loc_scale: list = [5000.0, 1.0, 1.4], + sparse_self_attn: bool = False, + sparse_cross_attn: bool = False, + mask_type: str = "diag", + mask_random_seed: int = 42, + sparse_attn_window: int = 500, + global_window: int = 50, + auto_sparsity: bool = False, + sparsity: float = 0.95, + ): + super().__init__() + """ + """ + assert dim % num_heads == 0 + + hidden_dim = int(dim * hidden_scale) + + self.num_layers = num_layers + # classic parity = 1 means that if idx%2 == 1 there is a + # classical encoder else there is a cross encoder + self.classic_parity = 1 if cross_first else 0 + self.emb = emb + self.max_period = max_period + self.weight_decay = weight_decay + self.weight_pos_embed = weight_pos_embed + self.sin_random_shift = sin_random_shift + if emb == "cape": + self.cape_mean_normalize = cape_mean_normalize + self.cape_augment = cape_augment + self.cape_glob_loc_scale = cape_glob_loc_scale + if emb == "scaled": + self.position_embeddings = ScaledEmbedding(max_positions, dim, 
scale=0.2) + + self.lr = lr + + activation: tp.Any = F.gelu if gelu else F.relu + + self.norm_in: nn.Module + self.norm_in_t: nn.Module + if norm_in: + self.norm_in = nn.LayerNorm(dim) + self.norm_in_t = nn.LayerNorm(dim) + elif norm_in_group: + self.norm_in = MyGroupNorm(int(norm_in_group), dim) + self.norm_in_t = MyGroupNorm(int(norm_in_group), dim) + else: + self.norm_in = nn.Identity() + self.norm_in_t = nn.Identity() + + # spectrogram layers + self.layers = nn.ModuleList() + # temporal layers + self.layers_t = nn.ModuleList() + + kwargs_common = { + "d_model": dim, + "nhead": num_heads, + "dim_feedforward": hidden_dim, + "dropout": dropout, + "activation": activation, + "group_norm": group_norm, + "norm_first": norm_first, + "norm_out": norm_out, + "layer_scale": layer_scale, + "mask_type": mask_type, + "mask_random_seed": mask_random_seed, + "sparse_attn_window": sparse_attn_window, + "global_window": global_window, + "sparsity": sparsity, + "auto_sparsity": auto_sparsity, + "batch_first": True, + } + + kwargs_classic_encoder = dict(kwargs_common) + kwargs_classic_encoder.update({ + "sparse": sparse_self_attn, + }) + kwargs_cross_encoder = dict(kwargs_common) + kwargs_cross_encoder.update({ + "sparse": sparse_cross_attn, + }) + + for idx in range(num_layers): + if idx % 2 == self.classic_parity: + + self.layers.append(MyTransformerEncoderLayer(**kwargs_classic_encoder)) + self.layers_t.append( + MyTransformerEncoderLayer(**kwargs_classic_encoder) + ) + + else: + self.layers.append(CrossTransformerEncoderLayer(**kwargs_cross_encoder)) + + self.layers_t.append( + CrossTransformerEncoderLayer(**kwargs_cross_encoder) + ) + + def forward(self, x, xt): + B, C, Fr, T1 = x.shape + pos_emb_2d = create_2d_sin_embedding( + C, Fr, T1, x.device, self.max_period + ) # (1, C, Fr, T1) + pos_emb_2d = rearrange(pos_emb_2d, "b c fr t1 -> b (t1 fr) c") + x = rearrange(x, "b c fr t1 -> b (t1 fr) c") + x = self.norm_in(x) + x = x + self.weight_pos_embed * pos_emb_2d + + B, C, T2 = xt.shape + xt = rearrange(xt, "b c t2 -> b t2 c") # now T2, B, C + pos_emb = self._get_pos_embedding(T2, B, C, x.device) + pos_emb = rearrange(pos_emb, "t2 b c -> b t2 c") + xt = self.norm_in_t(xt) + xt = xt + self.weight_pos_embed * pos_emb + + for idx in range(self.num_layers): + if idx % 2 == self.classic_parity: + x = self.layers[idx](x) + xt = self.layers_t[idx](xt) + else: + old_x = x + x = self.layers[idx](x, xt) + xt = self.layers_t[idx](xt, old_x) + + x = rearrange(x, "b (t1 fr) c -> b c fr t1", t1=T1) + xt = rearrange(xt, "b t2 c -> b c t2") + return x, xt + + def _get_pos_embedding(self, T, B, C, device): + if self.emb == "sin": + shift = random.randrange(self.sin_random_shift + 1) + pos_emb = create_sin_embedding( + T, C, shift=shift, device=device, max_period=self.max_period + ) + elif self.emb == "cape": + if self.training: + pos_emb = create_sin_embedding_cape( + T, + C, + B, + device=device, + max_period=self.max_period, + mean_normalize=self.cape_mean_normalize, + augment=self.cape_augment, + max_global_shift=self.cape_glob_loc_scale[0], + max_local_shift=self.cape_glob_loc_scale[1], + max_scale=self.cape_glob_loc_scale[2], + ) + else: + pos_emb = create_sin_embedding_cape( + T, + C, + B, + device=device, + max_period=self.max_period, + mean_normalize=self.cape_mean_normalize, + augment=False, + ) + + elif self.emb == "scaled": + pos = torch.arange(T, device=device) + pos_emb = self.position_embeddings(pos)[:, None] + + return pos_emb + + def make_optim_group(self): + group = {"params": list(self.parameters()), 
"weight_decay": self.weight_decay} + if self.lr is not None: + group["lr"] = self.lr + return group + + +# Attention Modules + + +class MultiheadAttention(nn.Module): + def __init__( + self, + embed_dim, + num_heads, + dropout=0.0, + bias=True, + add_bias_kv=False, + add_zero_attn=False, + kdim=None, + vdim=None, + batch_first=False, + auto_sparsity=None, + ): + super().__init__() + assert auto_sparsity is not None, "sanity check" + self.num_heads = num_heads + self.q = torch.nn.Linear(embed_dim, embed_dim, bias=bias) + self.k = torch.nn.Linear(embed_dim, embed_dim, bias=bias) + self.v = torch.nn.Linear(embed_dim, embed_dim, bias=bias) + self.attn_drop = torch.nn.Dropout(dropout) + self.proj = torch.nn.Linear(embed_dim, embed_dim, bias) + self.proj_drop = torch.nn.Dropout(dropout) + self.batch_first = batch_first + self.auto_sparsity = auto_sparsity + + def forward( + self, + query, + key, + value, + key_padding_mask=None, + need_weights=True, + attn_mask=None, + average_attn_weights=True, + ): + + if not self.batch_first: # N, B, C + query = query.permute(1, 0, 2) # B, N_q, C + key = key.permute(1, 0, 2) # B, N_k, C + value = value.permute(1, 0, 2) # B, N_k, C + B, N_q, C = query.shape + B, N_k, C = key.shape + + q = ( + self.q(query) + .reshape(B, N_q, self.num_heads, C // self.num_heads) + .permute(0, 2, 1, 3) + ) + q = q.flatten(0, 1) + k = ( + self.k(key) + .reshape(B, N_k, self.num_heads, C // self.num_heads) + .permute(0, 2, 1, 3) + ) + k = k.flatten(0, 1) + v = ( + self.v(value) + .reshape(B, N_k, self.num_heads, C // self.num_heads) + .permute(0, 2, 1, 3) + ) + v = v.flatten(0, 1) + + if self.auto_sparsity: + assert attn_mask is None + x = dynamic_sparse_attention(q, k, v, sparsity=self.auto_sparsity) + else: + x = scaled_dot_product_attention(q, k, v, attn_mask, dropout=self.attn_drop) + x = x.reshape(B, self.num_heads, N_q, C // self.num_heads) + + x = x.transpose(1, 2).reshape(B, N_q, C) + x = self.proj(x) + x = self.proj_drop(x) + if not self.batch_first: + x = x.permute(1, 0, 2) + return x, None + + +def scaled_query_key_softmax(q, k, att_mask): + from xformers.ops import masked_matmul + q = q / (k.size(-1)) ** 0.5 + att = masked_matmul(q, k.transpose(-2, -1), att_mask) + att = torch.nn.functional.softmax(att, -1) + return att + + +def scaled_dot_product_attention(q, k, v, att_mask, dropout): + att = scaled_query_key_softmax(q, k, att_mask=att_mask) + att = dropout(att) + y = att @ v + return y + + +def _compute_buckets(x, R): + qq = torch.einsum('btf,bfhi->bhti', x, R) + qq = torch.cat([qq, -qq], dim=-1) + buckets = qq.argmax(dim=-1) + + return buckets.permute(0, 2, 1).byte().contiguous() + + +def dynamic_sparse_attention(query, key, value, sparsity, infer_sparsity=True, attn_bias=None): + # assert False, "The code for the custom sparse kernel is not ready for release yet." 
+ from xformers.ops import find_locations, sparse_memory_efficient_attention + n_hashes = 32 + proj_size = 4 + query, key, value = [x.contiguous() for x in [query, key, value]] + with torch.no_grad(): + R = torch.randn(1, query.shape[-1], n_hashes, proj_size // 2, device=query.device) + bucket_query = _compute_buckets(query, R) + bucket_key = _compute_buckets(key, R) + row_offsets, column_indices = find_locations( + bucket_query, bucket_key, sparsity, infer_sparsity) + return sparse_memory_efficient_attention( + query, key, value, row_offsets, column_indices, attn_bias) diff --git a/demucs/utils.py b/demucs/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..94bd323dcd4b170b4d71ff27bade1bb6ad28738f --- /dev/null +++ b/demucs/utils.py @@ -0,0 +1,502 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# All rights reserved. +# +# This source code is licensed under the license found in the +# LICENSE file in the root directory of this source tree. + +from collections import defaultdict +from contextlib import contextmanager +import math +import os +import tempfile +import typing as tp + +import errno +import functools +import hashlib +import inspect +import io +import os +import random +import socket +import tempfile +import warnings +import zlib +import tkinter as tk + +from diffq import UniformQuantizer, DiffQuantizer +import torch as th +import tqdm +from torch import distributed +from torch.nn import functional as F + +import torch + +def unfold(a, kernel_size, stride): + """Given input of size [*OT, T], output Tensor of size [*OT, F, K] + with K the kernel size, by extracting frames with the given stride. + + This will pad the input so that `F = ceil(T / K)`. + + see https://github.com/pytorch/pytorch/issues/60466 + """ + *shape, length = a.shape + n_frames = math.ceil(length / stride) + tgt_length = (n_frames - 1) * stride + kernel_size + a = F.pad(a, (0, tgt_length - length)) + strides = list(a.stride()) + assert strides[-1] == 1, 'data should be contiguous' + strides = strides[:-1] + [stride, 1] + return a.as_strided([*shape, n_frames, kernel_size], strides) + + +def center_trim(tensor: torch.Tensor, reference: tp.Union[torch.Tensor, int]): + """ + Center trim `tensor` with respect to `reference`, along the last dimension. + `reference` can also be a number, representing the length to trim to. + If the size difference != 0 mod 2, the extra sample is removed on the right side. + """ + ref_size: int + if isinstance(reference, torch.Tensor): + ref_size = reference.size(-1) + else: + ref_size = reference + delta = tensor.size(-1) - ref_size + if delta < 0: + raise ValueError("tensor must be larger than reference. " f"Delta is {delta}.") + if delta: + tensor = tensor[..., delta // 2:-(delta - delta // 2)] + return tensor + + +def pull_metric(history: tp.List[dict], name: str): + out = [] + for metrics in history: + metric = metrics + for part in name.split("."): + metric = metric[part] + out.append(metric) + return out + + +def EMA(beta: float = 1): + """ + Exponential Moving Average callback. + Returns a single function that can be called to repeatidly update the EMA + with a dict of metrics. The callback will return + the new averaged dict of metrics. + + Note that for `beta=1`, this is just plain averaging. 
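+    Added worked example (not part of the original docstring): with `beta=0.9` and
+    unit weight, updating with {"loss": 1.0} and then {"loss": 2.0} returns 1.0 after
+    the first call and (0.9 * 1.0 + 2.0) / (0.9 + 1.0) ≈ 1.53 after the second, so
+    more recent values carry more weight.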
+ """ + fix: tp.Dict[str, float] = defaultdict(float) + total: tp.Dict[str, float] = defaultdict(float) + + def _update(metrics: dict, weight: float = 1) -> dict: + nonlocal total, fix + for key, value in metrics.items(): + total[key] = total[key] * beta + weight * float(value) + fix[key] = fix[key] * beta + weight + return {key: tot / fix[key] for key, tot in total.items()} + return _update + + +def sizeof_fmt(num: float, suffix: str = 'B'): + """ + Given `num` bytes, return human readable size. + Taken from https://stackoverflow.com/a/1094933 + """ + for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: + if abs(num) < 1024.0: + return "%3.1f%s%s" % (num, unit, suffix) + num /= 1024.0 + return "%.1f%s%s" % (num, 'Yi', suffix) + + +@contextmanager +def temp_filenames(count: int, delete=True): + names = [] + try: + for _ in range(count): + names.append(tempfile.NamedTemporaryFile(delete=False).name) + yield names + finally: + if delete: + for name in names: + os.unlink(name) + +def average_metric(metric, count=1.): + """ + Average `metric` which should be a float across all hosts. `count` should be + the weight for this particular host (i.e. number of examples). + """ + metric = th.tensor([count, count * metric], dtype=th.float32, device='cuda') + distributed.all_reduce(metric, op=distributed.ReduceOp.SUM) + return metric[1].item() / metric[0].item() + + +def free_port(host='', low=20000, high=40000): + """ + Return a port number that is most likely free. + This could suffer from a race condition although + it should be quite rare. + """ + sock = socket.socket() + while True: + port = random.randint(low, high) + try: + sock.bind((host, port)) + except OSError as error: + if error.errno == errno.EADDRINUSE: + continue + raise + return port + + +def sizeof_fmt(num, suffix='B'): + """ + Given `num` bytes, return human readable size. + Taken from https://stackoverflow.com/a/1094933 + """ + for unit in ['', 'Ki', 'Mi', 'Gi', 'Ti', 'Pi', 'Ei', 'Zi']: + if abs(num) < 1024.0: + return "%3.1f%s%s" % (num, unit, suffix) + num /= 1024.0 + return "%.1f%s%s" % (num, 'Yi', suffix) + + +def human_seconds(seconds, display='.2f'): + """ + Given `seconds` seconds, return human readable duration. 
+ """ + value = seconds * 1e6 + ratios = [1e3, 1e3, 60, 60, 24] + names = ['us', 'ms', 's', 'min', 'hrs', 'days'] + last = names.pop(0) + for name, ratio in zip(names, ratios): + if value / ratio < 0.3: + break + value /= ratio + last = name + return f"{format(value, display)} {last}" + + +class TensorChunk: + def __init__(self, tensor, offset=0, length=None): + total_length = tensor.shape[-1] + assert offset >= 0 + assert offset < total_length + + if length is None: + length = total_length - offset + else: + length = min(total_length - offset, length) + + self.tensor = tensor + self.offset = offset + self.length = length + self.device = tensor.device + + @property + def shape(self): + shape = list(self.tensor.shape) + shape[-1] = self.length + return shape + + def padded(self, target_length): + delta = target_length - self.length + total_length = self.tensor.shape[-1] + assert delta >= 0 + + start = self.offset - delta // 2 + end = start + target_length + + correct_start = max(0, start) + correct_end = min(total_length, end) + + pad_left = correct_start - start + pad_right = end - correct_end + + out = F.pad(self.tensor[..., correct_start:correct_end], (pad_left, pad_right)) + assert out.shape[-1] == target_length + return out + + +def tensor_chunk(tensor_or_chunk): + if isinstance(tensor_or_chunk, TensorChunk): + return tensor_or_chunk + else: + assert isinstance(tensor_or_chunk, th.Tensor) + return TensorChunk(tensor_or_chunk) + + +def apply_model_v1(model, mix, shifts=None, split=False, progress=False, set_progress_bar=None): + """ + Apply model to a given mixture. + + Args: + shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec + and apply the oppositve shift to the output. This is repeated `shifts` time and + all predictions are averaged. This effectively makes the model time equivariant + and improves SDR by up to 0.2 points. + split (bool): if True, the input will be broken down in 8 seconds extracts + and predictions will be performed individually on each and concatenated. + Useful for model with large memory footprint like Tasnet. 
+ progress (bool): if True, show a progress bar (requires split=True) + """ + + channels, length = mix.size() + device = mix.device + progress_value = 0 + + if split: + out = th.zeros(4, channels, length, device=device) + shift = model.samplerate * 10 + offsets = range(0, length, shift) + scale = 10 + if progress: + offsets = tqdm.tqdm(offsets, unit_scale=scale, ncols=120, unit='seconds') + for offset in offsets: + chunk = mix[..., offset:offset + shift] + if set_progress_bar: + progress_value += 1 + set_progress_bar(0.1, (0.8/len(offsets)*progress_value)) + chunk_out = apply_model_v1(model, chunk, shifts=shifts, set_progress_bar=set_progress_bar) + else: + chunk_out = apply_model_v1(model, chunk, shifts=shifts) + out[..., offset:offset + shift] = chunk_out + offset += shift + return out + elif shifts: + max_shift = int(model.samplerate / 2) + mix = F.pad(mix, (max_shift, max_shift)) + offsets = list(range(max_shift)) + random.shuffle(offsets) + out = 0 + for offset in offsets[:shifts]: + shifted = mix[..., offset:offset + length + max_shift] + if set_progress_bar: + shifted_out = apply_model_v1(model, shifted, set_progress_bar=set_progress_bar) + else: + shifted_out = apply_model_v1(model, shifted) + out += shifted_out[..., max_shift - offset:max_shift - offset + length] + out /= shifts + return out + else: + valid_length = model.valid_length(length) + delta = valid_length - length + padded = F.pad(mix, (delta // 2, delta - delta // 2)) + with th.no_grad(): + out = model(padded.unsqueeze(0))[0] + return center_trim(out, mix) + +def apply_model_v2(model, mix, shifts=None, split=False, + overlap=0.25, transition_power=1., progress=False, set_progress_bar=None): + """ + Apply model to a given mixture. + + Args: + shifts (int): if > 0, will shift in time `mix` by a random amount between 0 and 0.5 sec + and apply the oppositve shift to the output. This is repeated `shifts` time and + all predictions are averaged. This effectively makes the model time equivariant + and improves SDR by up to 0.2 points. + split (bool): if True, the input will be broken down in 8 seconds extracts + and predictions will be performed individually on each and concatenated. + Useful for model with large memory footprint like Tasnet. + progress (bool): if True, show a progress bar (requires split=True) + """ + + assert transition_power >= 1, "transition_power < 1 leads to weird behavior." + device = mix.device + channels, length = mix.shape + progress_value = 0 + + if split: + out = th.zeros(len(model.sources), channels, length, device=device) + sum_weight = th.zeros(length, device=device) + segment = model.segment_length + stride = int((1 - overlap) * segment) + offsets = range(0, length, stride) + scale = stride / model.samplerate + if progress: + offsets = tqdm.tqdm(offsets, unit_scale=scale, ncols=120, unit='seconds') + # We start from a triangle shaped weight, with maximal weight in the middle + # of the segment. Then we normalize and take to the power `transition_power`. + # Large values of transition power will lead to sharper transitions. + weight = th.cat([th.arange(1, segment // 2 + 1), + th.arange(segment - segment // 2, 0, -1)]).to(device) + assert len(weight) == segment + # If the overlap < 50%, this will translate to linear transition when + # transition_power is 1. 
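+        # Added illustrative comment (not in the original source): for a toy segment of
+        # 6 samples the raw triangle weight is [1, 2, 3, 3, 2, 1]; after dividing by its
+        # max and raising to `transition_power` the centre of each chunk dominates while
+        # the overlapping edges fade, and the final division by `sum_weight` turns the
+        # accumulation into a weighted average of the overlapping predictions.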
+ weight = (weight / weight.max())**transition_power + for offset in offsets: + chunk = TensorChunk(mix, offset, segment) + if set_progress_bar: + progress_value += 1 + set_progress_bar(0.1, (0.8/len(offsets)*progress_value)) + chunk_out = apply_model_v2(model, chunk, shifts=shifts, set_progress_bar=set_progress_bar) + else: + chunk_out = apply_model_v2(model, chunk, shifts=shifts) + chunk_length = chunk_out.shape[-1] + out[..., offset:offset + segment] += weight[:chunk_length] * chunk_out + sum_weight[offset:offset + segment] += weight[:chunk_length] + offset += segment + assert sum_weight.min() > 0 + out /= sum_weight + return out + elif shifts: + max_shift = int(0.5 * model.samplerate) + mix = tensor_chunk(mix) + padded_mix = mix.padded(length + 2 * max_shift) + out = 0 + for _ in range(shifts): + offset = random.randint(0, max_shift) + shifted = TensorChunk(padded_mix, offset, length + max_shift - offset) + + if set_progress_bar: + progress_value += 1 + shifted_out = apply_model_v2(model, shifted, set_progress_bar=set_progress_bar) + else: + shifted_out = apply_model_v2(model, shifted) + out += shifted_out[..., max_shift - offset:] + out /= shifts + return out + else: + valid_length = model.valid_length(length) + mix = tensor_chunk(mix) + padded_mix = mix.padded(valid_length) + with th.no_grad(): + out = model(padded_mix.unsqueeze(0))[0] + return center_trim(out, length) + + +@contextmanager +def temp_filenames(count, delete=True): + names = [] + try: + for _ in range(count): + names.append(tempfile.NamedTemporaryFile(delete=False).name) + yield names + finally: + if delete: + for name in names: + os.unlink(name) + + +def get_quantizer(model, args, optimizer=None): + quantizer = None + if args.diffq: + quantizer = DiffQuantizer( + model, min_size=args.q_min_size, group_size=8) + if optimizer is not None: + quantizer.setup_optimizer(optimizer) + elif args.qat: + quantizer = UniformQuantizer( + model, bits=args.qat, min_size=args.q_min_size) + return quantizer + + +def load_model(path, strict=False): + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + load_from = path + package = th.load(load_from, 'cpu') + + klass = package["klass"] + args = package["args"] + kwargs = package["kwargs"] + + if strict: + model = klass(*args, **kwargs) + else: + sig = inspect.signature(klass) + for key in list(kwargs): + if key not in sig.parameters: + warnings.warn("Dropping inexistant parameter " + key) + del kwargs[key] + model = klass(*args, **kwargs) + + state = package["state"] + training_args = package["training_args"] + quantizer = get_quantizer(model, training_args) + + set_state(model, quantizer, state) + return model + + +def get_state(model, quantizer): + if quantizer is None: + state = {k: p.data.to('cpu') for k, p in model.state_dict().items()} + else: + state = quantizer.get_quantized_state() + buf = io.BytesIO() + th.save(state, buf) + state = {'compressed': zlib.compress(buf.getvalue())} + return state + + +def set_state(model, quantizer, state): + if quantizer is None: + model.load_state_dict(state) + else: + buf = io.BytesIO(zlib.decompress(state["compressed"])) + state = th.load(buf, "cpu") + quantizer.restore_quantized_state(state) + + return state + + +def save_state(state, path): + buf = io.BytesIO() + th.save(state, buf) + sig = hashlib.sha256(buf.getvalue()).hexdigest()[:8] + + path = path.parent / (path.stem + "-" + sig + path.suffix) + path.write_bytes(buf.getvalue()) + + +def save_model(model, quantizer, training_args, path): + args, kwargs = 
model._init_args_kwargs + klass = model.__class__ + + state = get_state(model, quantizer) + + save_to = path + package = { + 'klass': klass, + 'args': args, + 'kwargs': kwargs, + 'state': state, + 'training_args': training_args, + } + th.save(package, save_to) + + +def capture_init(init): + @functools.wraps(init) + def __init__(self, *args, **kwargs): + self._init_args_kwargs = (args, kwargs) + init(self, *args, **kwargs) + + return __init__ + +class DummyPoolExecutor: + class DummyResult: + def __init__(self, func, *args, **kwargs): + self.func = func + self.args = args + self.kwargs = kwargs + + def result(self): + return self.func(*self.args, **self.kwargs) + + def __init__(self, workers=0): + pass + + def submit(self, func, *args, **kwargs): + return DummyPoolExecutor.DummyResult(func, *args, **kwargs) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + return diff --git a/gui_data/__pycache__/app_size_values.cpython-310.pyc b/gui_data/__pycache__/app_size_values.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8b203ddc8c1e3d62ecaa939fa3fbf7e75b0946aa Binary files /dev/null and b/gui_data/__pycache__/app_size_values.cpython-310.pyc differ diff --git a/gui_data/__pycache__/constants.cpython-310.pyc b/gui_data/__pycache__/constants.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91664dd730b4d98b128351501d942f5147904ee2 Binary files /dev/null and b/gui_data/__pycache__/constants.cpython-310.pyc differ diff --git a/gui_data/__pycache__/error_handling.cpython-310.pyc b/gui_data/__pycache__/error_handling.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2fb95ed98e93abbf4449dfa4c8679d27f0fff307 Binary files /dev/null and b/gui_data/__pycache__/error_handling.cpython-310.pyc differ diff --git a/gui_data/__pycache__/old_data_check.cpython-310.pyc b/gui_data/__pycache__/old_data_check.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9442c852c2440ae10ac2f1d21a6bd8af9b8777b Binary files /dev/null and b/gui_data/__pycache__/old_data_check.cpython-310.pyc differ diff --git a/gui_data/app_size_values.py b/gui_data/app_size_values.py new file mode 100644 index 0000000000000000000000000000000000000000..85c3ad472d3b65bf5a4bed4aee90267c0bfe5bab --- /dev/null +++ b/gui_data/app_size_values.py @@ -0,0 +1,371 @@ +import os +import platform +from screeninfo import get_monitors +from PIL import Image +from PIL import ImageTk + +OPERATING_SYSTEM = platform.system() + +def get_screen_height(): + monitors = get_monitors() + if len(monitors) == 0: + raise Exception("Failed to get screen height") + return monitors[0].height, monitors[0].width + +def scale_values(value): + if not SCALE_WIN_SIZE == 1920: + ratio = SCALE_WIN_SIZE/1920 # Approx. 
1.3333 for 2K + return value * ratio + else: + return value + +SCREEN_HIGHT, SCREEN_WIDTH = get_screen_height() +SCALE_WIN_SIZE = 1920 + +SCREEN_SIZE_VALUES = { + "normal": { + "credits_img":(100, 100), + ## App Size + 'IMAGE_HEIGHT': 140, + 'FILEPATHS_HEIGHT': 75, + 'OPTIONS_HEIGHT': 262, + 'CONVERSIONBUTTON_HEIGHT': 30, + 'COMMAND_HEIGHT': 141, + 'PROGRESS_HEIGHT': 25, + 'PADDING': 7, + 'WIDTH': 680 + }, + "small": { + "credits_img":(50, 50), + 'IMAGE_HEIGHT': 140, + 'FILEPATHS_HEIGHT': 75, + 'OPTIONS_HEIGHT': 262, + 'CONVERSIONBUTTON_HEIGHT': 30, + 'COMMAND_HEIGHT': 80, + 'PROGRESS_HEIGHT': 25, + 'PADDING': 5, + 'WIDTH': 680 + }, + "medium": { + "credits_img":(50, 50), + ## App Size + 'IMAGE_HEIGHT': 140, + 'FILEPATHS_HEIGHT': 75, + 'OPTIONS_HEIGHT': 262, + 'CONVERSIONBUTTON_HEIGHT': 30, + 'COMMAND_HEIGHT': 115, + 'PROGRESS_HEIGHT': 25, + 'PADDING': 7, + 'WIDTH': 680 + }, +} + +try: + if SCREEN_HIGHT >= 900: + determined_size = SCREEN_SIZE_VALUES["normal"] + elif SCREEN_HIGHT <= 720: + determined_size = SCREEN_SIZE_VALUES["small"] + else: + determined_size = SCREEN_SIZE_VALUES["medium"] +except: + determined_size = SCREEN_SIZE_VALUES["normal"] + +image_scale_1, image_scale_2 = 20, 30 + +class ImagePath(): + def __init__(self, base_path): + img_path = os.path.join(base_path, 'gui_data', 'img') + credits_path = os.path.join(img_path, 'credits.png') + donate_path = os.path.join(img_path, 'donate.png') + download_path = os.path.join(img_path, 'download.png') + efile_path = os.path.join(img_path, 'File.png') + help_path = os.path.join(img_path, 'help.png') + key_path = os.path.join(img_path, 'key.png') + stop_path = os.path.join(img_path, 'stop.png') + play_path = os.path.join(img_path, 'play.png') + pause_path = os.path.join(img_path, 'pause.png') + up_img_path = os.path.join(img_path, "up.png") + down_img_path = os.path.join(img_path, "down.png") + left_img_path = os.path.join(img_path, "left.png") + right_img_path = os.path.join(img_path, "right.png") + clear_img_path = os.path.join(img_path, "clear.png") + copy_img_path = os.path.join(img_path, "copy.png") + self.banner_path = os.path.join(img_path, 'UVR-banner.png') + + self.efile_img = self.open_image(path=efile_path,size=(image_scale_1, image_scale_1)) + self.stop_img = self.open_image(path=stop_path, size=(image_scale_1, image_scale_1)) + self.play_img = self.open_image(path=play_path, size=(image_scale_1, image_scale_1)) + self.pause_img = self.open_image(path=pause_path, size=(image_scale_1, image_scale_1)) + self.help_img = self.open_image(path=help_path, size=(image_scale_1, image_scale_1)) + self.download_img = self.open_image(path=download_path, size=(image_scale_2, image_scale_2)) + self.donate_img = self.open_image(path=donate_path, size=(image_scale_2, image_scale_2)) + self.key_img = self.open_image(path=key_path, size=(image_scale_2, image_scale_2)) + self.up_img = self.open_image(path=up_img_path, size=(image_scale_2, image_scale_2)) + self.down_img = self.open_image(path=down_img_path, size=(image_scale_2, image_scale_2)) + self.left_img = self.open_image(path=left_img_path, size=(image_scale_2, image_scale_2)) + self.right_img = self.open_image(path=right_img_path, size=(image_scale_2, image_scale_2)) + self.clear_img = self.open_image(path=clear_img_path, size=(image_scale_2, image_scale_2)) + self.copy_img = self.open_image(path=copy_img_path, size=(image_scale_2, image_scale_2)) + self.credits_img = self.open_image(path=credits_path, size=determined_size["credits_img"]) + + def open_image(self, path: str, size: tuple 
= None, keep_aspect: bool = True, rotate: int = 0) -> ImageTk.PhotoImage: + """ + Open the image on the path and apply given settings\n + Paramaters: + path(str): + Absolute path of the image + size(tuple): + first value - width + second value - height + keep_aspect(bool): + keep aspect ratio of image and resize + to maximum possible width and height + (maxima are given by size) + rotate(int): + clockwise rotation of image + Returns(ImageTk.PhotoImage): + Image of path + """ + img = Image.open(path).convert(mode='RGBA') + ratio = img.height/img.width + img = img.rotate(angle=-rotate) + if size is not None: + size = (int(size[0]), int(size[1])) + if keep_aspect: + img = img.resize((size[0], int(size[0] * ratio)), Image.ANTIALIAS) + else: + img = img.resize(size, Image.ANTIALIAS) + + return ImageTk.PhotoImage(img) + +#All Sizes Below Calibrated to 1080p! + +if OPERATING_SYSTEM=="Darwin": + FONT_SIZE_F1 = 13 + FONT_SIZE_F2 = 11 + FONT_SIZE_F3 = 12 + FONT_SIZE_0 = 9 + FONT_SIZE_1 = 11 + FONT_SIZE_2 = 12 + FONT_SIZE_3 = 13 + FONT_SIZE_4 = 14 + FONT_SIZE_5 = 15 + FONT_SIZE_6 = 17 + HELP_HINT_CHECKBOX_WIDTH = 13 + MDX_CHECKBOXS_WIDTH = 14 + VR_CHECKBOXS_WIDTH = 14 + ENSEMBLE_CHECKBOXS_WIDTH = 18 + DEMUCS_CHECKBOXS_WIDTH = 14 + DEMUCS_PRE_CHECKBOXS_WIDTH = 20 + GEN_SETTINGS_WIDTH = 17 + MENU_COMBOBOX_WIDTH = 16 + MENU_OPTION_WIDTH = 12 + READ_ONLY_COMBO_WIDTH = 35 + SETTINGS_BUT_WIDTH = 19 + VR_BUT_WIDTH = 16 + SET_MENUS_CHECK_WIDTH = 12 + COMBO_WIDTH = 14 + SET_VOC_SPLIT_CHECK_WIDTH = 21 +elif OPERATING_SYSTEM=="Linux": + HELP_HINT_CHECKBOX_WIDTH = 15 + MDX_CHECKBOXS_WIDTH = 16 + VR_CHECKBOXS_WIDTH = 16 + ENSEMBLE_CHECKBOXS_WIDTH = 20 + DEMUCS_CHECKBOXS_WIDTH = 16 + DEMUCS_PRE_CHECKBOXS_WIDTH = 24 + GEN_SETTINGS_WIDTH = 20 + MENU_COMBOBOX_WIDTH = 18 + MENU_OPTION_WIDTH = 12 + READ_ONLY_COMBO_WIDTH = 40 + SETTINGS_BUT_WIDTH = 23 + VR_BUT_WIDTH = 18 + SET_MENUS_CHECK_WIDTH = 13 + COMBO_WIDTH = 16 + SET_VOC_SPLIT_CHECK_WIDTH = 25 + FONT_SIZE_F1 = 10 + FONT_SIZE_F2 = 8 + FONT_SIZE_F3 = 9 + FONT_SIZE_0 = 7 + FONT_SIZE_1 = 8 + FONT_SIZE_2 = 9 + FONT_SIZE_3 = 10 + FONT_SIZE_4 = 11 + FONT_SIZE_5 = 13 + FONT_SIZE_6 = 15 +elif OPERATING_SYSTEM=="Windows": + HELP_HINT_CHECKBOX_WIDTH = 15 + MDX_CHECKBOXS_WIDTH = 14 + VR_CHECKBOXS_WIDTH = 14 + ENSEMBLE_CHECKBOXS_WIDTH = 20 + DEMUCS_CHECKBOXS_WIDTH = 14 + DEMUCS_PRE_CHECKBOXS_WIDTH = 20 + GEN_SETTINGS_WIDTH = 18 + MENU_COMBOBOX_WIDTH = 16 + MENU_OPTION_WIDTH = 12 + READ_ONLY_COMBO_WIDTH = 35 + SETTINGS_BUT_WIDTH = 20 + VR_BUT_WIDTH = 16 + SET_MENUS_CHECK_WIDTH = 13 + COMBO_WIDTH = 14 + SET_VOC_SPLIT_CHECK_WIDTH = 23 + FONT_SIZE_F1 = 10 + FONT_SIZE_F2 = 8 + FONT_SIZE_F3 = 9 + FONT_SIZE_0 = 7 + FONT_SIZE_1 = 8 + FONT_SIZE_2 = 9 + FONT_SIZE_3 = 10 + FONT_SIZE_4 = 11 + FONT_SIZE_5 = 13 + FONT_SIZE_6 = 15 + +#Main Size Values: +IMAGE_HEIGHT = determined_size["IMAGE_HEIGHT"] +FILEPATHS_HEIGHT = determined_size["FILEPATHS_HEIGHT"] +OPTIONS_HEIGHT = determined_size["OPTIONS_HEIGHT"] +CONVERSIONBUTTON_HEIGHT = determined_size["CONVERSIONBUTTON_HEIGHT"] +COMMAND_HEIGHT = determined_size["COMMAND_HEIGHT"] +PROGRESS_HEIGHT = determined_size["PROGRESS_HEIGHT"] +PADDING = determined_size["PADDING"] +WIDTH = determined_size["WIDTH"] + +# IMAGE_HEIGHT = 140 +# FILEPATHS_HEIGHT = 75 +# OPTIONS_HEIGHT = 262 +# CONVERSIONBUTTON_HEIGHT = 30 +# COMMAND_HEIGHT = 141 +# PROGRESS_HEIGHT = 25 +# PADDING = 7 +# WIDTH = 680 + +MENU_PADDING_1 = 3 +MENU_PADDING_2 = 10 +MENU_PADDING_3 = 15 +MENU_PADDING_4 = 3 + +#Main Frame Sizes +X_CONVERSION_BUTTON_1080P = 50 
+WIDTH_CONVERSION_BUTTON_1080P = -100 +HEIGHT_GENERIC_BUTTON_1080P = 35 +X_STOP_BUTTON_1080P = -10 - 35 +X_SETTINGS_BUTTON_1080P = -670 +X_PROGRESSBAR_1080P = 25 +WIDTH_PROGRESSBAR_1080P = -50 +X_CONSOLE_FRAME_1080P = 15 +WIDTH_CONSOLE_FRAME_1080P = -30 +HO_S = 7 + +#File Frame Sizes +FILEPATHS_FRAME_X = 10 +FILEPATHS_FRAME_Y = 155 +FILEPATHS_FRAME_WIDTH = -20 +MUSICFILE_BUTTON_X = 0 +MUSICFILE_BUTTON_Y = 5 +MUSICFILE_BUTTON_WIDTH = 0 +MUSICFILE_BUTTON_HEIGHT = -5 +MUSICFILE_ENTRY_X = 7.5 +MUSICFILE_ENTRY_WIDTH = -50 +MUSICFILE_ENTRY_HEIGHT = -5 +MUSICFILE_OPEN_X = -45 +MUSICFILE_OPEN_Y = 160 +MUSICFILE_OPEN_WIDTH = 35 +MUSICFILE_OPEN_HEIGHT = 33 +SAVETO_BUTTON_X = 0 +SAVETO_BUTTON_Y = 5 +SAVETO_BUTTON_WIDTH = 0 +SAVETO_BUTTON_HEIGHT = -5 +SAVETO_ENTRY_X = 7.5 +OPEN_BUTTON_X = 427.1 +OPEN_BUTTON_WIDTH = -427.4 +SAVETO_ENTRY_WIDTH = -50 +SAVETO_ENTRY_HEIGHT = -5 +SAVETO_OPEN_X = -45 +SAVETO_OPEN_Y = 197.5 +SAVETO_OPEN_WIDTH = 35 +SAVETO_OPEN_HEIGHT = 32 + +#Main Option menu +OPTIONS_FRAME_X = 10 +OPTIONS_FRAME_Y = 250 +OPTIONS_FRAME_WIDTH = -20 +FILEONE_LABEL_X = -28 +FILEONE_LABEL_WIDTH = -38 +FILETWO_LABEL_X = -32 +FILETWO_LABEL_WIDTH = -20 +TIME_WINDOW_LABEL_X = -43 +TIME_WINDOW_LABEL_WIDTH = 0 +INTRO_ANALYSIS_LABEL_X = -83 +INTRO_ANALYSIS_LABEL_WIDTH = -50 +INTRO_ANALYSIS_OPTION_X = -68 +DB_ANALYSIS_LABEL_X = 62 +DB_ANALYSIS_LABEL_WIDTH = -34 +DB_ANALYSIS_OPTION_X = 86 +WAV_TYPE_SET_LABEL_X = -43 +WAV_TYPE_SET_LABEL_WIDTH = 0 +ENTRY_WIDTH = 222 + +# Constants for the ensemble_listbox_Frame +ENSEMBLE_LISTBOX_FRAME_X = -25 +ENSEMBLE_LISTBOX_FRAME_Y = -20 +ENSEMBLE_LISTBOX_FRAME_WIDTH = 0 +ENSEMBLE_LISTBOX_FRAME_HEIGHT = 67 + +# Constants for the ensemble_listbox_scroll +ENSEMBLE_LISTBOX_SCROLL_X = 195 +ENSEMBLE_LISTBOX_SCROLL_Y = -20 +ENSEMBLE_LISTBOX_SCROLL_WIDTH = -48 +ENSEMBLE_LISTBOX_SCROLL_HEIGHT = 69 + +# Constants for Radio Buttons +RADIOBUTTON_X_WAV = 457 +RADIOBUTTON_X_FLAC = 300 +RADIOBUTTON_X_MP3 = 143 +RADIOBUTTON_Y = -5 +RADIOBUTTON_WIDTH = 0 +RADIOBUTTON_HEIGHT = 6 +MAIN_ROW_Y_1 = -15 +MAIN_ROW_Y_2 = -17 +MAIN_ROW_X_1 = -4 +MAIN_ROW_X_2 = 21 +MAIN_ROW_2_Y_1 = -15 +MAIN_ROW_2_Y_2 = -17 +MAIN_ROW_2_X_1 = -28 +MAIN_ROW_2_X_2 = 1 +LOW_MENU_Y_1 = 18 +LOW_MENU_Y_2 = 16 +SUB_ENT_ROW_X = -2 +MAIN_ROW_WIDTH = -53 +MAIN_ROW_ALIGN_WIDTH = -86 +CHECK_BOX_Y = 0 +CHECK_BOX_X = 20 +CHECK_BOX_WIDTH = -49 +CHECK_BOX_HEIGHT = 2 +LEFT_ROW_WIDTH = -10 +LABEL_HEIGHT = -5 +OPTION_HEIGHT = 8 +LABEL_X_OFFSET = -28 +LABEL_WIDTH = -38 +ENTRY_WIDTH = 179.5 +ENTRY_OPEN_BUTT_WIDTH = -185 +ENTRY_OPEN_BUTT_X_OFF = 405 +UPDATE_LABEL_WIDTH = 35 if OPERATING_SYSTEM == 'Linux' else 32 + +HEIGHT_CONSOLE_FRAME_1080P = COMMAND_HEIGHT + HO_S +LOW_MENU_Y = LOW_MENU_Y_1, LOW_MENU_Y_2 +MAIN_ROW_Y = MAIN_ROW_Y_1, MAIN_ROW_Y_2 +MAIN_ROW_X = MAIN_ROW_X_1, MAIN_ROW_X_2 +MAIN_ROW_2_Y = MAIN_ROW_2_Y_1, MAIN_ROW_2_Y_2 +MAIN_ROW_2_X = MAIN_ROW_2_X_1, MAIN_ROW_2_X_2 + +LABEL_Y = MAIN_ROW_Y[0] +ENTRY_Y = MAIN_ROW_Y[1] + +BUTTON_Y_1080P = IMAGE_HEIGHT + FILEPATHS_HEIGHT + OPTIONS_HEIGHT - 8 + PADDING*2 +HEIGHT_PROGRESSBAR_1080P = PROGRESS_HEIGHT +Y_OFFSET_PROGRESS_BAR_1080P = IMAGE_HEIGHT + FILEPATHS_HEIGHT + OPTIONS_HEIGHT + CONVERSIONBUTTON_HEIGHT + COMMAND_HEIGHT + PADDING*4 +Y_OFFSET_CONSOLE_FRAME_1080P = IMAGE_HEIGHT + FILEPATHS_HEIGHT + OPTIONS_HEIGHT + CONVERSIONBUTTON_HEIGHT + PADDING + X_PROGRESSBAR_1080P + +LABEL_Y_OFFSET = MAIN_ROW_Y[0] +ENTRY_X_OFFSET = SUB_ENT_ROW_X +ENTRY_Y_OFFSET = MAIN_ROW_Y[1] +OPTION_WIDTH = MAIN_ROW_ALIGN_WIDTH diff --git a/gui_data/change_log.txt b/gui_data/change_log.txt new file mode 
100644 index 0000000000000000000000000000000000000000..be400542bc4e0d0f410844f3b55a856567c9d42a --- /dev/null +++ b/gui_data/change_log.txt @@ -0,0 +1,93 @@ +Most Recent Changes: + +~ Fixed Download Center model list issue. +~ Fixed audio clip in ensemble mode. +~ Fixed output model name issue in ensemble mode. +~ Added "Batch Mode" for MDX-Net to increase performance. + ~ Batch Mode is more memory efficient. + ~ Batch Mode produces the best output, regardless of batch size. +~ Added Batch Mode for VR Architecture. +~ Added Mixer Mode for Demucs. + ~ This option may improve separation for some 4-stem models. + +Fixes & Changes going from UVR v5.4 to v5.5: + +~ The progress bar is now fully synced up with every process in the application. +~ Fixed low-resolution icon. +~ Added the ability to download models manually if the application can't connect + to the internet. +~ Drag-n-drop is functional across all os platforms. +~ Resolved mp3 tag issue in MacOS version. + +Performance: + +~ Model load times are faster. +~ Importing/exporting audio files is faster. + +MacOS M1 Notes: + +~ The GPU Conversion checkbox will enable MPS for GPU acceleration. However, + only the VR Architecture models are currently compatible with it. + +New Options: + +~ Select Saved Settings option - Allows the user to save the current settings + of the whole application. You can also load a saved setting or reset them to + the default. +~ Right-click menu - Allows for quick access to important options. +~ Help Hints option - When enabled, users can hover over options to see a pop-up + text that describes that option. The right-clicking option also allows copying + the "Help Hint" text. +~ Secondary Model Mode - This option is an expanded version of the "Demucs Model" + option that was only available to MDX-Net. Except now, this option is available + in all three AI Networks and for any stem. Any model can now be Secondary, and + the user can choose the amount of influence it has on the final result. +~ Robust caching for ensemble mode, allowing for much faster processing times. +~ Clicking the "Input" field will pop up a window allowing the user to review the selected audio inputs. Within this menu, users can: + ~ Remove inputs. + ~ Verify inputs. + ~ Create samples of chosen inputs. +~ "Sample Mode" option - Allows the user to process only part of a track to sample + settings or a model without running a full conversion. + ~ The number in the parentheses is the current number of seconds the generated + sample will be. + ~ You can choose the number of seconds to extract from the track in the "Additional + Settings" menu. + +VR Architecture: + +~ Ability to toggle "High-End Processing." +~ Ability to change the post-processing threshold. +~ Support for the latest VR architecture + ~ Crop Size and Batch Size are specifically for models using the latest + architecture only. + +MDX-NET: + +~ Denoise Output option results in cleaner results, + but the processing time will be longer. This option has replaced Noise Reduction. +~ Spectral Inversion option uses spectral inversion techniques for a + cleaner secondary stem result. This option may slow down the audio export process. +~ Secondary stem now has the same frequency cut-off as the main stem. + +Demucs: + +~ Demucs v4 models are now supported, including the 6-stem model. +~ Ability to combine remaining stems instead of inverting selected stem with the + mixture only when a user does not select "All Stems". 
+~ A Pre-process model that allows the user to run an inference through a robust + vocal or instrumental model and separate the remaining stems from its generated + instrumental mix. This option can significantly reduce vocal bleed in other + Demucs-generated non-vocal stems. + ~ The Pre-process model is intended for Demucs separations for all stems except + vocals and instrumentals. + +Ensemble Mode: + +~ Ensemble Mode has been extended to include the following: + ~ Averaging is a new algorithm that averages the final results. + ~ Unlimited models in the ensemble. + ~ Ability to save different ensembles. + ~ Ability to ensemble outputs for all individual stem types. + ~ Ability to choose unique ensemble algorithms. + ~ Ability to ensemble all 4 Demucs stems at once. \ No newline at end of file diff --git a/gui_data/complete_chime.wav b/gui_data/complete_chime.wav new file mode 100644 index 0000000000000000000000000000000000000000..c51ae89cbc82f110d890cd945139fd9dd9dd4640 Binary files /dev/null and b/gui_data/complete_chime.wav differ diff --git a/gui_data/constants.py b/gui_data/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..22c59186e622e4208708e0f9956cf991aafce5e1 --- /dev/null +++ b/gui_data/constants.py @@ -0,0 +1,1584 @@ +import platform + +#Platform Details +OPERATING_SYSTEM = platform.system() +SYSTEM_ARCH = platform.platform() +SYSTEM_PROC = platform.processor() +ARM = 'arm' + +is_macos = False + +CPU = 'cpu' +CUDA_DEVICE = 'cuda' +DIRECTML_DEVICE = "privateuseone" + +#MAIN_FONT_NAME = "Century Gothic" +OPT_SEPARATOR_SAVE = '─'*25 +BG_COLOR = '#0e0e0f' +FG_COLOR = '#13849f' + +#Model Types +VR_ARCH_TYPE = 'VR Arc' +MDX_ARCH_TYPE = 'MDX-Net' +DEMUCS_ARCH_TYPE = 'Demucs' +VR_ARCH_PM = 'VR Architecture' +ENSEMBLE_MODE = 'Ensemble Mode' +ENSEMBLE_STEM_CHECK = 'Ensemble Stem' +SECONDARY_MODEL = 'Secondary Model' +DEMUCS_6_STEM_MODEL = 'htdemucs_6s' +DEFAULT = "Default" +ALIGNMENT_TOOL = 'Alignment Tool Options' + +SINGLE_FILE = 'SINGLE_FILE' +MULTIPLE_FILE = 'MULTI_FILE' +MAIN_MULTIPLE_FILE = 'MAIN_MULTI_FILE' +CHOOSE_EXPORT_FIR = 'CHOOSE_EXPORT_FIR' + +DUAL = "dual" +FOUR_STEM = "fourstem" +ANY_STEM = "Any Stem" + +DEMUCS_V3_ARCH_TYPE = 'Demucs v3' +DEMUCS_V4_ARCH_TYPE = 'Demucs v4' +DEMUCS_NEWER_ARCH_TYPES = [DEMUCS_V3_ARCH_TYPE, DEMUCS_V4_ARCH_TYPE] + +DEMUCS_V1 = 'v1' +DEMUCS_V2 = 'v2' +DEMUCS_V3 = 'v3' +DEMUCS_V4 = 'v4' + +DEMUCS_V1_TAG = 'v1 | ' +DEMUCS_V2_TAG = 'v2 | ' +DEMUCS_V3_TAG = 'v3 | ' +DEMUCS_V4_TAG = 'v4 | ' +DEMUCS_NEWER_TAGS = [DEMUCS_V3_TAG, DEMUCS_V4_TAG] + +DEMUCS_VERSION_MAPPER = { + DEMUCS_V1:DEMUCS_V1_TAG, + DEMUCS_V2:DEMUCS_V2_TAG, + DEMUCS_V3:DEMUCS_V3_TAG, + DEMUCS_V4:DEMUCS_V4_TAG} + +#Download Center +DOWNLOAD_FAILED = 'Download Failed' +DOWNLOAD_STOPPED = 'Download Stopped' +DOWNLOAD_COMPLETE = 'Download Complete' +DOWNLOAD_UPDATE_COMPLETE = 'Update Download Complete' +SETTINGS_MENU_EXIT = 'exit' +NO_CONNECTION = 'No Internet Connection' +VIP_SELECTION = 'VIP:' +DEVELOPER_SELECTION = 'VIP:' +NO_NEW_MODELS = 'All Available Models Downloaded' +ENSEMBLE_PARTITION = ': ' +NO_MODEL = 'No Model Selected' +CHOOSE_MODEL = 'Choose Model' +SINGLE_DOWNLOAD = 'Downloading Item 1/1...' +DOWNLOADING_ITEM = 'Downloading Item' +FILE_EXISTS = 'File already exists!' +DOWNLOADING_UPDATE = 'Downloading Update...' 
+DOWNLOAD_MORE = 'Download More Models' +IS_KARAOKEE = "is_karaoke" +IS_BV_MODEL = "is_bv_model" +IS_BV_MODEL_REBAL = "is_bv_model_rebalanced" +INPUT_STEM_NAME = 'Input Stem Name' + +#Menu Options + +AUTO_SELECT = 'Auto' + +#LINKS +DOWNLOAD_CHECKS = "https://raw.githubusercontent.com/TRvlvr/application_data/main/filelists/download_checks.json" +MDX_MODEL_DATA_LINK = "https://raw.githubusercontent.com/TRvlvr/application_data/main/mdx_model_data/model_data_new.json" +VR_MODEL_DATA_LINK = "https://raw.githubusercontent.com/TRvlvr/application_data/main/vr_model_data/model_data_new.json" +MDX23_CONFIG_CHECKS = "https://raw.githubusercontent.com/TRvlvr/application_data/main/mdx_model_data/mdx_c_configs/" +BULLETIN_CHECK = "https://raw.githubusercontent.com/TRvlvr/application_data/main/bulletin.txt" + +DEMUCS_MODEL_NAME_DATA_LINK = "https://raw.githubusercontent.com/TRvlvr/application_data/main/demucs_model_data/model_name_mapper.json" +MDX_MODEL_NAME_DATA_LINK = "https://raw.githubusercontent.com/TRvlvr/application_data/main/mdx_model_data/model_name_mapper.json" + +DONATE_LINK_BMAC = "https://www.buymeacoffee.com/uvr5" +DONATE_LINK_PATREON = "https://www.patreon.com/uvr" + +#DOWNLOAD REPOS +NORMAL_REPO = "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/" +UPDATE_REPO = "https://github.com/TRvlvr/model_repo/releases/download/uvr_update_patches/" + +UPDATE_MAC_ARM_REPO = "https://github.com/Anjok07/ultimatevocalremovergui/releases/download/v5.6/Ultimate_Vocal_Remover_v5_6_MacOS_arm64.dmg" +UPDATE_MAC_X86_64_REPO = "https://github.com/Anjok07/ultimatevocalremovergui/releases/download/v5.6/Ultimate_Vocal_Remover_v5_6_MacOS_x86_64.dmg" +UPDATE_LINUX_REPO = "https://github.com/Anjok07/ultimatevocalremovergui#linux-installation" + +ISSUE_LINK = 'https://github.com/Anjok07/ultimatevocalremovergui/issues/new' +VIP_REPO = b'\xf3\xc2W\x19\x1foI)\xc2\xa9\xcc\xb67(Z\xf5',\ + b'gAAAAABjQAIQ-NpNMMxMedpKHHb7ze_nqB05hw0YhbOy3pFzuzDrfqumn8_qvraxEoUpZC5ZXC0gGvfDxFMqyq9VWbYKlA67SUFI_wZB6QoVyGI581vs7kaGfUqlXHIdDS6tQ_U-BfjbEAK9EU_74-R2zXjz8Xzekw==' +NO_CODE = 'incorrect_code' + +#Extensions +ONNX = '.onnx' +CKPT = '.ckpt' +CKPT_C = '.ckptc' +YAML = '.yaml' +PTH = '.pth' +TH_EXT = '.th' +JSON = '.json' + +#GUI Buttons +START_PROCESSING = 'Start Processing' +WAIT_PROCESSING = 'Please wait...' +STOP_PROCESSING = 'Halting process, please wait...' +LOADING_MODELS = 'Loading models...' 
+ +#---Messages and Logs---- + +MISSING_MODEL = 'missing' +MODEL_PRESENT = 'present' + +ALL_STEMS = 'All Stems' +VOCAL_STEM = 'Vocals' +INST_STEM = 'Instrumental' +OTHER_STEM = 'Other' +BASS_STEM = 'Bass' +DRUM_STEM = 'Drums' +GUITAR_STEM = 'Guitar' +PIANO_STEM = 'Piano' +SYNTH_STEM = 'Synthesizer' +STRINGS_STEM = 'Strings' +WOODWINDS_STEM = 'Woodwinds' +BRASS_STEM = 'Brass' +WIND_INST_STEM = 'Wind Inst' +NO_OTHER_STEM = 'No Other' +NO_BASS_STEM = 'No Bass' +NO_DRUM_STEM = 'No Drums' +NO_GUITAR_STEM = 'No Guitar' +NO_PIANO_STEM = 'No Piano' +NO_SYNTH_STEM = 'No Synthesizer' +NO_STRINGS_STEM = 'No Strings' +NO_WOODWINDS_STEM = 'No Woodwinds' +NO_WIND_INST_STEM = 'No Wind Inst' +NO_BRASS_STEM = 'No Brass' +PRIMARY_STEM = 'Primary Stem' +SECONDARY_STEM = 'Secondary Stem' +LEAD_VOCAL_STEM = 'lead_only' +BV_VOCAL_STEM = 'backing_only' +LEAD_VOCAL_STEM_I = 'with_lead_vocals' +BV_VOCAL_STEM_I = 'with_backing_vocals' +LEAD_VOCAL_STEM_LABEL = 'Lead Vocals' +BV_VOCAL_STEM_LABEL = 'Backing Vocals' + +VOCAL_STEM_ONLY = f'{VOCAL_STEM} Only' +INST_STEM_ONLY = f'{INST_STEM} Only' +PRIMARY_STEM_ONLY = f'{PRIMARY_STEM} Only' + +IS_SAVE_INST_ONLY = f'save_only_inst' +IS_SAVE_VOC_ONLY = f'save_only_voc' + +DEVERB_MAPPER = {'Main Vocals Only':VOCAL_STEM, + 'Lead Vocals Only':LEAD_VOCAL_STEM_LABEL, + 'Backing Vocals Only':BV_VOCAL_STEM_LABEL, + 'All Vocal Types':'ALL'} + +BALANCE_VALUES = [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9] + +#Other Constants +DEMUCS_2_SOURCE = ["instrumental", "vocals"] +DEMUCS_4_SOURCE = ["drums", "bass", "other", "vocals"] + +DEMUCS_2_SOURCE_MAPPER = { + INST_STEM: 0, + VOCAL_STEM: 1} + +DEMUCS_4_SOURCE_MAPPER = { + BASS_STEM: 0, + DRUM_STEM: 1, + OTHER_STEM: 2, + VOCAL_STEM: 3} + +DEMUCS_6_SOURCE_MAPPER = { + BASS_STEM:0, + DRUM_STEM:1, + OTHER_STEM:2, + VOCAL_STEM:3, + GUITAR_STEM:4, + PIANO_STEM:5} + +DEMUCS_4_SOURCE_LIST = [BASS_STEM, DRUM_STEM, OTHER_STEM, VOCAL_STEM] +DEMUCS_6_SOURCE_LIST = [BASS_STEM, DRUM_STEM, OTHER_STEM, VOCAL_STEM, GUITAR_STEM, PIANO_STEM] + +DEMUCS_UVR_MODEL = 'UVR_Model' + +CHOOSE_STEM_PAIR = 'Choose Stem Pair' + +STEM_SET_MENU = (VOCAL_STEM, + INST_STEM, + OTHER_STEM, + BASS_STEM, + DRUM_STEM, + GUITAR_STEM, + PIANO_STEM, + SYNTH_STEM, + STRINGS_STEM, + WOODWINDS_STEM, + BRASS_STEM, + WIND_INST_STEM) + +STEM_SET_MENU_ONLY = list(STEM_SET_MENU) + [OPT_SEPARATOR_SAVE, INPUT_STEM_NAME] + +STEM_SET_MENU_2 = ( + OTHER_STEM, + BASS_STEM, + DRUM_STEM, + GUITAR_STEM, + PIANO_STEM, + SYNTH_STEM, + STRINGS_STEM, + WOODWINDS_STEM, + BRASS_STEM, + WIND_INST_STEM, + "Noise", + "Reverb") + +STEM_PAIR_MAPPER = { + VOCAL_STEM: INST_STEM, + INST_STEM: VOCAL_STEM, + LEAD_VOCAL_STEM: BV_VOCAL_STEM, + BV_VOCAL_STEM: LEAD_VOCAL_STEM, + PRIMARY_STEM: SECONDARY_STEM} + +STEM_PAIR_MAPPER_FULL = { + VOCAL_STEM: INST_STEM, + INST_STEM: VOCAL_STEM, + OTHER_STEM: NO_OTHER_STEM, + BASS_STEM: NO_BASS_STEM, + DRUM_STEM: NO_DRUM_STEM, + GUITAR_STEM: NO_GUITAR_STEM, + PIANO_STEM: NO_PIANO_STEM, + SYNTH_STEM: NO_SYNTH_STEM, + STRINGS_STEM: NO_STRINGS_STEM, + WOODWINDS_STEM: NO_WOODWINDS_STEM, + BRASS_STEM: NO_BRASS_STEM, + WIND_INST_STEM: NO_WIND_INST_STEM, + NO_OTHER_STEM: OTHER_STEM, + NO_BASS_STEM: BASS_STEM, + NO_DRUM_STEM: DRUM_STEM, + NO_GUITAR_STEM: GUITAR_STEM, + NO_PIANO_STEM: PIANO_STEM, + NO_SYNTH_STEM: SYNTH_STEM, + NO_STRINGS_STEM: STRINGS_STEM, + NO_WOODWINDS_STEM: WOODWINDS_STEM, + NO_BRASS_STEM: BRASS_STEM, + NO_WIND_INST_STEM: WIND_INST_STEM, + PRIMARY_STEM: SECONDARY_STEM} + +NO_STEM = "No " + +NON_ACCOM_STEMS = ( + VOCAL_STEM, + OTHER_STEM, + BASS_STEM, + 
DRUM_STEM, + GUITAR_STEM, + PIANO_STEM, + SYNTH_STEM, + STRINGS_STEM, + WOODWINDS_STEM, + BRASS_STEM, + WIND_INST_STEM) + +MDX_NET_FREQ_CUT = [VOCAL_STEM, INST_STEM] + +DEMUCS_4_STEM_OPTIONS = (ALL_STEMS, VOCAL_STEM, OTHER_STEM, BASS_STEM, DRUM_STEM) +DEMUCS_6_STEM_OPTIONS = (ALL_STEMS, VOCAL_STEM, OTHER_STEM, BASS_STEM, DRUM_STEM, GUITAR_STEM, PIANO_STEM) +DEMUCS_2_STEM_OPTIONS = (VOCAL_STEM, INST_STEM) +DEMUCS_4_STEM_CHECK = (OTHER_STEM, BASS_STEM, DRUM_STEM) + +#Menu Dropdowns + +VOCAL_PAIR = f'{VOCAL_STEM}/{INST_STEM}' +INST_PAIR = f'{INST_STEM}/{VOCAL_STEM}' +OTHER_PAIR = f'{OTHER_STEM}/{NO_OTHER_STEM}' +DRUM_PAIR = f'{DRUM_STEM}/{NO_DRUM_STEM}' +BASS_PAIR = f'{BASS_STEM}/{NO_BASS_STEM}' +FOUR_STEM_ENSEMBLE = '4 Stem Ensemble' +MULTI_STEM_ENSEMBLE = 'Multi-stem Ensemble' + +ENSEMBLE_MAIN_STEM = (CHOOSE_STEM_PAIR, VOCAL_PAIR, OTHER_PAIR, DRUM_PAIR, BASS_PAIR, FOUR_STEM_ENSEMBLE, MULTI_STEM_ENSEMBLE) + +MIN_SPEC = 'Min Spec' +MAX_SPEC = 'Max Spec' +AUDIO_AVERAGE = 'Average' + +MAX_MIN = f'{MAX_SPEC}/{MIN_SPEC}' +MAX_MAX = f'{MAX_SPEC}/{MAX_SPEC}' +MAX_AVE = f'{MAX_SPEC}/{AUDIO_AVERAGE}' +MIN_MAX = f'{MIN_SPEC}/{MAX_SPEC}' +MIN_MIX = f'{MIN_SPEC}/{MIN_SPEC}' +MIN_AVE = f'{MIN_SPEC}/{AUDIO_AVERAGE}' +AVE_MAX = f'{AUDIO_AVERAGE}/{MAX_SPEC}' +AVE_MIN = f'{AUDIO_AVERAGE}/{MIN_SPEC}' +AVE_AVE = f'{AUDIO_AVERAGE}/{AUDIO_AVERAGE}' + +ENSEMBLE_TYPE = (MAX_MIN, MAX_MAX, MAX_AVE, MIN_MAX, MIN_MIX, MIN_AVE, AVE_MAX, AVE_MIN, AVE_AVE) +ENSEMBLE_TYPE_4_STEM = (MAX_SPEC, MIN_SPEC, AUDIO_AVERAGE) + +BATCH_MODE = 'Batch Mode' +BETA_VERSION = 'BETA' +DEF_OPT = 'Default' +USER_INPUT = "User Input" +OPT_SEPARATOR = '─'*65 + +CHUNKS = (AUTO_SELECT, '1', '5', '10', '15', '20', + '25', '30', '35', '40', '45', '50', + '55', '60', '65', '70', '75', '80', + '85', '90', '95', 'Full') + +BATCH_SIZE = (DEF_OPT, '2', '3', '4', '5', + '6', '7', '8', '9', '10') + +VOL_COMPENSATION = (AUTO_SELECT, '1.035', '1.08') + +MARGIN_SIZE = ('44100', '22050', '11025') + +AUDIO_TOOLS = 'Audio Tools' + +MANUAL_ENSEMBLE = 'Manual Ensemble' +TIME_STRETCH = 'Time Stretch' +CHANGE_PITCH = 'Change Pitch' +ALIGN_INPUTS = 'Align Inputs' +MATCH_INPUTS = 'Matchering' +COMBINE_INPUTS = 'Combine Inputs' + +if OPERATING_SYSTEM == 'Windows' or OPERATING_SYSTEM == 'Darwin': + AUDIO_TOOL_OPTIONS = (MANUAL_ENSEMBLE, TIME_STRETCH, CHANGE_PITCH, ALIGN_INPUTS, MATCH_INPUTS) +else: + AUDIO_TOOL_OPTIONS = (MANUAL_ENSEMBLE, ALIGN_INPUTS, MATCH_INPUTS) + +MANUAL_ENSEMBLE_OPTIONS = (MIN_SPEC, MAX_SPEC, AUDIO_AVERAGE, COMBINE_INPUTS) + +PROCESS_METHODS = (VR_ARCH_PM, MDX_ARCH_TYPE, DEMUCS_ARCH_TYPE, ENSEMBLE_MODE, AUDIO_TOOLS) + +DEMUCS_SEGMENTS = (DEF_OPT, '1', '5', '10', '15', '20', + '25', '30', '35', '40', '45', '50', + '55', '60', '65', '70', '75', '80', + '85', '90', '95', '100') + +DEMUCS_SHIFTS = (0, 1, 2, 3, 4, 5, + 6, 7, 8, 9, 10, 11, + 12, 13, 14, 15, 16, 17, + 18, 19, 20) +SEMI_DEF = ['0'] +SEMITONE_SEL = (-12,-11,-10,-9,-8,-7,-6,-5,-4,-3,-2,-1,0,1,2,3,4,5,6,7,8,9,10,11,12) + +NOUT_SEL = (8, 16, 32, 48, 64) +NOUT_LSTM_SEL = (64, 128) + +DEMUCS_OVERLAP = (0.25, 0.50, 0.75, 0.99) +MDX_OVERLAP = (DEF_OPT, 0.25, 0.50, 0.75, 0.99) +MDX23_OVERLAP = range(2, 51) +VR_AGGRESSION = range(0, 51) + +TIME_WINDOW_MAPPER = { + "None": None, + "1": [0.0625], + "2": [0.125], + "3": [0.25], + "4": [0.5], + "5": [0.75], + "6": [1], + "7": [2], + "Shifts: Low": [0.0625, 0.5], + "Shifts: Medium": [0.0625, 0.125, 0.5], + "Shifts: High": [0.0625, 0.125, 0.25, 0.5] + #"Shifts: Very High": [0.0625, 0.125, 0.25, 0.5, 0.75, 1], +} + +INTRO_MAPPER = { + "Default": [10], 
+ "1": [8], + "2": [6], + "3": [4], + "4": [2], + "Shifts: Low": [1, 10], + "Shifts: Medium": [1, 10, 8], + "Shifts: High": [1, 10, 8, 6, 4] + } + +VOLUME_MAPPER = { + "None": (0, [0]), + "Low": (-4, range(0, 8)), + "Medium": (-6, range(0, 12)), + "High": (-6, [x * 0.5 for x in range(0, 25)]), + "Very High": (-10, [x * 0.5 for x in range(0, 41)])} + #"Max": (-10, [x * 0.3 for x in range(0, int(20 / 0.3) + 1)])} + +PHASE_MAPPER = { + "None": [0], + "Shifts Low": [0, 180], + "Shifts Medium": [0], + "Shifts High": [0], + "Shifts Very High": [0],} + +NONE_P = "None" +VLOW_P = "Shifts: Very Low" +LOW_P = "Shifts: Low" +MED_P = "Shifts: Medium" +HIGH_P = "Shifts: High" +VHIGH_P = "Shifts: Very High" +VMAX_P = "Shifts: Maximum" + +PHASE_SHIFTS_OPT = { + NONE_P:190, + VLOW_P:180, + LOW_P:90, + MED_P:45, + HIGH_P:20, + VHIGH_P:10, + VMAX_P:1,} + +VR_WINDOW = ('320', '512','1024') +VR_CROP = ('256', '512', '1024') +POST_PROCESSES_THREASHOLD_VALUES = ('0.1', '0.2', '0.3') + +MDX_POP_PRO = ('MDX-NET_Noise_Profile_14_kHz', 'MDX-NET_Noise_Profile_17_kHz', 'MDX-NET_Noise_Profile_Full_Band') +MDX_POP_STEMS = ('Vocals', 'Instrumental', 'Other', 'Drums', 'Bass') +MDX_POP_NFFT = ('4096', '5120', '6144', '7680', '8192', '16384') +MDX_POP_DIMF = ('2048', '3072', '4096') +DENOISE_NONE, DENOISE_S, DENOISE_M = 'None', 'Standard', 'Denoise Model' +MDX_DENOISE_OPTION = [DENOISE_NONE, DENOISE_S, DENOISE_M] +MDX_SEGMENTS = list(range(32, 4000+1, 32)) + +SAVE_ENSEMBLE = 'Save Ensemble' +CLEAR_ENSEMBLE = 'Clear Selection(s)' +MENU_SEPARATOR = 35*'•' +CHOOSE_ENSEMBLE_OPTION = 'Choose Option' +ALL_TYPES = 'ALL' +INVALID_ENTRY = 'Invalid Input, Please Try Again' +ENSEMBLE_INPUT_RULE = '1. Only letters, numbers, spaces, and dashes allowed.\n2. No dashes or spaces at the start or end of input.' +STEM_INPUT_RULE = '1. Only words with no spaces are allowed.\n2. No spaces, numbers, or special characters.' 
+ +ENSEMBLE_OPTIONS = [OPT_SEPARATOR_SAVE, SAVE_ENSEMBLE, CLEAR_ENSEMBLE] +ENSEMBLE_CHECK = 'ensemble check' +KARAOKEE_CHECK = 'kara check' + +AUTO_PHASE = "Automatic" +POSITIVE_PHASE = "Positive Phase" +NEGATIVE_PHASE = "Negative Phase" +OFF_PHASE = "Native Phase" + +ALIGN_PHASE_OPTIONS = [AUTO_PHASE, POSITIVE_PHASE, NEGATIVE_PHASE, OFF_PHASE] + +SELECT_SAVED_ENSEMBLE = 'Select Saved Ensemble' +SELECT_SAVED_SETTING = 'Select Saved Setting' +ENSEMBLE_OPTION = "Ensemble Customization Options" +MDX_OPTION = "Advanced MDX-Net Options" +DEMUCS_OPTION = "Advanced Demucs Options" +VR_OPTION = "Advanced VR Options" +HELP_OPTION = "Open Information Guide" +ERROR_OPTION = "Open Error Log" +VERIFY_BEGIN = 'Verifying file ' +SAMPLE_BEGIN = 'Creating Sample ' +MODEL_MISSING_CHECK = 'Model Missing:' +OPTION_LIST = [VR_OPTION, MDX_OPTION, DEMUCS_OPTION, ENSEMBLE_OPTION, ALIGNMENT_TOOL, HELP_OPTION, ERROR_OPTION] + +#Menu Strings +VR_MENU ='VR Menu' +DEMUCS_MENU ='Demucs Menu' +MDX_MENU ='MDX-Net Menu' +ENSEMBLE_MENU ='Ensemble Menu' +HELP_MENU ='Help Menu' +ERROR_MENU ='Error Log' +INPUTS_MENU ='Inputs Menu' +ALIGN_MENU ='Align Menu' + +# Audio Player +PLAYING_SONG = ": Playing" +PAUSE_SONG = ": Paused" +STOP_SONG = ": Stopped" + +SELECTED_VER = 'Selected' +DETECTED_VER = 'Detected' + +SAMPLE_MODE_CHECKBOX = lambda v:f'Sample Mode ({v}s)' +REMOVED_FILES = lambda r, e:f'Audio Input Verification Report:\n\nRemoved Files:\n\n{r}\n\nError Details:\n\n{e}' +ADVANCED_SETTINGS = (ENSEMBLE_OPTION, MDX_OPTION, DEMUCS_OPTION, VR_OPTION, HELP_OPTION, ERROR_OPTION) + +WAV = 'WAV' +FLAC = 'FLAC' +MP3 = 'MP3' + +MP3_BIT_RATES = ('96k', '128k', '160k', '224k', '256k', '320k') +WAV_TYPE = ('PCM_U8', 'PCM_16', 'PCM_24', 'PCM_32', '32-bit Float', '64-bit Float') +GPU_DEVICE_NUM_OPTS = (DEFAULT, '0', '1', '2', '3', '4', '5', '6', '7', '8') + +SELECT_SAVED_SET = 'Choose Option' +SAVE_SETTINGS = 'Save Current Settings' +RESET_TO_DEFAULT = 'Reset to Default' +RESET_FULL_TO_DEFAULT = 'Reset to Default' +RESET_PM_TO_DEFAULT = 'Reset All Application Settings to Default' + +SAVE_SET_OPTIONS = [OPT_SEPARATOR_SAVE, SAVE_SETTINGS, RESET_TO_DEFAULT] + +TIME_PITCH = ('1.0', '2.0', '3.0', '4.0') +TIME_TEXT = '_time_stretched' +PITCH_TEXT = '_pitch_shifted' + +#RegEx Input Validation +REG_PITCH = r'^[-+]?(1[0]|[0-9]([.][0-9]*)?)$' +REG_TIME = r'^[+]?(1[0]|[0-9]([.][0-9]*)?)$' +REG_COMPENSATION = r'\b^(1[0]|[0-9]([.][0-9]*)?|Auto|None)$\b' +REG_THES_POSTPORCESS = r'\b^([0]([.][0-9]{0,6})?)$\b' +REG_CHUNKS = r'\b^(200|1[0-9][0-9]|[1-9][0-9]?|Auto|Full)$\b' +REG_CHUNKS_DEMUCS = r'\b^(200|1[0-9][0-9]|[1-9][0-9]?|Auto|Full)$\b' +REG_MARGIN = r'\b^[0-9]*$\b' +REG_SEGMENTS = r'\b^(200|1[0-9][0-9]|[1-9][0-9]?|Default)$\b' +REG_SAVE_INPUT = r'\b^([a-zA-Z0-9 -]{0,25})$\b' +REG_INPUT_STEM_NAME = r'^(Wind Inst|[a-zA-Z]{1,25})$' +REG_SEMITONES = r'^-?(20\.00|[01]?\d(\.\d{1,2})?|20)$' +REG_AGGRESSION = r'^[-+]?[0-9]\d*?$' +REG_WINDOW = r'\b^[0-9]{0,4}$\b' +REG_SHIFTS = r'\b^[0-9]*$\b' +REG_BATCHES = r'\b^([0-9]*?|Default)$\b' +REG_OVERLAP = r'\b^([0]([.][0-9]{0,6})?|Default)$\b'#r"(Default|[0-9]+(\.[0-9]+)?)"# +REG_OVERLAP23 = r'\b^([1][0-9]|[2-9][0-9]*|Default)$\b'#r'\b^([2-9][0-9]*?|Default)$\b' +REG_MDX_SEG = r'\b(?:' + '|'.join([str(num) for num in range(32, 1000001, 32)]) + r')\b' +REG_ALIGN = r'^[-+]?[0-9]\d*?$' +REG_VOL_COMP = r'^\d+\.\d{1,9}$' + +# Sub Menu +VR_ARCH_SETTING_LOAD = 'Load for VR Arch' +MDX_SETTING_LOAD = 'Load for MDX-Net' +DEMUCS_SETTING_LOAD = 'Load for Demucs' +ALL_ARCH_SETTING_LOAD = 'Load for Full Application' + +# 
Mappers + +DEFAULT_DATA = { + 'chosen_process_method': MDX_ARCH_TYPE, + 'vr_model': CHOOSE_MODEL, + 'aggression_setting': 5, + 'window_size': 512, + 'mdx_segment_size': 256, + 'batch_size': DEF_OPT, + 'crop_size': 256, + 'is_tta': False, + 'is_output_image': False, + 'is_post_process': False, + 'is_high_end_process': False, + 'post_process_threshold': 0.2, + 'vr_voc_inst_secondary_model': NO_MODEL, + 'vr_other_secondary_model': NO_MODEL, + 'vr_bass_secondary_model': NO_MODEL, + 'vr_drums_secondary_model': NO_MODEL, + 'vr_is_secondary_model_activate': False, + 'vr_voc_inst_secondary_model_scale': 0.9, + 'vr_other_secondary_model_scale': 0.7, + 'vr_bass_secondary_model_scale': 0.5, + 'vr_drums_secondary_model_scale': 0.5, + 'demucs_model': CHOOSE_MODEL, + 'segment': DEMUCS_SEGMENTS[0], + 'overlap': DEMUCS_OVERLAP[0], + 'overlap_mdx': MDX_OVERLAP[0], + 'overlap_mdx23': '8', + 'shifts': 2, + 'chunks_demucs': CHUNKS[0], + 'margin_demucs': 44100, + 'is_chunk_demucs': False, + 'is_chunk_mdxnet': False, + 'is_primary_stem_only_Demucs': False, + 'is_secondary_stem_only_Demucs': False, + 'is_split_mode': True, + 'is_demucs_combine_stems': True,# + 'is_mdx23_combine_stems': True,# + 'demucs_voc_inst_secondary_model': NO_MODEL, + 'demucs_other_secondary_model': NO_MODEL, + 'demucs_bass_secondary_model': NO_MODEL, + 'demucs_drums_secondary_model': NO_MODEL, + 'demucs_is_secondary_model_activate': False, + 'demucs_voc_inst_secondary_model_scale': 0.9, + 'demucs_other_secondary_model_scale': 0.7, + 'demucs_bass_secondary_model_scale': 0.5, + 'demucs_drums_secondary_model_scale': 0.5, + 'demucs_stems': ALL_STEMS, + 'demucs_pre_proc_model': NO_MODEL, + 'is_demucs_pre_proc_model_activate': False, + 'is_demucs_pre_proc_model_inst_mix': False, + 'mdx_net_model': CHOOSE_MODEL, + 'chunks': CHUNKS[0], + 'margin': 44100, + 'compensate': AUTO_SELECT, + 'is_denoise': False,# + 'denoise_option': 'None',# + 'phase_option': AUTO_PHASE, + 'phase_shifts': NONE_P,# + 'is_save_align': False,#, + 'is_match_frequency_pitch': True,# + 'is_match_silence': True,# + 'is_spec_match': False,# + 'is_mdx_c_seg_def': False, + 'is_invert_spec': False, # + 'is_deverb_vocals': False, # + 'deverb_vocal_opt': 'Main Vocals Only', # + 'voc_split_save_opt': 'Lead Only', # + 'is_mixer_mode': False, + 'mdx_batch_size': DEF_OPT, + 'mdx_voc_inst_secondary_model': NO_MODEL, + 'mdx_other_secondary_model': NO_MODEL, + 'mdx_bass_secondary_model': NO_MODEL, + 'mdx_drums_secondary_model': NO_MODEL, + 'mdx_is_secondary_model_activate': False, + 'mdx_voc_inst_secondary_model_scale': 0.9, + 'mdx_other_secondary_model_scale': 0.7, + 'mdx_bass_secondary_model_scale': 0.5, + 'mdx_drums_secondary_model_scale': 0.5, + 'mdx_stems': ALL_STEMS, + 'is_save_all_outputs_ensemble': True, + 'is_append_ensemble_name': False, + 'chosen_audio_tool': AUDIO_TOOL_OPTIONS[0], + 'choose_algorithm': MANUAL_ENSEMBLE_OPTIONS[0], + 'time_stretch_rate': 2.0, + 'pitch_rate': 2.0, + 'is_time_correction': True, + 'is_gpu_conversion': False, + 'is_primary_stem_only': False, + 'is_secondary_stem_only': False, + 'is_testing_audio': False,# + 'is_auto_update_model_params': True,# + 'is_add_model_name': False, + 'is_accept_any_input': False, + 'is_task_complete': False, + 'is_normalization': False, + 'is_use_opencl': False, + 'is_wav_ensemble': False, + 'is_create_model_folder': False, + 'mp3_bit_set': '320k',# + 'semitone_shift': '0',# + 'save_format': WAV, + 'wav_type_set': 'PCM_16', + 'device_set': DEFAULT, + 'user_code': '', + 'export_path': '', + 'input_paths': [], + 'lastDir': 
None, + 'time_window': "3", + 'intro_analysis': DEFAULT, + 'db_analysis': "Medium", + 'fileOneEntry': '', + 'fileOneEntry_Full': '', + 'fileTwoEntry': '', + 'fileTwoEntry_Full': '', + 'DualBatch_inputPaths': [], + 'model_hash_table': {}, + 'help_hints_var': True, + 'set_vocal_splitter': NO_MODEL, + 'is_set_vocal_splitter': False,# + 'is_save_inst_set_vocal_splitter': False,# + 'model_sample_mode': False, + 'model_sample_mode_duration': 30 +} + +SETTING_CHECK = ('vr_model', + 'aggression_setting', + 'window_size', + 'mdx_segment_size', + 'batch_size', + 'crop_size', + 'is_tta', + 'is_output_image', + 'is_post_process', + 'is_high_end_process', + 'post_process_threshold', + 'vr_voc_inst_secondary_model', + 'vr_other_secondary_model', + 'vr_bass_secondary_model', + 'vr_drums_secondary_model', + 'vr_is_secondary_model_activate', + 'vr_voc_inst_secondary_model_scale', + 'vr_other_secondary_model_scale', + 'vr_bass_secondary_model_scale', + 'vr_drums_secondary_model_scale', + 'demucs_model', + 'segment', + 'overlap', + 'overlap_mdx', + 'shifts', + 'chunks_demucs', + 'margin_demucs', + 'is_chunk_demucs', + 'is_primary_stem_only_Demucs', + 'is_secondary_stem_only_Demucs', + 'is_split_mode', + 'is_demucs_combine_stems',# + 'is_mdx23_combine_stems',# + 'demucs_voc_inst_secondary_model', + 'demucs_other_secondary_model', + 'demucs_bass_secondary_model', + 'demucs_drums_secondary_model', + 'demucs_is_secondary_model_activate', + 'demucs_voc_inst_secondary_model_scale', + 'demucs_other_secondary_model_scale', + 'demucs_bass_secondary_model_scale', + 'demucs_drums_secondary_model_scale', + 'demucs_stems', + 'mdx_net_model', + 'chunks', + 'margin', + 'compensate', + 'is_denoise',# + 'denoise_option',# + 'phase_option',# + 'phase_shifts',# + 'is_save_align',#, + 'is_match_silence', + 'is_spec_match',#, + 'is_match_frequency_pitch',# + 'is_mdx_c_seg_def', + 'is_invert_spec',# + 'is_deverb_vocals',# + 'deverb_vocal_opt',# + 'voc_split_save_opt',# + 'mdx_batch_size', + 'mdx_voc_inst_secondary_model', + 'mdx_other_secondary_model', + 'mdx_bass_secondary_model', + 'mdx_drums_secondary_model', + 'mdx_is_secondary_model_activate', + 'mdx_voc_inst_secondary_model_scale', + 'mdx_other_secondary_model_scale', + 'mdx_bass_secondary_model_scale', + 'mdx_drums_secondary_model_scale', + 'is_save_all_outputs_ensemble', + 'is_append_ensemble_name', + 'chosen_audio_tool', + 'choose_algorithm', + 'time_stretch_rate', + 'pitch_rate', + 'is_time_correction', + 'is_primary_stem_only', + 'is_secondary_stem_only', + 'is_testing_audio',# + 'is_auto_update_model_params',# + 'is_add_model_name', + "is_accept_any_input", + 'is_task_complete', + 'is_create_model_folder', + 'mp3_bit_set',# + 'semitone_shift',# + 'save_format', + 'wav_type_set', + 'device_set', + 'user_code', + 'is_gpu_conversion', + 'is_normalization', + 'is_use_opencl', + 'is_wav_ensemble', + 'help_hints_var', + 'set_vocal_splitter', + 'is_set_vocal_splitter',# + 'is_save_inst_set_vocal_splitter',# + 'model_sample_mode', + 'model_sample_mode_duration', + 'time_window', + 'intro_analysis', + 'db_analysis', + 'fileOneEntry', + 'fileOneEntry_Full', + 'fileTwoEntry', + 'fileTwoEntry_Full', + 'DualBatch_inputPaths' + ) + +NEW_LINES = "\n\n" +NEW_LINE = "\n" +NO_LINE = '' + +FFMPEG_EXT = (".aac", ".aiff", ".alac" ,".flac", ".FLAC", ".mov", ".mp4", ".MP4", + ".m4a", ".M4A", ".mp2", ".mp3", "MP3", ".mpc", ".mpc8", + ".mpeg", ".ogg", ".OGG", ".tta", ".wav", ".wave", ".WAV", ".WAVE", ".wma", ".webm", ".eac3", ".mkv", ".opus", ".OPUS") + +FFMPEG_MORE_EXT = (".aa", ".aac", 
".ac3", ".aiff", ".alac", ".avi", ".f4v",".flac", ".flic", ".flv", + ".m4v",".mlv", ".mov", ".mp4", ".m4a", ".mp2", ".mp3", ".mp4", ".mpc", ".mpc8", + ".mpeg", ".ogg", ".tta", ".tty", ".vcd", ".wav", ".wma") +ANY_EXT = "" + +# Secondary Menu Constants + +VOCAL_PAIR_PLACEMENT = 1, 2, 3, 4 +OTHER_PAIR_PLACEMENT = 5, 6, 7, 8 +BASS_PAIR_PLACEMENT = 9, 10, 11, 12 +DRUMS_PAIR_PLACEMENT = 13, 14, 15, 16 + +# Drag n Drop String Checks + +DOUBLE_BRACKET = "} {" +RIGHT_BRACKET = "}" +LEFT_BRACKET = "{" +#DND CONSTS + +MAC_DND_CHECK = ('/Users/', + '/Applications/', + '/Library/', + '/System/') +LINUX_DND_CHECK = ('/home/', + '/usr/') +WINDOWS_DND_CHECK = ('A:', 'B:', 'C:', 'D:', 'E:', 'F:', 'G:', 'H:', 'I:', 'J:', 'K:', 'L:', 'M:', 'N:', 'O:', 'P:', 'Q:', 'R:', 'S:', 'T:', 'U:', 'V:', 'W:', 'X:', 'Y:', 'Z:') + +WOOD_INST_MODEL_HASH = '0ec76fd9e65f81d8b4fbd13af4826ed8' +WOOD_INST_PARAMS = { + "vr_model_param": "4band_v3", + "primary_stem": NO_WIND_INST_STEM + } + +READ_ONLY = 'readonly' + +FILE_1 = 'file1' +FILE_2 = 'file2' + +FILE_1_LB = 'file1_lb' +FILE_2_LB = 'file1_2b' +BATCH_MODE_DUAL = " : Batch Mode" + +CODEC_DICT = { + 'PCM_U8': {"sample_width": 1, "codec": None}, # 8-bit unsigned PCM + 'PCM_16': {"sample_width": 2, "codec": None}, # 16-bit signed PCM + 'PCM_24': {"sample_width": 3, "codec": None}, # 24-bit signed PCM + 'PCM_32': {"sample_width": 4, "codec": None}, # 32-bit signed PCM + 'FLOAT32': {"sample_width": None, "codec": "pcm_f32le"}, # 32-bit float + 'FLOAT64': {"sample_width": None, "codec": "pcm_f64le"} # 64-bit float +} + + +# Manual Downloads +VR_PLACEMENT_TEXT = 'Place models in \"models/VR_Models\" directory.' +MDX_PLACEMENT_TEXT = 'Place models in \"models/MDX_Net_Models\" directory.' +DEMUCS_PLACEMENT_TEXT = 'Place models in \"models/Demucs_Models\" directory.' +DEMUCS_V3_V4_PLACEMENT_TEXT = 'Place items in \"models/Demucs_Models/v3_v4_repo\" directory.' +MDX_23_NAME = "MDX23C Model" + +# Liscense info +if OPERATING_SYSTEM=="Darwin": + is_macos = True + LICENSE_OS_SPECIFIC_TEXT = '• This application is intended for those running macOS Catalina and above.\n' +\ + '• Application functionality for systems running macOS Mojave or lower is not guaranteed.\n' +\ + '• Application functionality for older or budget Mac systems is not guaranteed.\n\n' +elif OPERATING_SYSTEM=="Linux": + LICENSE_OS_SPECIFIC_TEXT = '• This application is intended for those running Linux Ubuntu 18.04+.\n' +\ + '• Application functionality for systems running other Linux platforms is not guaranteed.\n' +\ + '• Application functionality for older or budget systems is not guaranteed.\n\n' +elif OPERATING_SYSTEM=="Windows": + LICENSE_OS_SPECIFIC_TEXT = '• This application is intended for those running Windows 10 or higher.\n' +\ + '• Application functionality for systems running Windows 7 or lower is not guaranteed.\n' +\ + '• Application functionality for Intel Pentium & Celeron CPUs systems is not guaranteed.\n\n' + +LICENSE_TEXT = lambda a, p:f'Current Application Version: Ultimate Vocal Remover {a}\n' +\ + f'Current Patch Version: {p}\n\n' +\ + 'Copyright (c) 2022 Ultimate Vocal Remover\n\n' +\ + 'UVR is free and open-source, but MIT licensed. 
Please credit us if you use our\n' +\ + f'models or code for projects unrelated to UVR.\n\n{LICENSE_OS_SPECIFIC_TEXT}' +\ + 'This bundle contains the UVR interface, Python, PyTorch, and other\n' +\ + 'dependencies needed to run the application effectively.\n\n' +\ + 'Website Links: This application, System or Service(s) may contain links to\n' +\ + 'other websites and downloads, and they are solely provided to you as an\n' +\ + 'additional convenience. You understand and acknowledge that by clicking\n' +\ + 'or activating such links you are accessing a site or service outside of\n' +\ + 'this application, and that we do not screen, review, approve, or otherwise\n' +\ + 'endorse any content or information contained in these linked websites.\n' +\ + 'You acknowledge and agree that we, our affiliates and partners are not\n' +\ + 'responsible for the contents of any of these linked websites, including\n' +\ + 'the accuracy or availability of information provided by the linked websites,\n' +\ + 'and we make no representations or warranties regarding your use of\n' +\ + 'the linked websites.\n\n' +\ + 'This application is MIT Licensed\n\n' +\ + 'Permission is hereby granted, free of charge, to any person obtaining a copy\n' +\ + 'of this software and associated documentation files (the "Software"), to deal\n' +\ + 'in the Software without restriction, including without limitation the rights\n' +\ + 'to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n' +\ + 'copies of the Software, and to permit persons to whom the Software is\n' +\ + 'furnished to do so, subject to the following conditions:\n\n' +\ + 'The above copyright notice and this permission notice shall be included in all\n' +\ + 'copies or substantial portions of the Software.\n\n' +\ + 'THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n' +\ + 'IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n' +\ + 'FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n' +\ + 'AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n' +\ + 'LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n' +\ + 'OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n' +\ + 'SOFTWARE.' + +# Message Box Text +INVALID_INPUT = 'Invalid Input', 'The input is invalid.\n\nPlease verify the input still exists or is valid and try again.' +INVALID_EXPORT = 'Invalid Export Directory', 'You have selected an invalid export directory.\n\nPlease make sure the selected directory still exists.' +INVALID_ENSEMBLE = 'Not Enough Models', 'You must select 2 or more models to run ensemble.' +INVALID_MODEL = 'No Model Chosen', 'You must select an model to continue.' +MISSING_MODEL = 'Model Missing', 'The selected model is missing or not valid.' +ERROR_OCCURED = 'Error Occured', '\n\nWould you like to open the error log for more details?\n' +PROCESS_COMPLETE = '\nProcess complete\n' +PROCESS_COMPLETE_2 = 'Process complete\n' + +# GUI Text Constants +BACK_TO_MAIN_MENU = 'Back to Main Menu' + +# Help Hint Text +INTERNAL_MODEL_ATT = 'This is an internal model setting. \n\n***Avoid changing it unless you\'re certain about it!***' +STOP_HELP = 'Stops ongoing tasks.\n• A confirmation pop-up will appear before stopping.' +SETTINGS_HELP = 'Accesses the main settings and the "Download Center."' +COMMAND_TEXT_HELP = 'Shows the status and progress of ongoing tasks.' +SAVE_CURRENT_SETTINGS_HELP = 'Load or save the app\'s settings.' 
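The message-box constants above (INVALID_INPUT, INVALID_EXPORT, MISSING_MODEL, and so on) are (title, message) tuples rather than single strings, so a caller can unpack them straight into a dialog. A minimal sketch of that pattern, assuming a Tkinter front end; the `show_error` helper is illustrative only and not part of UVR's code:

```python
# Illustrative only: unpack a (title, message) constant such as INVALID_INPUT
# into a standard Tkinter error dialog.
from tkinter import messagebox

def show_error(message_tuple):
    title, message = message_tuple          # the constants above are (title, body) pairs
    messagebox.showerror(title=title, message=message)

# Example usage: show_error(INVALID_INPUT)
```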
+PITCH_SHIFT_HELP = ('Choose the pitch for processing tracks:\n\n' + '• Whole numbers indicate semitones.\n' + '• Using higher pitches may cut the upper bandwidth, even in high-quality models.\n' + '• Upping the pitch can be better for tracks with deeper vocals.\n' + '• Dropping the pitch may take more processing time but works well for tracks with high-pitched vocals.') +AGGRESSION_SETTING_HELP = ('Adjust the intensity of primary stem extraction:\n\n' + '• It ranges from -100 - 100.\n' + '• Bigger values mean deeper extractions.\n' + '• Typically, it\'s set to 5 for vocals & instrumentals. \n' + '• Values beyond 5 might muddy the sound for non-vocal models.') +WINDOW_SIZE_HELP = ('Select window size to balance quality and speed:\n\n' + '• 1024 - Quick but lesser quality.\n' + '• 512 - Medium speed and quality.\n' + '• 320 - Takes longer but may offer better quality.') +MDX_SEGMENT_SIZE_HELP = ('Pick a segment size to balance speed, resource use, and quality:\n' + '• Smaller sizes consume less resources.\n' + '• Bigger sizes consume more resources, but may provide better results.\n' + '• Default size is 256. Quality can change based on your pick.') +DEMUCS_STEMS_HELP = ('Select a stem for extraction with the chosen model:\n\n' + '• All Stems - Extracts all available stems.\n' + '• Vocals - Only the "vocals" stem.\n' + '• Other - Only the "other" stem.\n' + '• Bass - Only the "bass" stem.\n' + '• Drums - Only the "drums" stem.') +SEGMENT_HELP = ('Adjust segments to manage RAM or V-RAM usage:\n\n' + '• Smaller sizes consume less resources.\n' + '• Bigger sizes consume more resources, but may provide better results.\n' + '• "Default" picks the optimal size.') + +ENSEMBLE_MAIN_STEM_HELP = ( + 'Select the stem type for ensembling:\n\n' + + f'• {VOCAL_PAIR}:\n' + ' - Primary Stem: Vocals\n' + ' - Secondary Stem: Instrumental (mixture minus vocals)\n\n' + + f'• {OTHER_PAIR}:\n' + ' - Primary Stem: Other\n' + ' - Secondary Stem: No Other (mixture minus "other")\n\n' + + f'• {BASS_PAIR}:\n' + ' - Primary Stem: Bass\n' + ' - Secondary Stem: No Bass (mixture minus bass)\n\n' + + f'• {DRUM_PAIR}:\n' + ' - Primary Stem: Drums\n' + ' - Secondary Stem: No Drums (mixture minus drums)\n\n' + + f'• {FOUR_STEM_ENSEMBLE}:\n' + ' - Gathers all 4-stem Demucs models and ensembles all outputs.\n\n' + + f'• {MULTI_STEM_ENSEMBLE}:\n' + ' - The "Jungle Ensemble" gathers all models and ensembles any related outputs.' +) + +ENSEMBLE_TYPE_HELP = ( + 'Choose the ensemble algorithm for generating the final output:\n\n' + + f'• {MAX_MIN}:\n' + ' - Primary stem processed with "Max Spec" algorithm.\n' + ' - Secondary stem processed with "Min Spec" algorithm.\n\n' + + 'Note: For the "4 Stem Ensemble" option, only one algorithm will be displayed.\n\n' + + 'Algorithm Details:\n' + + f'• {MAX_SPEC}:\n' + ' - Produces the highest possible output.\n' + ' - Ideal for vocal stems for a fuller sound, but might introduce unwanted artifacts.\n' + ' - Works well with instrumental stems, but avoid using VR Arch models in the ensemble.\n\n' + + f'• {MIN_SPEC}:\n' + ' - Produces the lowest possible output.\n' + ' - Ideal for instrumental stems for a cleaner result. Might result in a "muddy" sound.\n\n' + + f'• {AUDIO_AVERAGE}:\n' + ' - Averages all results together for the final output.' +) + +ENSEMBLE_LISTBOX_HELP = ( + 'Displays all available models for the chosen main stem pair.' 
+) + +if OPERATING_SYSTEM == 'darwin': + IS_GPU_CONVERSION_HELP = ( + '• Use GPU for Processing (if available):\n' + ' - If checked, the application will attempt to use your GPU for faster processing.\n' + ' - If a GPU is not detected, it will default to CPU processing.\n' + ' - GPU processing for MacOS only works with VR Arch models.\n\n' + '• Please Note:\n' + ' - CPU processing is significantly slower than GPU processing.\n' + ' - Only Macs with M1 chips can be used for GPU processing.' + ) +else: + IS_GPU_CONVERSION_HELP = ( + '• Use GPU for Processing (if available):\n' + ' - If checked, the application will attempt to use your GPU for faster processing.\n' + ' - If a GPU is not detected, it will default to CPU processing.\n\n' + '• Please Note:\n' + ' - CPU processing is significantly slower than GPU processing.\n' + ' - Only Nvidia GPUs can be used for GPU processing.' + ) + +IS_TIME_CORRECTION_HELP = ('When checked, the output will retain the original BPM of the input.') +SAVE_STEM_ONLY_HELP = 'Allows the user to save only the selected stem.' +IS_NORMALIZATION_HELP = 'Normalizes output to prevent clipping.' +IS_CUDA_SELECT_HELP = "If you have more than one GPU, you can pick which one to use for processing." +CROP_SIZE_HELP = '**Only compatible with select models only!**\n\n Setting should match training crop-size value. Leave as is if unsure.' +IS_TTA_HELP = ('This option performs Test-Time-Augmentation to improve the separation quality.\n\n' + 'Note: Having this selected will increase the time it takes to complete a conversion') +IS_POST_PROCESS_HELP = ('This option can potentially identify leftover instrumental artifacts within the vocal outputs. \nThis option may improve the separation of some songs.\n\n' +\ + 'Note: Selecting this option can adversely affect the conversion process, depending on the track. Because of this, it is only recommended as a last resort.') +IS_HIGH_END_PROCESS_HELP = 'The application will mirror the missing frequency range of the output.' +SHIFTS_HELP = ('Performs multiple predictions with random shifts of the input and averages them.\n\n' + '• The higher number of shifts, the longer the prediction will take. \n- Not recommended unless you have a GPU.') +OVERLAP_HELP = ('• This option controls the amount of overlap between prediction windows.\n' + ' - Higher values can provide better results, but will lead to longer processing times.\n' + ' - You can choose between 0.001-0.999') +MDX_OVERLAP_HELP = ('• This option controls the amount of overlap between prediction windows.\n' + ' - Higher values can provide better results, but will lead to longer processing times.\n' + ' - For Non-MDX23C models: You can choose between 0.001-0.999') +OVERLAP_23_HELP = ('• This option controls the amount of overlap between prediction windows.\n' + ' - Higher values can provide better results, but will lead to longer processing times.') +IS_SEGMENT_DEFAULT_HELP = '• The segment size is set based on the value provided in a chosen model\'s associated \nconfig file (yaml).' +IS_SPLIT_MODE_HELP = '• Enables \"Segments\". \n• Deselecting this option is only recommended for those with powerful PCs.' +IS_DEMUCS_COMBINE_STEMS_HELP = 'The application will create the secondary stem by combining the remaining stems \ninstead of inverting the primary stem with the mixture.' +COMPENSATE_HELP = 'Compensates the audio of the primary stems to allow for a better secondary stem.' 
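The "Max Spec", "Min Spec", and "Average" algorithms described in ENSEMBLE_TYPE_HELP above boil down to per-bin reductions across the outputs being ensembled. A simplified sketch of that idea, assuming equally shaped NumPy magnitude spectrograms; the function below is illustrative and is not UVR's ensembling code:

```python
# Simplified sketch (not UVR's implementation) of the ensemble reductions above.
import numpy as np

def ensemble_spectrograms(spectrograms, algorithm="Max Spec"):
    """Combine equally shaped magnitude spectrograms from several models."""
    stack = np.stack(spectrograms)   # shape: (num_models, freq_bins, time_frames)
    if algorithm == "Max Spec":      # fullest output: the loudest bin wins
        return stack.max(axis=0)
    if algorithm == "Min Spec":      # cleanest output: the quietest bin wins
        return stack.min(axis=0)
    return stack.mean(axis=0)        # "Average": plain mean of all results
```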
+IS_DENOISE_HELP = ('• Standard: This setting reduces the noise created by MDX-Net models.\n' + ' - This option only reduces noise in non-MDX23 models.\n' + '• Denoise Model: This setting employs a special denoise model to eliminate noise produced by any MDX-Net model.\n' + ' - This option works on all MDX-Net models.\n' + ' - You must have the "UVR-DeNoise-Lite" VR Arch model installed to use this option.\n' + '• Please Note: Both options will increase separation time.') + +VOC_SPLIT_MODEL_SELECT_HELP = '• Select a model from the list of lead and backing vocal models to run through vocal stems automatically.' +IS_VOC_SPLIT_INST_SAVE_SELECT_HELP = '• When activated, you will receive extra instrumental outputs that include: one with just the lead vocals and another with only the backing vocals.' +IS_VOC_SPLIT_MODEL_SELECT_HELP = ('• When activated, this option auto-processes generated vocal stems, using either a karaoke model to remove lead vocals or another to remove backing vocals.\n' + ' - This option splits the vocal track into two separate parts: lead vocals and backing vocals, providing two extra vocal outputs.\n' + ' - The results will be organized in the same way, whether you use a karaoke model or a background vocal model.\n' + ' - This option does not work in ensemble mode at this time.') +IS_DEVERB_OPT_HELP = ('• Select the vocal type you wish to deverb automatically.\n' + ' - Example: Choosing "Lead Vocals Only" will only remove reverb from a lead vocal stem.') +IS_DEVERB_VOC_HELP = ('• This option removes reverb from a vocal stem.\n' + ' - You must have the "UVR-DeEcho-DeReverb" VR Arch model installed to use this option.\n' + ' - This option does not work in ensemble mode at this time.') +IS_FREQUENCY_MATCH_HELP = 'Matches the frequency cut-off of the primary stem to that of the secondary stem.' +CLEAR_CACHE_HELP = 'Clears settings for unrecognized models chosen by the user.' +IS_SAVE_ALL_OUTPUTS_ENSEMBLE_HELP = 'If enabled, all individual ensemble-generated outputs are retained.' +IS_APPEND_ENSEMBLE_NAME_HELP = 'When enabled, the ensemble name is added to the final output.' +IS_WAV_ENSEMBLE_HELP = ( + 'Processes ensemble algorithms with waveforms instead of spectrograms when activated:\n' + '• Might lead to increased distortion.\n' + '• Waveform ensembling is faster than spectrogram ensembling.' +) +DONATE_HELP = 'Opens official UVR "Buy Me a Coffee" external link for project donations!' +IS_INVERT_SPEC_HELP = ( + 'Potentially enhances the secondary stem quality:\n' + '• Inverts primary stem using spectrograms, instead of waveforms.\n' + '• Slightly slower inversion method.' +) +IS_TESTING_AUDIO_HELP = 'Appends a 10-digit number to saved files to avoid accidental overwrites.' +IS_MODEL_TESTING_AUDIO_HELP = 'Appends the model name to outputs for comparison across different models.' +IS_ACCEPT_ANY_INPUT_HELP = ( + 'Allows all types of inputs when enabled, even non-audio formats.\n' + 'For experimental use only. Not recommended for regular use.' +) +IS_TASK_COMPLETE_HELP = 'Plays a chime upon process completion or failure when activated.' +DELETE_YOUR_SETTINGS_HELP = ( + 'Contains your saved settings. Confirmation will be requested before deleting a selected setting.' +) +SET_STEM_NAME_HELP = 'Select the primary stem for the given model.' 
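IS_INVERT_SPEC_HELP above contrasts the default waveform inversion with a spectrogram-based inversion when deriving the secondary stem. A rough sketch of the difference, assuming librosa is available; the function names and STFT settings are illustrative assumptions, not UVR's implementation:

```python
# Rough illustration (not UVR's code) of waveform vs. spectrogram inversion.
import numpy as np
import librosa

def invert_waveform(mixture, primary):
    """Secondary stem as a plain time-domain subtraction."""
    return mixture - primary

def invert_spectrogram(mixture, primary, n_fft=2048):
    """Subtract magnitudes in the STFT domain, then resynthesize with the
    mixture's phase; slower, but can yield a cleaner secondary stem."""
    mix_stft = librosa.stft(mixture, n_fft=n_fft)
    prim_stft = librosa.stft(primary, n_fft=n_fft)
    residual_mag = np.maximum(np.abs(mix_stft) - np.abs(prim_stft), 0.0)
    return librosa.istft(residual_mag * np.exp(1j * np.angle(mix_stft)), length=len(mixture))
```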
+IS_CREATE_MODEL_FOLDER_HELP = ('Two new directories will be generated for the outputs in the export directory after each conversion.\n\n' + '• Example: \n' + '─ Export Directory\n' + ' └── First Directory (Named after the model)\n' + ' └── Second Directory (Named after the track)\n' + ' └── Output File(s)') +MDX_DIM_T_SET_HELP = INTERNAL_MODEL_ATT +MDX_DIM_F_SET_HELP = INTERNAL_MODEL_ATT + +MDX_N_FFT_SCALE_SET_HELP = 'Specify the N_FFT size used during model training.' +POPUP_COMPENSATE_HELP = ( + f'Select the appropriate volume compensation for the chosen model.\n' + f'Reminder: {COMPENSATE_HELP}' +) +VR_MODEL_PARAM_HELP = 'Select the required parameters to run the chosen model.' +CHOSEN_ENSEMBLE_HELP = ( + 'Default Ensemble Selections:\n' + '• Save the current ensemble configuration.\n' + '• Clear all selected models.\n' + 'Note: You can also select previously saved ensembles.' +) +CHOSEN_PROCESS_METHOD_HELP = ( + 'Choose a Processing Method:\n' + 'Select from various AI networks and algorithms to process your track:\n' + '\n' + '• VR Architecture: Uses magnitude spectrograms for source separation.\n' + '• MDX-Net: Employs a Hybrid Spectrogram network for source separation.\n' + '• Demucs v3: Also utilizes a Hybrid Spectrogram network for source separation.\n' + '• Ensemble Mode: Combine results from multiple models and networks for optimal results.\n' + '• Audio Tools: Additional utilities for added convenience.' +) + +INPUT_FOLDER_ENTRY_HELP = ( + 'Select Input:\n' + 'Choose the audio file(s) you want to process.' +) +INPUT_FOLDER_ENTRY_HELP_2 = ( + 'Input Option Menu:\n' + 'Click to access the input option menu.' +) +OUTPUT_FOLDER_ENTRY_HELP = ( + 'Select Output:\n' + 'Choose the directory where the processed files will be saved.' +) +INPUT_FOLDER_BUTTON_HELP = ( + 'Open Input Folder Button:\n' + 'Open the directory containing the selected input audio file(s).' +) +OUTPUT_FOLDER_BUTTON_HELP = ( + 'Open Output Folder Button:\n' + 'Open the selected output folder.' +) +CHOOSE_MODEL_HELP = ( + 'Each processing method has its own set of options and models.\n' + 'Choose the model associated with the selected processing method here.' +) +FORMAT_SETTING_HELP = 'Save Outputs As: ' +SECONDARY_MODEL_ACTIVATE_HELP = ( + 'When enabled, the application will perform an additional inference using the selected model(s) above.' +) +SECONDARY_MODEL_HELP = ( + 'Choose the Secondary Model:\n' + 'Select the secondary model associated with the stem you want to process with the current method.' +) + +INPUT_SEC_FIELDS_HELP = ( + 'Right click here to choose your inputs!' 
+) + +SECONDARY_MODEL_SCALE_HELP = ('The scale determines how the final audio outputs will be averaged between the primary and secondary models.\n\nFor example:\n\n' + '• 10% - 10 percent of the main model result will be factored into the final result.\n' + '• 50% - The results from the main and secondary models will be averaged evenly.\n' + '• 90% - 90 percent of the main model result will be factored into the final result.') +PRE_PROC_MODEL_ACTIVATE_HELP = ( + 'When enabled, the application will use the selected model to isolate the instrumental stem.\n' + 'Subsequently, all non-vocal stems will be extracted from this generated instrumental.\n' + '\n' + 'Key Points:\n' + '• This feature can significantly reduce vocal bleed in non-vocal stems.\n' + '• Available exclusively in the Demucs tool.\n' + '• Compatible only with non-vocal and non-instrumental stem outputs.\n' + '• Expect an increase in total processing time.\n' + '• Only the VR or MDX-Net Vocal Instrumental/Vocals models can be chosen for this process.' +) + +AUDIO_TOOLS_HELP = ( + 'Select from various audio tools to process your track:\n' + '\n' + '• Manual Ensemble: Requires 2 or more selected files as inputs. This allows tracks to be processed using the algorithms from Ensemble Mode.\n' + '• Time Stretch: Adjust the playback speed of the selected inputs to be faster or slower.\n' + '• Change Pitch: Modify the pitch of the selected inputs.\n' + '• Align Inputs: Choose 2 audio file and the application will align them and provide the difference in alignment.\n' + ' - This tool provides similar functionality to "Utagoe."\n' + ' - Primary Audio: This is usually a mixture.\n' + ' - Secondary Audio: This is usually an instrumental.\n' + '• Matchering: Choose 2 audio files. The matchering algorithm will master the target audio to have the same RMS, FR, peak amplitude, and stereo width as the reference audio.' +) + +PRE_PROC_MODEL_INST_MIX_HELP = 'When enabled, the application will generate a third output without the selected stem and vocals.' +MODEL_SAMPLE_MODE_HELP = ('Allows the user to process only part of a track to sample settings or a model without running a full conversion.\n\nNotes:\n\n' + '• The number in the parentheses is the current number of seconds the generated sample will be.\n' + '• You can choose the number of seconds to extract from the track in the \"Additional Settings\" menu.') + +POST_PROCESS_THREASHOLD_HELP = ('Allows the user to control the intensity of the Post_process option.\n\nNotes:\n\n' + '• Higher values potentially remove more artifacts. However, bleed might increase.\n' + '• Lower values limit artifact removal.') + +BATCH_SIZE_HELP = ('Specify the number of batches to be processed at a time.\n\nNotes:\n\n' + '• Higher values mean more RAM usage but slightly faster processing times.\n' + '• Lower values mean less RAM usage but slightly longer processing times.\n' + '• Batch size value has no effect on output quality.') + +VR_MODEL_NOUT_HELP = "" +VR_MODEL_NOUT_LSTM_HELP = "" + +IS_PHASE_HELP = 'Select the phase for the secondary audio.\n• Note: Using the "Automatic" option is strongly recommended.' +IS_ALIGN_TRACK_HELP = 'Enable this to save the secondary track once aligned.' +IS_MATCH_SILENCE_HELP = ( + 'Aligns the initial silence of the secondary audio with the primary audio.\n' + '• Note: Avoid using this option if the primary audio begins solely with vocals.' 
+) +IS_MATCH_SPEC_HELP = 'Align the secondary audio based on the primary audio\'s spectrogram.\n• Note: This may enhance alignment in specific cases.' + +TIME_WINDOW_ALIGN_HELP = ( + 'This setting determines the window size for alignment analysis, especially for pairs with minor timing variations:\n' + '\n' + '• None: Disables time window analysis.\n' + '• 1: Analyzes pair by 0.0625-second windows.\n' + '• 2: Analyzes pair by 0.125-second windows.\n' + '• 3: Analyzes pair by 0.25-second windows.\n' + '• 4: Analyzes pair by 0.50-second windows.\n' + '• 5: Analyzes pair by 0.75-second windows.\n' + '• 6: Analyzes pair by 1-second windows.\n' + '• 7: Analyzes pair by 2-second windows.\n' + '\n' + 'Shifts Options:\n' + '• Low: Cycles through 0.0625 and 0.5-second windows to find an optimal match.\n' + '• Medium: Cycles through 0.0625, 0.125, and 0.5-second windows to find an optimal match.\n' + '• High: Cycles through 0.0625, 0.125, 0.25, and 0.5-second windows to find an optimal match.\n' + '\n' + 'Important Points to Consider:\n' + ' - Using the "Shifts" option may require more processing time and might not guarantee better results.\n' + ' - Opting for smaller analysis windows can increase processing times.\n' + ' - The best settings are likely to vary based on the specific tracks being processed.' +) +INTRO_ANALYSIS_ALIGN_HELP = ( + 'This setting determines the portion of the audio input to be analyzed for initial alignment.\n' + '\n' + '• Default: Analyzes 10% (or 1/10th) of the audio\'s total length.\n' + '• 1: Analyzes 12.5% (or 1/8th) of the audio\'s total length.\n' + '• 2: Analyzes 16.67% (or 1/6th) of the audio\'s total length.\n' + '• 3: Analyzes 25% (or 1/4th) of the audio\'s total length.\n' + '• 4: Analyzes 50% (or half) of the audio\'s total length.\n' + '\n' + 'Shifts Options:\n' + '• Low: Cycles through 2 intro analysis values.\n' + '• Medium: Cycles through 3 intro analysis values.\n' + '• High: Cycles through 5 intro analysis values.\n' + '\n' + 'Important Points to Consider:\n' + ' - Using the "Shifts" option will require more processing time and might not guarantee better results.\n' + ' - Optimal settings may vary depending on the specific tracks being processed.' +) + +VOLUME_ANALYSIS_ALIGN_HELP = ( + 'This setting specifies the volume adjustments to be made on the secondary input:\n' + '\n' + '• None: No volume adjustments are made.\n' + '• Low: Analyzes the audio within a 4dB range, adjusting in 1dB increments.\n' + '• Medium: Analyzes the audio within a 6dB range, adjusting in 1dB increments.\n' + '• High: Analyzes the audio within a 6dB range, adjusting in 0.5dB increments.\n' + '• Very High: Analyzes the audio within a 10dB range, adjusting in 0.5dB increments.\n' + '\n' + 'Important Points to Consider:\n' + ' - Selecting more extensive analysis options (e.g., High, Very High) will lead to longer processing times.\n' + ' - Optimal settings might vary based on the specific tracks being processed.' 
+)
+
+PHASE_SHIFTS_ALIGN_HELP = (
+ 'This setting specifies the phase adjustments to be made on the secondary input:\n'
+ '\n'
+ 'Shifts Options:\n'
+ '• None: No phase adjustments are made.\n'
+ '• Very Low: Analyzes the audio within range of 2 different phase positions.\n'
+ '• Low: Analyzes the audio within range of 4 different phase positions.\n'
+ '• Medium: Analyzes the audio within range of 8 different phase positions.\n'
+ '• High: Analyzes the audio within range of 18 different phase positions.\n'
+ '• Very High: Analyzes the audio within range of 36 different phase positions.\n'
+ '• Maximum: Analyzes the audio in all 360 phase positions.\n'
+ '\n'
+ 'Important Points to Consider:\n'
+ ' - This option only works with time correction.\n'
+ ' - This option can be helpful if one of the inputs was from an analog source.\n'
+ ' - Selecting more extensive analysis options (e.g., High, Very High) will lead to longer processing times.\n'
+ ' - Selecting "Maximum" can take hours to process.\n'
+ ' - Optimal settings might vary based on the specific tracks being processed.'
+)
+
+# Warning Messages
+STORAGE_ERROR = 'Insufficient Storage', 'There is not enough storage on the main drive to continue. Your main drive must have at least 3 GB of storage in order for this application to function properly. \n\nPlease ensure your main drive has at least 3 GB of storage and try again.\n\n'
+STORAGE_WARNING = 'Available Storage Low', 'Your main drive is running low on storage. Your main drive must have at least 3 GB of storage in order for this application to function properly.\n\n'
+CONFIRM_WARNING = '\nAre you sure you wish to continue?'
+PROCESS_FAILED = 'Process failed, please see error log\n'
+EXIT_PROCESS_ERROR = 'Active Process', 'Please stop the active process or wait for it to complete before you exit.'
+EXIT_HALTED_PROCESS_ERROR = 'Halting Process', 'Please wait for the application to finish halting the process before exiting.'
+EXIT_DOWNLOAD_ERROR = 'Active Download', 'Please stop the download or wait for it to complete before you exit.'
+SET_TO_DEFAULT_PROCESS_ERROR = 'Active Process', 'You cannot reset all of the application settings during an active process.'
+SET_TO_ANY_PROCESS_ERROR = 'Active Process', 'You cannot reset the application settings during an active process.'
+RESET_ALL_TO_DEFAULT_WARNING = 'Reset Settings Confirmation', 'All application settings will be set to factory default.\n\nAre you sure you wish to continue?'
+AUDIO_VERIFICATION_CHECK = lambda i, e:f'++++++++++++++++++++++++++++++++++++++++++++++++++++\n\nBroken File Removed: \n\n{i}\n\nError Details:\n\n{e}\n++++++++++++++++++++++++++++++++++++++++++++++++++++'
+INVALID_ONNX_MODEL_ERROR = 'Invalid Model', 'The file selected is not a valid MDX-Net model. Please see the error log for more information.'
+INVALID_PARAM_MODEL_ERROR = 'Select Model Param', 'Please choose a model param or click \'Cancel\'.'
+UNRECOGNIZED_MODEL = 'Unrecognized Model Detected', ' is an unrecognized model.\n\n' + \
+ 'Would you like to select the correct parameters before continuing?'
+STOP_PROCESS_CONFIRM = 'Confirmation', 'You are about to stop all active processes.\n\nAre you sure you wish to continue?'
+NO_ENSEMBLE_SELECTED = 'No Models Selected', 'Please select an ensemble and try again.'
+PICKLE_CORRU = 'File Corrupted', 'Unable to load this ensemble.\n\n' + \
+ 'Would you like to remove this ensemble from your list?'
+DELETE_ENS_ENTRY = 'Confirm Removal', 'Are you sure you want to remove this entry?'
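The STORAGE_ERROR and STORAGE_WARNING messages above imply a startup check that the main drive has at least 3 GB free. A minimal sketch of such a check using only the standard library; the helper name and default path are assumptions, not UVR's code:

```python
# Minimal sketch of the free-space check implied by STORAGE_ERROR / STORAGE_WARNING.
import shutil

MIN_FREE_BYTES = 3 * 1024 ** 3  # 3 GB

def main_drive_free_space_ok(path="/"):
    """Return True when the drive containing `path` has at least 3 GB free."""
    return shutil.disk_usage(path).free >= MIN_FREE_BYTES
```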
+ +# Separation Text +LOADING_MODEL = 'Loading model...' +INFERENCE_STEP_1 = 'Running inference...' +INFERENCE_STEP_1_SEC = 'Running inference (secondary model)...' +INFERENCE_STEP_1_4_STEM = lambda stem:f'Running inference (secondary model for {stem})...' +INFERENCE_STEP_1_PRE = 'Running inference (pre-process model)...' +INFERENCE_STEP_1_VOC_S = 'Splitting vocals...' +INFERENCE_STEP_2_PRE = lambda pm, m:f'Loading pre-process model ({pm}: {m})...' +INFERENCE_STEP_2_SEC = lambda pm, m:f'Loading secondary model ({pm}: {m})...' +INFERENCE_STEP_2_VOC_S = lambda pm, m:f'Loading vocal splitter model ({pm}: {m})...' +INFERENCE_STEP_2_SEC_CACHED_MODOEL = lambda pm, m:f'Secondary model ({pm}: {m}) cache loaded.\n' +INFERENCE_STEP_2_PRE_CACHED_MODOEL = lambda pm, m:f'Pre-process model ({pm}: {m}) cache loaded.\n' +INFERENCE_STEP_2_SEC_CACHED = 'Loading cached secondary model source(s)... Done!\n' +INFERENCE_STEP_2_PRIMARY_CACHED = ' Model cache loaded.\n' +INFERENCE_STEP_2 = 'Inference complete.' +INFERENCE_STEP_DEVERBING = ' Deverbing...' +SAVING_STEM = 'Saving ', ' stem...' +SAVING_ALL_STEMS = 'Saving all stems...' +ENSEMBLING_OUTPUTS = 'Ensembling outputs...' +DONE = ' Done!\n' +ENSEMBLES_SAVED = 'Ensembled outputs saved!\n\n' + +#Additional Text +CHOOSE_PROC_METHOD_MAIN_LABEL = 'CHOOSE PROCESS METHOD' +SELECT_SAVED_SETTINGS_MAIN_LABEL = 'SELECT SAVED SETTINGS' +CHOOSE_MDX_MODEL_MAIN_LABEL = 'CHOOSE MDX-NET MODEL' +BATCHES_MDX_MAIN_LABEL = 'BATCH SIZE' +VOL_COMP_MDX_MAIN_LABEL = 'VOLUME COMPENSATION' +SEGMENT_MDX_MAIN_LABEL = 'SEGMENT SIZE' +SELECT_VR_MODEL_MAIN_LABEL = 'CHOOSE VR MODEL' +AGGRESSION_SETTING_MAIN_LABEL = 'AGGRESSION SETTING' +WINDOW_SIZE_MAIN_LABEL = 'WINDOW SIZE' +CHOOSE_DEMUCS_MODEL_MAIN_LABEL = 'CHOOSE DEMUCS MODEL' +CHOOSE_STEMS_MAIN_LABEL = 'CHOOSE STEM(S)' +CHOOSE_SEGMENT_MAIN_LABEL = 'SEGMENT' +ENSEMBLE_OPTIONS_MAIN_LABEL = 'ENSEMBLE OPTIONS' +CHOOSE_MAIN_PAIR_MAIN_LABEL = 'MAIN STEM PAIR' +CHOOSE_ENSEMBLE_ALGORITHM_MAIN_LABEL = 'ENSEMBLE ALGORITHM' +AVAILABLE_MODELS_MAIN_LABEL = 'AVAILABLE MODELS' +CHOOSE_AUDIO_TOOLS_MAIN_LABEL = 'CHOOSE AUDIO TOOL' +CHOOSE_MANUAL_ALGORITHM_MAIN_LABEL = 'CHOOSE ALGORITHM' +CHOOSE_RATE_MAIN_LABEL = 'RATE' +CHOOSE_SEMITONES_MAIN_LABEL = 'SEMITONES' +GPU_CONVERSION_MAIN_LABEL = 'GPU Conversion' +CHANGE_LOG_HEADER = lambda patch:f"Patch Version:\n\n{patch}" +INVALID_INPUT_E = ' Invalid input! 
' +LB_UP = "Move Selection Up" +LB_DOWN = "Move Selection Down" +LB_CLEAR = "Clear Box" +LB_MOVE_OVER_P = "Move Selection to Secondary List" +LB_MOVE_OVER_S = "Move Selection to Primary List" +FILE_ONE_MAIN_LABEL = "PRIMARY AUDIO" +FILE_TWO_MAIN_LABEL = "SECONDARY AUDIO" +FILE_ONE_MATCH_MAIN_LABEL = "TARGET AUDIO" +FILE_TWO_MATCH_MAIN_LABEL = "REFERENCE AUDIO" +TIME_WINDOW_MAIN_LABEL = "TIME ADJUSTMENT" +INTRO_ANALYSIS_MAIN_LABEL = "INTRO ANALYSIS" +VOLUME_ADJUSTMENT_MAIN_LABEL = "VOLUME ADJUSTMENT" +SELECT_INPUTS = "Select Input(s)" +SELECTED_INPUTS = 'Selected Inputs' +WIDEN_BOX = 'Widen Box' +CONFIRM_ENTRIES = 'Confirm Entries' +CLOSE_WINDOW = 'Close Window' +DUAL_AUDIO_PROCESSING = 'Dual Audio Batch Processing' +CANCEL_TEXT = "Cancel" +CONFIRM_TEXT = "Confirm" +SELECT_MODEL_TEXT = 'Select Model' +NONE_SELECTED = 'None Selected' +SAVE_TEXT = 'Save' +OVERLAP_TEXT = 'Overlap' +ACCEPT_ANY_INPUT_TEXT = 'Accept Any Input' +ACTIVATE_PRE_PROCESS_MODEL_TEXT = 'Activate Pre-process Model' +ACTIVATE_SECONDARY_MODEL_TEXT = 'Activate Secondary Model' +ADDITIONAL_MENUS_INFORMATION_TEXT = 'Additional Menus & Information' +ADDITIONAL_SETTINGS_TEXT = 'Additional Settings' +ADVANCED_ALIGN_TOOL_OPTIONS_TEXT = 'Advanced Align Tool Options' +ADVANCED_DEMUCS_OPTIONS_TEXT = 'Advanced Demucs Options' +ADVANCED_ENSEMBLE_OPTIONS_TEXT = 'Advanced Ensemble Options' +ADVANCED_MDXNET23_OPTIONS_TEXT = 'Advanced MDX-NET23 Options' +ADVANCED_MDXNET_OPTIONS_TEXT = 'Advanced MDX-Net Options' +ADVANCED_OPTION_MENU_TEXT = 'Advanced Option Menu' +ADVANCED_VR_OPTIONS_TEXT = 'Advanced VR Options' +AGGRESSION_SETTING_TEXT = 'Aggression Setting' +APPEND_ENSEMBLE_NAME_TEXT = 'Append Ensemble Name' +APPLICATION_DOWNLOAD_CENTER_TEXT = 'Application Download Center' +APPLICATION_UPDATES_TEXT = 'Application Updates' +AUDIO_FORMAT_SETTINGS_TEXT = 'Audio Format Settings' +BALANCE_VALUE_TEXT = 'Balance Value' +BATCH_SIZE_TEXT = 'Batch Size' +BV_MODEL_TEXT = 'BV Model' +CHANGE_MODEL_DEFAULT_TEXT = 'Change Model Default' +CHANGE_MODEL_DEFAULTS_TEXT = 'Change Model Defaults' +CHANGE_PARAMETERS_TEXT = 'Change Parameters' +CHOOSE_ADVANCED_MENU_TEXT = 'Choose Advanced Menu' +CHOOSE_MODEL_PARAM_TEXT = 'Choose Model Param' +CLEAR_AUTOSET_CACHE_TEXT = 'Clear Auto-Set Cache' +COMBINE_STEMS_TEXT = 'Combine Stems' +CONFIRM_UPDATE_TEXT = 'Confirm Update' +COPIED_TEXT = 'Copied!' 
+COPY_ALL_TEXT_TEXT = 'Copy All Text' +DEFINED_PARAMETERS_DELETED_TEXT = 'Defined Parameters Deleted' +DELETE_PARAMETERS_TEXT = 'Delete Parameters' +DELETE_USER_SAVED_SETTING_TEXT = 'Delete User Saved Setting' +DEMUCS_TEXT = 'Demucs' +DENOISE_OUTPUT_TEXT = 'Denoise Output' +DEVERB_VOCALS_TEXT = 'Deverb Vocals' +DONE_TEXT = 'Done' +DOWNLOAD_CENTER_TEXT = 'Download Center' +DOWNLOAD_CODE_TEXT = 'Download Code' +DOWNLOAD_LINKS_TEXT = 'Download Link(s)' +DOWNLOAD_UPDATE_IN_APPLICATION_TEXT = 'Download Update in Application' +ENABLE_HELP_HINTS_TEXT = 'Enable Help Hints' +ENABLE_TTA_TEXT = 'Enable TTA' +ENABLE_VOCAL_SPLIT_MODE_TEXT = 'Enable Vocal Split Mode' +ENSEMBLE_NAME_TEXT = 'Ensemble Name' +ENSEMBLE_WAVFORMS_TEXT = 'Ensemble Wavforms' +ERROR_CONSOLE_TEXT = 'Error Console' +GENERAL_MENU_TEXT = 'General Menu' +GENERAL_PROCESS_SETTINGS_TEXT = 'General Process Settings' +GENERATE_MODEL_FOLDER_TEXT = 'Generate Model Folder' +HIGHEND_PROCESS_TEXT = 'High-End Process' +INPUT_CODE_TEXT = 'Input Code' +INPUT_STEM_NAME_TEXT = 'Input Stem Name' +INPUT_UNIQUE_STEM_NAME_TEXT = 'Input Unique Stem Name' +IS_INVERSE_STEM_TEXT = 'Is Inverse Stem' +KARAOKE_MODEL_TEXT = 'Karaoke Model' +MANUAL_DOWNLOADS_TEXT = 'Manual Downloads' +MATCH_FREQ_CUTOFF_TEXT = 'Match Freq Cut-off' +MDXNET_C_MODEL_PARAMETERS_TEXT = 'MDX-Net C Model Parameters' +MDXNET_MODEL_SETTINGS_TEXT = 'MDX-Net Model Settings' +MDXNET_TEXT = 'MDX-Net' +MODEL_PARAMETERS_CHANGED_TEXT = 'Model Parameters Changed' +MODEL_SAMPLE_MODE_SETTINGS_TEXT = 'Model Sample Mode Settings' +MODEL_TEST_MODE_TEXT = 'Model Test Mode' +MP3_BITRATE_TEXT = 'Mp3 Bitrate' +NAME_SETTINGS_TEXT = 'Name Settings' +NO_DEFINED_PARAMETERS_FOUND_TEXT = 'No Defined Parameters Found' +NO_TEXT = 'No' +NORMALIZE_OUTPUT_TEXT = 'Normalize Output' +USE_OPENCL_TEXT = 'Use OpenCL' +NOT_ENOUGH_MODELS_TEXT = 'Not Enough Models' +NOTIFICATION_CHIMES_TEXT = 'Notification Chimes' +OPEN_APPLICATION_DIRECTORY_TEXT = 'Open Application Directory' +OPEN_LINK_TO_MODEL_TEXT = 'Open Link to Model' +OPEN_MODEL_DIRECTORY_TEXT = 'Open Model Directory' +OPEN_MODEL_FOLDER_TEXT = 'Open Model Folder' +OPEN_MODELS_FOLDER_TEXT = 'Open Models Folder' +PHASE_SHIFTS_TEXT = 'Phase Shifts' +POST_PROCESS_TEXT = 'Post-Process' +POST_PROCESS_THRESHOLD_TEXT = 'Post-process Threshold' +PREPROCESS_MODEL_CHOOSE_TEXT = 'Pre-process Model' +PRIMARY_STEM_TEXT = 'Primary Stem' +REFRESH_LIST_TEXT = 'Refresh List' +REMOVE_SAVED_ENSEMBLE_TEXT = 'Remove Saved Ensemble' +REPORT_ISSUE_TEXT = 'Report Issue' +RESET_ALL_SETTINGS_TO_DEFAULT_TEXT = 'Reset All Settings to Default' +RESTART_APPLICATION_TEXT = 'Restart Application' +SAMPLE_CLIP_DURATION_TEXT = 'Sample Clip Duration' +SAVE_ALIGNED_TRACK_TEXT = 'Save Aligned Track' +SAVE_ALL_OUTPUTS_TEXT = 'Save All Outputs' +SAVE_CURRENT_ENSEMBLE_TEXT = 'Save Current Ensemble' +SAVE_CURRENT_SETTINGS_TEXT = 'Save Current Settings' +SAVE_INSTRUMENTAL_MIXTURE_TEXT = 'Save Instrumental Mixture' +SAVE_SPLIT_VOCAL_INSTRUMENTALS_TEXT = 'Save Split Vocal Instrumentals' +SECONDARY_MODEL_TEXT = 'Secondary Model' +SECONDARY_PHASE_TEXT = 'Secondary Phase' +SECONDS_TEXT = 'Seconds' +SEGMENT_DEFAULT_TEXT = 'Segment Default' +SEGMENT_SIZE_TEXT = 'Segment Size' +SEGMENTS_TEXT = 'Segments' +SELECT_DOWNLOAD_TEXT = 'Select Download' +SELECT_MODEL_PARAM_TEXT = 'Select Model Param' +SELECT_VOCAL_TYPE_TO_DEVERB_TEXT = 'Select Vocal Type to Deverb' +SELECTED_MODEL_PLACEMENT_PATH_TEXT = 'Selected Model Placement Path' +SETTINGS_GUIDE_TEXT = 'Settings Guide' +SETTINGS_TEST_MODE_TEXT = 'Settings Test Mode' 
+SHIFT_CONVERSION_PITCH_TEXT = 'Shift Conversion Pitch' +SHIFTS_TEXT = 'Shifts' +SILENCE_MATCHING_TEXT = 'Silence Matching' +SPECIFY_MDX_NET_MODEL_PARAMETERS_TEXT = 'Specify MDX-Net Model Parameters' +SPECIFY_PARAMETERS_TEXT = 'Specify Parameters' +SPECIFY_VR_MODEL_PARAMETERS_TEXT = 'Specify VR Model Parameters' +SPECTRAL_INVERSION_TEXT = 'Spectral Inversion' +SPECTRAL_MATCHING_TEXT = 'Spectral Matching' +SPLIT_MODE_TEXT = 'Split Mode' +STEM_NAME_TEXT = 'Stem Name' +STOP_DOWNLOAD_TEXT = 'Stop Download' +SUPPORT_UVR_TEXT = 'Support UVR' +TRY_MANUAL_DOWNLOAD_TEXT = 'Try Manual Download' +UPDATE_FOUND_TEXT = 'Update Found' +USER_DOWNLOAD_CODES_TEXT = 'User Download Codes' +UVR_BUY_ME_A_COFFEE_LINK_TEXT = 'UVR \'Buy Me a Coffee\' Link' +UVR_ERROR_LOG_TEXT = 'UVR Error Log' +UVR_PATREON_LINK_TEXT = 'UVR Patreon Link' +VOCAL_DEVERB_OPTIONS_TEXT = 'Vocal Deverb Options' +VOCAL_SPLIT_MODE_OPTIONS_TEXT = 'Vocal Split Mode Options' +VOCAL_SPLIT_OPTIONS_TEXT = 'Vocal Split Options' +VOLUME_COMPENSATION_TEXT = 'Volume Compensation' +VR_51_MODEL_TEXT = 'VR 5.1 Model' +VR_ARCH_TEXT = 'VR Arch' +WAV_TYPE_TEXT = 'Wav Type' +CUDA_NUM_TEXT = 'GPU Device' +WINDOW_SIZE_TEXT = 'Window Size' +YES_TEXT = 'Yes' +VERIFY_INPUTS_TEXT = 'Verify Inputs' +AUDIO_INPUT_TOTAL_TEXT = 'Audio Input Total' +MDX23C_ONLY_OPTIONS_TEXT = 'MDXNET23 Only Options' +PROCESS_STARTING_TEXT = 'Process starting... ' +MISSING_MESS_TEXT = 'is missing or currupted.' +SIMILAR_TEXT = "are the same." +LOADING_VERSION_INFO_TEXT = 'Loading version information...' +CHECK_FOR_UPDATES_TEXT = 'Check for Updates' +INFO_UNAVAILABLE_TEXT = "Information unavailable." +UPDATE_CONFIRMATION_TEXT = 'Are you sure you want to continue?\n\nThe application will need to be restarted.\n' +BROKEN_OR_INCOM_TEXT = 'Broken or Incompatible File(s) Removed. Check Error Log for details.' +BMAC_UVR_TEXT = 'UVR \"Buy Me a Coffee\" Link' +MDX_MENU_WAR_TEXT = '(Leave this setting as is if you are unsure.)' +NO_FILES_TEXT = 'No Files' +CHOOSE_INPUT_TEXT = 'Choose Input' +OPEN_INPUT_DIR_TEXT = 'Open Input Directory' +BATCH_PROCESS_MENU_TEXT = 'Batch Process Menu' +TEMP_FILE_DELETION_TEXT = 'Temp File Deletion' +VOCAL_SPLITTER_OPTIONS_TEXT = 'Vocal Splitter Options' +WAVEFORM_ENSEMBLE_TEXT = 'Waveform Ensemble' +SELECT_INPUT_TEXT = 'Select Input' +SELECT_OUTPUT_TEXT = 'Select Output' +TIME_CORRECTION_TEXT = 'Time Correction' +UVR_LIS_INFO_TEXT = 'UVR License Information' +ADDITIONAL_RES_CREDITS_TEXT = 'Additional Resources & Credits' +SAVE_INST_MIXTURE_TEXT = 'Save Instrumental Mixture' +DOWNLOAD_UPDATE_IN_APP_TEXT = 'Download Update in Application' +WAVE_TYPE_TEXT = 'WAVE TYPE' +OPEN_LINK_TO_MODEL_TEXT = "Open Link to Model" +OPEN_MODEL_DIRECTORY = "Open Model Directory" +SELECTED_MODEL_PLACE_PATH_TEXT = 'Selected Model Placement Path' +IS_INVERSE_STEM_TEXT = "Is Inverse Stem" +INPUT_STEM_NAME_TEXT = "Input Stem Name" +INPUT_UNIQUE_STEM_NAME_TEXT = "Input Unique Stem Name" +DONE_MENU_TEXT = "Done" +OK_TEXT = "Ok" +ENSEMBLE_WARNING_NOT_ENOUGH_SHORT_TEXT = "Not Enough Models" +ENSEMBLE_WARNING_NOT_ENOUGH_TEXT = "You must select 2 or more models to save an ensemble." +NOT_ENOUGH_ERROR_TEXT = "Not enough files to process.\n" +INVALID_FOLDER_ERROR_TEXT = 'Invalid Folder', 'Your given export path is not a valid folder!' + +GET_DL_VIP_CODE_TEXT = ("Obtain codes by visiting one of the following links below." 
+ "\nFrom there you can donate, pledge, " + "or just obatain the code!\n (Donations are not required to obtain VIP code)") +CONFIRM_RESTART_TEXT = 'Restart Confirmation', 'This will restart the application and halt any running processes. Your current settings will be saved. \n\n Are you sure you wish to continue?' +ERROR_LOADING_FILE_TEXT = 'Error Loading the Following File', 'Raw Error Details' +LOADING_MODEL_TEXT = 'Loading model' +FULL_APP_SET_TEXT = 'Full Application Settings' +PROCESS_STARTING_TEXT = 'Process starting... ' +PROCESS_STOPPED_BY_USER = '\n\nProcess stopped by user.' +NEW_UPDATE_FOUND_TEXT = lambda version:f"\n\nNew Update Found: {version}\n\nClick the update button in the \"Settings\" menu to download and install!" +ROLL_BACK_TEXT = 'Click Here to Roll Back' + +def secondary_stem(stem:str): + """Determines secondary stem""" + + stem = stem if stem else NO_STEM + + if stem in STEM_PAIR_MAPPER.keys(): + for key, value in STEM_PAIR_MAPPER.items(): + if stem in key: + secondary_stem = value + else: + secondary_stem = stem.replace(NO_STEM, "") if NO_STEM in stem else f"{NO_STEM}{stem}" + + return secondary_stem diff --git a/gui_data/cr_text.txt b/gui_data/cr_text.txt new file mode 100644 index 0000000000000000000000000000000000000000..6b921f6eb1b555aa5e126dbada388268871b868e --- /dev/null +++ b/gui_data/cr_text.txt @@ -0,0 +1,104 @@ +Most Recent Changes: + +Patch UVR_Patch_10_6_23_4_27: +~ MPS for MacOS is now compatible with all MDX-Net & VR Arch models! +~ Fixed memory issue with MDX23C models. +~ Added the ability to choose GPU device to process tracks. +~ Fixed a few graphical bugs. + +Other Changes: +~ Added "Vocal Split Mode," a chain ensemble that utilizes karaoke & BVE + (backing vocal extractor) models to split vocals into lead vocal and backing vocal + stems. +~ Updated "Audio Tools" to include "Matchering". +~ The "Align Tool" has been improved to line up inputs even when there are differences +in timing, similar to what Utagoe does. +~ Integrated a right-click menu for entries in the batch process window specifically + for Matchering & Align tool. +~ Introduced a right-click option for Matchering & Align tool on the main window to +directly access input folders. +~ Addressed the anomaly where models would self-select in dropdowns. + ~ This seemed related to having a multitude of models. + ~ Revamped the dropdown menus entirely. +~ Fixed splash screen closing issue. +~ Demucs Pre-process Model settings properly reset to default +~ The MP3 encoder now uses LAME. +~ Resolved the problem of using the "MDX23C_D1581" model as a pre-process model for + Demucs conversions were leading to a "Key Error: 'Primary Stem'" error. +~ Rectified the "missing file" error that was popping up when trying to convert using + the Demucs v2 Demucs model. +~ Solved the problem in Demucs using the htdemucs_6s multi-stem model: when certain + stems were selected and pitch-shift conversion was used, a KeyError prevented + the stem from being saved. The stem separation now works correctly, and files are + saved to the intended destination. +~ Adjusted the "Reset All Settings To Default" function to ensure it also resets the + text of the Sample Mode slider. +~ Made corrections so that if you activate Model Test Mode and then restart the app, + the "Accept Any Input" option will not be inadvertently enabled. +~ Addressed the ensemble issue with MDX23C models. +~ Updated the "Select Stems" menu for 2 stem models under MDX23C. +~ Revamped the MDXC23 Overlap Menu. 
+~ Provided an option to decide on using the VR denoise model in MDX-NET. + ~ Introduced a selection menu with choices: "None," "Standard," and "Denoise Model." +~ Added an option to automatically deverb vocals for Demucs & MDX models. (does not + work in ensemble mode at this time) +~ Updated the Download Center to feature MDX23C models. +~ Fixed the glitch where ensemble final outputs weren't saved. +~ Enhanced align/matchering outputs to support MP3 and FLAC. +~ Refined tooltips for "Overlap" and "Segment Default." +~ Ensured the download lists refresh properly even when there's no internet. +~ Many more fixes. + +Resources & Credits: + +---------------------------------------------------------------------------------------------- +Name: +ZFTurbo + +Contribution: +~ Created the weights for the new MDX23C models. +~ Trained the new MDX23C models. + +Resources: +~ These models are available to use online. See link details below: + ~ https://mvsep.com + ~ Separation type -> MDX23C (vocals, instrumental) + ~ Vocal model type -> 8K FFT, Full Band (SDR vocals: 10.17, SDR instrum: 16.48) +---------------------------------------------------------------------------------------------- +Name: +Bas Curtiz + +Contribution: +~ Conducted thorough testing and produced detailed tutorials on SDR evaluation. +~ Implemented systematic SDR quality assessments. +~ Authored Google Document detailing the optimal models for each genre. +~ Authored Google Document capturing UVR frequency cutoffs and time-elapsed comparisons between GPU and CPU performances. +~ Authored a Google Document consolidating best practices, tips, and tricks. + +Resources: +~ SDR Per Genre Breakdown +~ UVR Model Frequency Cutoff Chart +~ Tips & Tricks +~ Link: (See "Discord Resources Link" below) +---------------------------------------------------------------------------------------------- +Name: +deton24 + +Contribution: +~ SDR quality checks. +~ Authored a Google Document that serves as an all-in-one guide to source separation. + +Resources: +~ All-in-One Source Separation Guide +~ Link: (See "Discord Resources Link" below) +---------------------------------------------------------------------------------------------- +Name: +dca100fb8 + +Contribution: +~ Testing +~ Bug reporting +~ Suggestions +---------------------------------------------------------------------------------------------- + +Discord Resources Link: https://discord.com/channels/708579735583588363/1153421553694953482 diff --git a/gui_data/error_handling.py b/gui_data/error_handling.py new file mode 100644 index 0000000000000000000000000000000000000000..b74d3459a52e98b641bd4f9ae4e347ad75c5650b --- /dev/null +++ b/gui_data/error_handling.py @@ -0,0 +1,110 @@ +from datetime import datetime +import traceback + +CUDA_MEMORY_ERROR = "CUDA out of memory" +CUDA_RUNTIME_ERROR = "CUDNN error executing cudnnSetTensorNdDescriptor" +DEMUCS_MODEL_MISSING_ERROR = "is neither a single pre-trained model or a bag of models." 
+ENSEMBLE_MISSING_MODEL_ERROR = "local variable \'enseExport\' referenced before assignment" +FFMPEG_MISSING_ERROR = """audioread\__init__.py", line 116, in audio_open""" +FILE_MISSING_ERROR = "FileNotFoundError" +MDX_MEMORY_ERROR = "onnxruntime::CudaCall CUDA failure 2: out of memory" +MDX_MODEL_MISSING = "[ONNXRuntimeError] : 3 : NO_SUCHFILE" +MDX_MODEL_SETTINGS_ERROR = "Got invalid dimensions for input" +MDX_RUNTIME_ERROR = "onnxruntime::BFCArena::AllocateRawInternal" +MODULE_ERROR = "ModuleNotFoundError" +WINDOW_SIZE_ERROR = "h1_shape[3] must be greater than h2_shape[3]" +SF_WRITE_ERROR = "sf.write" +SYSTEM_MEMORY_ERROR = "DefaultCPUAllocator: not enough memory" +MISSING_MODEL_ERROR = "'NoneType\' object has no attribute \'model_basename\'" +ARRAY_SIZE_ERROR = "ValueError: \"array is too big; `arr.size * arr.dtype.itemsize` is larger than the maximum possible size.\"" +GPU_INCOMPATIBLE_ERROR = "no kernel image is available for execution on the device" +SELECT_CORRECT_GPU = "CUDA kernel errors might be asynchronously reported at some other API call,so the stacktrace below might be incorrect." + +CONTACT_DEV = 'If this error persists, please contact the developers with the error details.' + +ERROR_MAPPER = { + CUDA_MEMORY_ERROR: + ('The application was unable to allocate enough GPU memory to use this model. ' + + 'Please close any GPU intensive applications and try again.\n' + + 'If the error persists, your GPU might not be supported.') , + CUDA_RUNTIME_ERROR: + (f'Your PC cannot process this audio file with the segment size selected. Please lower the segment size and try again.\n\n{CONTACT_DEV}'), + DEMUCS_MODEL_MISSING_ERROR: + ('The selected Demucs model is missing. ' + + 'Please download the model or make sure it is in the correct directory.'), + ENSEMBLE_MISSING_MODEL_ERROR: + ('The application was unable to locate a model you selected for this ensemble.\n\n' + + 'Please do the following to use all compatible models:\n\n1. Navigate to the \"Updates\" tab in the Help Guide.\n2. Download and install the model expansion pack.\n3. Then try again.\n\n' + + 'If the error persists, please verify all models are present.'), + FFMPEG_MISSING_ERROR: + ('The input file type is not supported or FFmpeg is missing. Please select a file type supported by FFmpeg and try again. ' + + 'If FFmpeg is missing or not installed, you will only be able to process \".wav\" files until it is available on this system. ' + + f'See the \"More Info\" tab in the Help Guide.\n\n{CONTACT_DEV}'), + FILE_MISSING_ERROR: + (f'Missing file error raised. Please address the error and try again.\n\n{CONTACT_DEV}'), + MDX_MEMORY_ERROR: + ('The application was unable to allocate enough GPU memory to use this model.\n\n' + + 'Please do the following:\n\n1. Close any GPU intensive applications.\n2. Lower the set segment size.\n3. Then try again.\n\n' + + 'If the error persists, your GPU might not be supported.'), + MDX_MODEL_MISSING: + ('The application could not detect this MDX-Net model on your system. ' + + 'Please make sure all the models are present in the correct directory.\n\n' + + 'If the error persists, please reinstall application or contact the developers.'), + MDX_RUNTIME_ERROR: + ('The application was unable to allocate enough GPU memory to use this model.\n\n' + + 'Please do the following:\n\n1. Close any GPU intensive applications.\n2. Lower the set segment size.\n3. 
Then try again.\n\n' + + 'If the error persists, your GPU might not be supported.'), + WINDOW_SIZE_ERROR: + ('Invalid window size.\n\n' + + 'The chosen window size is likely not compatible with this model. Please select a different size and try again.'), + SF_WRITE_ERROR: + ('Could not write audio file.\n\n' + + 'This could be due to one of the following:\n\n1. Low storage on target device.\n2. The export directory no longer exists.\n3. A system permissions issue.'), + SYSTEM_MEMORY_ERROR: + ('The application was unable to allocate enough system memory to use this model.\n\n' + + 'Please do the following:\n\n1. Restart this application.\n2. Ensure any CPU intensive applications are closed.\n3. Then try again.\n\n' + + 'Please Note: Intel Pentium and Intel Celeron processors do not work well with this application.\n\n' + + 'If the error persists, the system may not have enough RAM, or your CPU might not be supported.'), + MISSING_MODEL_ERROR: + ('Model Missing: The application was unable to locate the chosen model.\n\n' + + 'If the error persists, please verify any selected models are present.'), + GPU_INCOMPATIBLE_ERROR: + ('This process is not compatible with your GPU.\n\n' + + 'Please uncheck \"GPU Conversion\" and try again'), + SELECT_CORRECT_GPU: + ('Make sure you\'ve chosen the correct GPU.\n\n' + 'Go to the "Settings Guide", click the "Additional Settings" tab and select the correct GPU device.'), + ARRAY_SIZE_ERROR: + ('The application was not able to process the given audiofile. Please convert the audiofile to another format and try again.'), +} + +def error_text(process_method, exception): + + traceback_text = ''.join(traceback.format_tb(exception.__traceback__)) + message = f'{type(exception).__name__}: "{exception}"\nTraceback Error: "\n{traceback_text}"\n' + error_message = f'\n\nRaw Error Details:\n\n{message}\nError Time Stamp [{datetime.now().strftime("%Y-%m-%d %H:%M:%S")}]\n' + process = f'Last Error Received:\n\nProcess: {process_method}\n\n' + + for error_type, full_text in ERROR_MAPPER.items(): + if error_type in message: + final_message = full_text + break + else: + final_message = (CONTACT_DEV) + + return f"{process}{final_message}{error_message}" + +def error_dialouge(exception): + + error_name = f'{type(exception).__name__}' + traceback_text = ''.join(traceback.format_tb(exception.__traceback__)) + message = f'{error_name}: "{exception}"\n{traceback_text}"' + + for error_type, full_text in ERROR_MAPPER.items(): + if error_type in message: + final_message = full_text + break + else: + final_message = (f'An Error Occurred: {error_name}\n\n{CONTACT_DEV}') + + return final_message diff --git a/gui_data/fail_chime.wav b/gui_data/fail_chime.wav new file mode 100644 index 0000000000000000000000000000000000000000..85b23b3110c10458612dc75f97556bfc5b4c89f7 Binary files /dev/null and b/gui_data/fail_chime.wav differ diff --git a/gui_data/fonts/Montserrat/Montserrat.ttf b/gui_data/fonts/Montserrat/Montserrat.ttf new file mode 100644 index 0000000000000000000000000000000000000000..2a2b2aaa67884c57c8e83921345ce852b7c7dbea Binary files /dev/null and b/gui_data/fonts/Montserrat/Montserrat.ttf differ diff --git a/gui_data/fonts/centurygothic/GOTHIC.ttf b/gui_data/fonts/centurygothic/GOTHIC.ttf new file mode 100644 index 0000000000000000000000000000000000000000..c60a324123eaaeab58c400c139bcabbc6d0d2a9f Binary files /dev/null and b/gui_data/fonts/centurygothic/GOTHIC.ttf differ diff --git a/gui_data/fonts/other/own_font_goes_here.txt b/gui_data/fonts/other/own_font_goes_here.txt new 
file mode 100644 index 0000000000000000000000000000000000000000..c227083464fb9af8955c90d2924774ee50abb547 --- /dev/null +++ b/gui_data/fonts/other/own_font_goes_here.txt @@ -0,0 +1 @@ +0 \ No newline at end of file diff --git a/gui_data/img/File.png b/gui_data/img/File.png new file mode 100644 index 0000000000000000000000000000000000000000..f0efc7d4547a3c85cae3630c2765bf4cf5c90af7 Binary files /dev/null and b/gui_data/img/File.png differ diff --git a/gui_data/img/GUI-Icon.ico b/gui_data/img/GUI-Icon.ico new file mode 100644 index 0000000000000000000000000000000000000000..c3169b948c581e16a70babeaf782079433d63ae5 Binary files /dev/null and b/gui_data/img/GUI-Icon.ico differ diff --git a/gui_data/img/GUI-Icon.png b/gui_data/img/GUI-Icon.png new file mode 100644 index 0000000000000000000000000000000000000000..bd90698c02f0b2d1f0667465f57190fb179e88d7 Binary files /dev/null and b/gui_data/img/GUI-Icon.png differ diff --git a/gui_data/img/UVR-banner.png b/gui_data/img/UVR-banner.png new file mode 100644 index 0000000000000000000000000000000000000000..a1eea2b7ceac6f50d421cf028f1dbdc60bf9541d Binary files /dev/null and b/gui_data/img/UVR-banner.png differ diff --git a/gui_data/img/UVR.icns b/gui_data/img/UVR.icns new file mode 100644 index 0000000000000000000000000000000000000000..458aa26deeb74ad6a25218021cef4a1a12fa6001 Binary files /dev/null and b/gui_data/img/UVR.icns differ diff --git a/gui_data/img/UVR_v5.6.png b/gui_data/img/UVR_v5.6.png new file mode 100644 index 0000000000000000000000000000000000000000..217176894e017b98f56ec22fa0f00e608d1628e7 Binary files /dev/null and b/gui_data/img/UVR_v5.6.png differ diff --git a/gui_data/img/clear.png b/gui_data/img/clear.png new file mode 100644 index 0000000000000000000000000000000000000000..901024df3e9725dfb32839676e061e10f2f5fce5 Binary files /dev/null and b/gui_data/img/clear.png differ diff --git a/gui_data/img/copy.png b/gui_data/img/copy.png new file mode 100644 index 0000000000000000000000000000000000000000..40438723cf602bac52e81fe918d9a0b481d720c7 Binary files /dev/null and b/gui_data/img/copy.png differ diff --git a/gui_data/img/credits.png b/gui_data/img/credits.png new file mode 100644 index 0000000000000000000000000000000000000000..f32a49465093d4217e9e6625151da7a34ed09684 Binary files /dev/null and b/gui_data/img/credits.png differ diff --git a/gui_data/img/donate.png b/gui_data/img/donate.png new file mode 100644 index 0000000000000000000000000000000000000000..75341dfbc8afaae6be9121ab3d1e6aecbde841fb Binary files /dev/null and b/gui_data/img/donate.png differ diff --git a/gui_data/img/down.png b/gui_data/img/down.png new file mode 100644 index 0000000000000000000000000000000000000000..56a900c7e77d713b796e273e7ba4bac14da5a511 Binary files /dev/null and b/gui_data/img/down.png differ diff --git a/gui_data/img/download.png b/gui_data/img/download.png new file mode 100644 index 0000000000000000000000000000000000000000..3e9e3f642ae1124bd8ffe4baf68cacebd827b507 Binary files /dev/null and b/gui_data/img/download.png differ diff --git a/gui_data/img/help.png b/gui_data/img/help.png new file mode 100644 index 0000000000000000000000000000000000000000..fb576bb21e10a263cccf77c413ae807e7e317bf7 Binary files /dev/null and b/gui_data/img/help.png differ diff --git a/gui_data/img/key.png b/gui_data/img/key.png new file mode 100644 index 0000000000000000000000000000000000000000..35a5a19520d7bf1f78d3e9f65fd61ff292b56490 Binary files /dev/null and b/gui_data/img/key.png differ diff --git a/gui_data/img/left.png b/gui_data/img/left.png new file mode 100644 
index 0000000000000000000000000000000000000000..9dca0f7eacd3559f2553e18dc77948b8f5777a3a Binary files /dev/null and b/gui_data/img/left.png differ diff --git a/gui_data/img/pause.png b/gui_data/img/pause.png new file mode 100644 index 0000000000000000000000000000000000000000..819b46570f461c6402aef7ac51896ec5f15a7f28 Binary files /dev/null and b/gui_data/img/pause.png differ diff --git a/gui_data/img/play.png b/gui_data/img/play.png new file mode 100644 index 0000000000000000000000000000000000000000..d01b06a835795ffe7c0980930364108069b30614 Binary files /dev/null and b/gui_data/img/play.png differ diff --git a/gui_data/img/right.png b/gui_data/img/right.png new file mode 100644 index 0000000000000000000000000000000000000000..fc00c560ffc8d671566664338ee1a41152a9e16e Binary files /dev/null and b/gui_data/img/right.png differ diff --git a/gui_data/img/splash.bmp b/gui_data/img/splash.bmp new file mode 100644 index 0000000000000000000000000000000000000000..a86423ef339e4d17ee08aa142c1bbc016e2b98a5 Binary files /dev/null and b/gui_data/img/splash.bmp differ diff --git a/gui_data/img/stop.png b/gui_data/img/stop.png new file mode 100644 index 0000000000000000000000000000000000000000..e094d1b66cbf6a7777aae2cf2656ba1ca7272641 Binary files /dev/null and b/gui_data/img/stop.png differ diff --git a/gui_data/img/stop_player.png b/gui_data/img/stop_player.png new file mode 100644 index 0000000000000000000000000000000000000000..8eccf6bd82b0e2a4b9674ef9e4939da106a9a22b Binary files /dev/null and b/gui_data/img/stop_player.png differ diff --git a/gui_data/img/up.png b/gui_data/img/up.png new file mode 100644 index 0000000000000000000000000000000000000000..44fc7d8bd4db1d66dbbb171f85005002bc8260fe Binary files /dev/null and b/gui_data/img/up.png differ diff --git a/gui_data/model_manual_download.json b/gui_data/model_manual_download.json new file mode 100644 index 0000000000000000000000000000000000000000..3ae9be99ea002163dd49b1991425b75fcaad6807 --- /dev/null +++ b/gui_data/model_manual_download.json @@ -0,0 +1 @@ +{"current_version": "UVR_Patch_3_31_23_5_5", "current_version_mac": "UVR_Patch_3_31_23_5_5", "current_version_linux": "UVR_Patch_3_31_23_5_5", "vr_download_list": {"VR Arch Single Model v5: 1_HP-UVR": "1_HP-UVR.pth", "VR Arch Single Model v5: 2_HP-UVR": "2_HP-UVR.pth", "VR Arch Single Model v5: 3_HP-Vocal-UVR": "3_HP-Vocal-UVR.pth", "VR Arch Single Model v5: 4_HP-Vocal-UVR": "4_HP-Vocal-UVR.pth", "VR Arch Single Model v5: 5_HP-Karaoke-UVR": "5_HP-Karaoke-UVR.pth", "VR Arch Single Model v5: 6_HP-Karaoke-UVR": "6_HP-Karaoke-UVR.pth", "VR Arch Single Model v5: 7_HP2-UVR": "7_HP2-UVR.pth", "VR Arch Single Model v5: 8_HP2-UVR": "8_HP2-UVR.pth", "VR Arch Single Model v5: 9_HP2-UVR": "9_HP2-UVR.pth", "VR Arch Single Model v5: 10_SP-UVR-2B-32000-1": "10_SP-UVR-2B-32000-1.pth", "VR Arch Single Model v5: 11_SP-UVR-2B-32000-2": "11_SP-UVR-2B-32000-2.pth", "VR Arch Single Model v5: 12_SP-UVR-3B-44100": "12_SP-UVR-3B-44100.pth", "VR Arch Single Model v5: 13_SP-UVR-4B-44100-1": "13_SP-UVR-4B-44100-1.pth", "VR Arch Single Model v5: 14_SP-UVR-4B-44100-2": "14_SP-UVR-4B-44100-2.pth", "VR Arch Single Model v5: 15_SP-UVR-MID-44100-1": "15_SP-UVR-MID-44100-1.pth", "VR Arch Single Model v5: 16_SP-UVR-MID-44100-2": "16_SP-UVR-MID-44100-2.pth", "VR Arch Single Model v5: 17_HP-Wind_Inst-UVR": "17_HP-Wind_Inst-UVR.pth", "VR Arch Single Model v5: UVR-De-Echo-Aggressive by FoxJoy": "UVR-De-Echo-Aggressive.pth", "VR Arch Single Model v5: UVR-De-Echo-Normal by FoxJoy": "UVR-De-Echo-Normal.pth", "VR Arch Single Model v5: 
UVR-DeEcho-DeReverb by FoxJoy": "UVR-DeEcho-DeReverb.pth", "VR Arch Single Model v5: UVR-DeNoise-Lite by FoxJoy": "UVR-DeNoise-Lite.pth", "VR Arch Single Model v5: UVR-DeNoise by FoxJoy": "UVR-DeNoise.pth", "VR Arch Single Model v5: UVR-BVE-4B_SN-44100-1": "UVR-BVE-4B_SN-44100-1.pth", "VR Arch Single Model v4: MGM_HIGHEND_v4": "MGM_HIGHEND_v4.pth", "VR Arch Single Model v4: MGM_LOWEND_A_v4": "MGM_LOWEND_A_v4.pth", "VR Arch Single Model v4: MGM_LOWEND_B_v4": "MGM_LOWEND_B_v4.pth", "VR Arch Single Model v4: MGM_MAIN_v4": "MGM_MAIN_v4.pth"}, "mdx_download_list": {"MDX-Net Model: UVR-MDX-NET Inst HQ 1": "UVR-MDX-NET-Inst_HQ_1.onnx", "MDX-Net Model: UVR-MDX-NET Inst HQ 2": "UVR-MDX-NET-Inst_HQ_2.onnx", "MDX-Net Model: UVR-MDX-NET Inst HQ 3": "UVR-MDX-NET-Inst_HQ_3.onnx", "MDX-Net Model: UVR-MDX-NET Main": "UVR_MDXNET_Main.onnx", "MDX-Net Model: UVR-MDX-NET Inst Main": "UVR-MDX-NET-Inst_Main.onnx", "MDX-Net Model: UVR-MDX-NET 1": "UVR_MDXNET_1_9703.onnx", "MDX-Net Model: UVR-MDX-NET 2": "UVR_MDXNET_2_9682.onnx", "MDX-Net Model: UVR-MDX-NET 3": "UVR_MDXNET_3_9662.onnx", "MDX-Net Model: UVR-MDX-NET Inst 1": "UVR-MDX-NET-Inst_1.onnx", "MDX-Net Model: UVR-MDX-NET Inst 2": "UVR-MDX-NET-Inst_2.onnx", "MDX-Net Model: UVR-MDX-NET Inst 3": "UVR-MDX-NET-Inst_3.onnx", "MDX-Net Model: UVR-MDX-NET Karaoke": "UVR_MDXNET_KARA.onnx", "MDX-Net Model: UVR-MDX-NET Karaoke 2": "UVR_MDXNET_KARA_2.onnx", "MDX-Net Model: UVR_MDXNET_9482": "UVR_MDXNET_9482.onnx", "MDX-Net Model: UVR-MDX-NET Voc FT": "UVR-MDX-NET-Voc_FT.onnx", "MDX-Net Model: Kim Vocal 1": "Kim_Vocal_1.onnx", "MDX-Net Model: Kim Vocal 2": "Kim_Vocal_2.onnx", "MDX-Net Model: Kim Inst": "Kim_Inst.onnx", "MDX-Net Model: Reverb HQ By FoxJoy": "Reverb_HQ_By_FoxJoy.onnx", "MDX-Net Model: kuielab_a_vocals": "kuielab_a_vocals.onnx", "MDX-Net Model: kuielab_a_other": "kuielab_a_other.onnx", "MDX-Net Model: kuielab_a_bass": "kuielab_a_bass.onnx", "MDX-Net Model: kuielab_a_drums": "kuielab_a_drums.onnx", "MDX-Net Model: kuielab_b_vocals": "kuielab_b_vocals.onnx", "MDX-Net Model: kuielab_b_other": "kuielab_b_other.onnx", "MDX-Net Model: kuielab_b_bass": "kuielab_b_bass.onnx", "MDX-Net Model: kuielab_b_drums": "kuielab_b_drums.onnx"}, "demucs_download_list": {"Demucs v4: htdemucs_ft": {"f7e0c4bc-ba3fe64a.th": "https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/f7e0c4bc-ba3fe64a.th", "d12395a8-e57c48e6.th": "https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/d12395a8-e57c48e6.th", "92cfc3b6-ef3bcb9c.th": "https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/92cfc3b6-ef3bcb9c.th", "04573f0d-f3cf25b2.th": "https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/04573f0d-f3cf25b2.th", "htdemucs_ft.yaml": "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/htdemucs_ft.yaml"}, "Demucs v4: htdemucs": {"955717e8-8726e21a.th": "https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/955717e8-8726e21a.th", "htdemucs.yaml": "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/htdemucs.yaml"}, "Demucs v4: hdemucs_mmi": {"75fc33f5-1941ce65.th": "https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/75fc33f5-1941ce65.th", "hdemucs_mmi.yaml": "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/hdemucs_mmi.yaml"}, "Demucs v4: htdemucs_6s": {"5c90dfd2-34c22ccb.th": "https://dl.fbaipublicfiles.com/demucs/hybrid_transformer/5c90dfd2-34c22ccb.th", "htdemucs_6s.yaml": "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/htdemucs_6s.yaml"}, "Demucs v3: mdx": 
{"0d19c1c6-0f06f20e.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/0d19c1c6-0f06f20e.th", "7ecf8ec1-70f50cc9.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/7ecf8ec1-70f50cc9.th", "c511e2ab-fe698775.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/c511e2ab-fe698775.th", "7d865c68-3d5dd56b.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/7d865c68-3d5dd56b.th", "mdx.yaml": "https://raw.githubusercontent.com/facebookresearch/demucs/main/demucs/remote/mdx.yaml"}, "Demucs v3: mdx_q": {"6b9c2ca1-3fd82607.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/6b9c2ca1-3fd82607.th", "b72baf4e-8778635e.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/b72baf4e-8778635e.th", "42e558d4-196e0e1b.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/42e558d4-196e0e1b.th", "305bc58f-18378783.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/305bc58f-18378783.th", "mdx_q.yaml": "https://raw.githubusercontent.com/facebookresearch/demucs/main/demucs/remote/mdx_q.yaml"}, "Demucs v3: mdx_extra": {"e51eebcc-c1b80bdd.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/e51eebcc-c1b80bdd.th", "a1d90b5c-ae9d2452.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/a1d90b5c-ae9d2452.th", "5d2d6c55-db83574e.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/5d2d6c55-db83574e.th", "cfa93e08-61801ae1.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/cfa93e08-61801ae1.th", "mdx_extra.yaml": "https://raw.githubusercontent.com/facebookresearch/demucs/main/demucs/remote/mdx_extra.yaml"}, "Demucs v3: mdx_extra_q": {"83fc094f-4a16d450.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/83fc094f-4a16d450.th", "464b36d7-e5a9386e.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/464b36d7-e5a9386e.th", "14fc6a69-a89dd0ee.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/14fc6a69-a89dd0ee.th", "7fd6ef75-a905dd85.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/7fd6ef75-a905dd85.th", "mdx_extra_q.yaml": "https://raw.githubusercontent.com/facebookresearch/demucs/main/demucs/remote/mdx_extra_q.yaml"}, "Demucs v3: UVR Model": {"ebf34a2db.th": "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/ebf34a2db.th", "UVR_Demucs_Model_1.yaml": "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/UVR_Demucs_Model_1.yaml"}, "Demucs v3: repro_mdx_a": {"9a6b4851-03af0aa6.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/9a6b4851-03af0aa6.th", "1ef250f1-592467ce.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/1ef250f1-592467ce.th", "fa0cb7f9-100d8bf4.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/fa0cb7f9-100d8bf4.th", "902315c2-b39ce9c9.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/902315c2-b39ce9c9.th", "repro_mdx_a.yaml": "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/repro_mdx_a.yaml"}, "Demucs v3: repro_mdx_a_time_only": {"9a6b4851-03af0aa6.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/9a6b4851-03af0aa6.th", "1ef250f1-592467ce.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/1ef250f1-592467ce.th", "repro_mdx_a_time_only.yaml": "https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/repro_mdx_a_time_only.yaml"}, "Demucs v3: repro_mdx_a_hybrid_only": {"fa0cb7f9-100d8bf4.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/fa0cb7f9-100d8bf4.th", "902315c2-b39ce9c9.th": "https://dl.fbaipublicfiles.com/demucs/mdx_final/902315c2-b39ce9c9.th", "repro_mdx_a_hybrid_only.yaml": 
"https://github.com/TRvlvr/model_repo/releases/download/all_public_uvr_models/repro_mdx_a_hybrid_only.yaml"}, "Demucs v2: demucs": {"demucs-e07c671f.th": "https://dl.fbaipublicfiles.com/demucs/v3.0/demucs-e07c671f.th"}, "Demucs v2: demucs_extra": {"demucs_extra-3646af93.th": "https://dl.fbaipublicfiles.com/demucs/v3.0/demucs_extra-3646af93.th"}, "Demucs v2: demucs48_hq": {"demucs48_hq-28a1282c.th": "https://dl.fbaipublicfiles.com/demucs/v3.0/demucs48_hq-28a1282c.th"}, "Demucs v2: tasnet": {"tasnet-beb46fac.th": "https://dl.fbaipublicfiles.com/demucs/v3.0/tasnet-beb46fac.th"}, "Demucs v2: tasnet_extra": {"tasnet_extra-df3777b2.th": "https://dl.fbaipublicfiles.com/demucs/v3.0/tasnet_extra-df3777b2.th"}, "Demucs v2: demucs_unittest": {"demucs_unittest-09ebc15f.th": "https://dl.fbaipublicfiles.com/demucs/v3.0/demucs_unittest-09ebc15f.th"}, "Demucs v1: demucs": {"demucs.th": "https://dl.fbaipublicfiles.com/demucs/v2.0/demucs.th"}, "Demucs v1: demucs_extra": {"demucs_extra.th": "https://dl.fbaipublicfiles.com/demucs/v2.0/demucs_extra.th"}, "Demucs v1: light": {"light.th": "https://dl.fbaipublicfiles.com/demucs/v2.0/light.th"}, "Demucs v1: light_extra": {"light_extra.th": "https://dl.fbaipublicfiles.com/demucs/v2.0/light_extra.th"}, "Demucs v1: tasnet": {"tasnet.th": "https://dl.fbaipublicfiles.com/demucs/v2.0/tasnet.th"}, "Demucs v1: tasnet_extra": {"tasnet_extra.th": "https://dl.fbaipublicfiles.com/demucs/v2.0/tasnet_extra.th"}}, "mdx_download_vip_list": {"MDX-Net Model VIP: UVR-MDX-NET_Main_340": "UVR-MDX-NET_Main_340.onnx", "MDX-Net Model VIP: UVR-MDX-NET_Main_390": "UVR-MDX-NET_Main_390.onnx", "MDX-Net Model VIP: UVR-MDX-NET_Main_406": "UVR-MDX-NET_Main_406.onnx", "MDX-Net Model VIP: UVR-MDX-NET_Main_427": "UVR-MDX-NET_Main_427.onnx", "MDX-Net Model VIP: UVR-MDX-NET_Main_438": "UVR-MDX-NET_Main_438.onnx", "MDX-Net Model VIP: UVR-MDX-NET_Inst_82_beta": "UVR-MDX-NET_Inst_82_beta.onnx", "MDX-Net Model VIP: UVR-MDX-NET_Inst_90_beta": "UVR-MDX-NET_Inst_90_beta.onnx", "MDX-Net Model VIP: UVR-MDX-NET_Inst_187_beta": "UVR-MDX-NET_Inst_187_beta.onnx", "MDX-Net Model VIP: UVR-MDX-NET-Inst_full_292": "UVR-MDX-NET-Inst_full_292.onnx"}, "mdx23_download_list": {"MDX23 Model: MDX23C_D1581": {"MDX23C_D1581.ckpt": "model_2_stem_061321.yaml"}}, "vr_download_vip_list": [], "demucs_download_vip_list": []} \ No newline at end of file diff --git a/gui_data/old_data_check.py b/gui_data/old_data_check.py new file mode 100644 index 0000000000000000000000000000000000000000..8c69cb2ba6d09a71047786df2f5b6dd689127323 --- /dev/null +++ b/gui_data/old_data_check.py @@ -0,0 +1,27 @@ +import os +import shutil + +def file_check(original_dir, new_dir): + + if os.path.isdir(original_dir): + for file in os.listdir(original_dir): + shutil.move(os.path.join(original_dir, file), os.path.join(new_dir, file)) + + if len(os.listdir(original_dir)) == 0: + shutil.rmtree(original_dir) + +def remove_unneeded_yamls(demucs_dir): + + for file in os.listdir(demucs_dir): + if file.endswith('.yaml'): + if os.path.isfile(os.path.join(demucs_dir, file)): + os.remove(os.path.join(demucs_dir, file)) + +def remove_temps(remove_dir): + + if os.path.isdir(remove_dir): + try: + shutil.rmtree(remove_dir) + except Exception as e: + print(e) + \ No newline at end of file diff --git a/gui_data/own_font.json b/gui_data/own_font.json new file mode 100644 index 0000000000000000000000000000000000000000..dd81c3664123fbba15ddc16752a96b8b3da54945 --- /dev/null +++ b/gui_data/own_font.json @@ -0,0 +1,4 @@ +{ + "font_name": null, + "font_file": null +} \ No 
newline at end of file diff --git a/gui_data/saved_ensembles/inst_ensemble.json b/gui_data/saved_ensembles/inst_ensemble.json new file mode 100644 index 0000000000000000000000000000000000000000..0bca9062a589345b07c231c5ecd0cb4e4f801b86 --- /dev/null +++ b/gui_data/saved_ensembles/inst_ensemble.json @@ -0,0 +1,11 @@ +{ + "ensemble_main_stem": "Vocals/Instrumental", + "ensemble_type": "Average/Average", + "selected_models": [ + "VR Arc: 7_HP2-UVR", + "MDX-Net: UVR-MDX-NET Inst 3", + "MDX-Net: UVR-MDX-NET Inst HQ 3", + "MDX-Net: UVR-MDX-NET Inst Main", + "Demucs: v4 | htdemucs_ft" + ] +} \ No newline at end of file diff --git a/gui_data/saved_ensembles/saved_ensembles_go_here.txt b/gui_data/saved_ensembles/saved_ensembles_go_here.txt new file mode 100644 index 0000000000000000000000000000000000000000..92b9d07ac6daa72cd49ac47d56d7eba768008a1c --- /dev/null +++ b/gui_data/saved_ensembles/saved_ensembles_go_here.txt @@ -0,0 +1 @@ +saved_ensembles_go_here \ No newline at end of file diff --git a/gui_data/saved_settings/saved_settings_go_here.txt b/gui_data/saved_settings/saved_settings_go_here.txt new file mode 100644 index 0000000000000000000000000000000000000000..b2138571a522140636df40b497456204e20db480 --- /dev/null +++ b/gui_data/saved_settings/saved_settings_go_here.txt @@ -0,0 +1 @@ +saved_settings_go_here \ No newline at end of file diff --git a/gui_data/sv_ttk/__init__.py b/gui_data/sv_ttk/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8d62495fc04f0beee3326cef97723283d7875c91 --- /dev/null +++ b/gui_data/sv_ttk/__init__.py @@ -0,0 +1,69 @@ +from pathlib import Path +import platform + +if platform.system() == "Darwin": + sun_valley_tcl = "sun-valley_darwin.tcl" +else: + sun_valley_tcl = "sun-valley.tcl" + +inited = False +root = None + + +def init(func): + def wrapper(*args, **kwargs): + global inited + global root + + if not inited: + from tkinter import _default_root + + path = (Path(__file__).parent / sun_valley_tcl).resolve() + + try: + _default_root.tk.call("source", str(path)) + except AttributeError: + raise RuntimeError( + "can't set theme. " + "Tk is not initialized. " + "Please first create a tkinter.Tk instance, then set the theme." 
+ ) from None + else: + inited = True + root = _default_root + + return func(*args, **kwargs) + + return wrapper + + +@init +def set_theme(theme, font_name="Century Gothic", f_size=10, fg_color_set="#F6F6F7"): + if theme not in {"dark", "light"}: + raise RuntimeError(f"not a valid theme name: {theme}") + + root.globalsetvar("fontName", (font_name, f_size)) + root.globalsetvar("fgcolorset", (fg_color_set)) + root.tk.call("set_theme", theme) + + +@init +def get_theme(): + theme = root.tk.call("ttk::style", "theme", "use") + + try: + return {"sun-valley-dark": "dark", "sun-valley-light": "light"}[theme] + except KeyError: + return theme + + +@init +def toggle_theme(): + if get_theme() == "dark": + use_light_theme() + else: + use_dark_theme() + + +use_dark_theme = lambda: set_theme("dark") +use_light_theme = lambda: set_theme("light") diff --git a/gui_data/sv_ttk/__pycache__/__init__.cpython-310.pyc b/gui_data/sv_ttk/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7d15ae0c1f3e743968aa223dfcf433c6028744b Binary files /dev/null and b/gui_data/sv_ttk/__pycache__/__init__.cpython-310.pyc differ diff --git a/gui_data/sv_ttk/sun-valley.tcl b/gui_data/sv_ttk/sun-valley.tcl new file mode 100644 index 0000000000000000000000000000000000000000..094ed13bb3818054e40af08712fb03638c820c7a --- /dev/null +++ b/gui_data/sv_ttk/sun-valley.tcl @@ -0,0 +1,52 @@ +# Copyright © 2021 rdbende + +source [file join [file dirname [info script]] theme dark.tcl] + +option add *tearOff 0 + +proc set_theme {mode} { + if {$mode == "dark"} { + ttk::style theme use "sun-valley-dark" + + set fontString "$::fontName" + set fgSet "$::fgcolorset" + + array set colors { + -fg "#F6F6F7" + -bg "#0e0e0f" + -disabledfg "#F6F6F7" + -selectfg "#F6F6F7" + -selectbg "#003b50" + } + + ttk::style configure . \ + -background $colors(-bg) \ + -foreground $fgSet \ + -troughcolor $colors(-bg) \ + -focuscolor $colors(-selectbg) \ + -selectbackground $colors(-selectbg) \ + -selectforeground $colors(-selectfg) \ + -insertwidth 0 \ + -insertcolor $colors(-fg) \ + -fieldbackground $colors(-selectbg) \ + -font $fontString \ + -borderwidth 0 \ + -relief flat + + tk_setPalette \ + background [ttk::style lookup . -background] \ + foreground [ttk::style lookup . -foreground] \ + highlightColor [ttk::style lookup . -focuscolor] \ + selectBackground [ttk::style lookup . -selectbackground] \ + selectForeground [ttk::style lookup . -selectforeground] \ + activeBackground [ttk::style lookup . -selectbackground] \ + activeForeground [ttk::style lookup . -selectforeground] + + ttk::style map . -foreground [list disabled $colors(-disabledfg)] + + option add *font [ttk::style lookup . -font] + option add *Menu.selectcolor $colors(-fg) + option add *Menu.background #0e0e0f + + } +} diff --git a/gui_data/sv_ttk/sun-valley_darwin.tcl b/gui_data/sv_ttk/sun-valley_darwin.tcl new file mode 100644 index 0000000000000000000000000000000000000000..f76c5350ec64d605f4acda14075e9fc9497fa311 --- /dev/null +++ b/gui_data/sv_ttk/sun-valley_darwin.tcl @@ -0,0 +1,49 @@ +# Copyright © 2021 rdbende + +source [file join [file dirname [info script]] theme dark.tcl] + +option add *tearOff 0 + +proc set_theme {mode} { + if {$mode == "dark"} { + ttk::style theme use "sun-valley-dark" + + array set colors { + -fg "#F6F6F7" + -bg "#0e0e0f" + -disabledfg "#F6F6F7" + -selectfg "#F6F6F7" + -selectbg "#003b50" + } + + ttk::style configure . 
\ + -background $colors(-bg) \ + -foreground $colors(-fg) \ + -troughcolor $colors(-bg) \ + -focuscolor $colors(-selectbg) \ + -selectbackground $colors(-selectbg) \ + -selectforeground $colors(-selectfg) \ + -insertwidth 0 \ + -insertcolor $colors(-fg) \ + -fieldbackground $colors(-selectbg) \ + -font {"Century Gothic" 13} \ + -borderwidth 0 \ + -relief flat + + tk_setPalette \ + background [ttk::style lookup . -background] \ + foreground [ttk::style lookup . -foreground] \ + highlightColor [ttk::style lookup . -focuscolor] \ + selectBackground [ttk::style lookup . -selectbackground] \ + selectForeground [ttk::style lookup . -selectforeground] \ + activeBackground [ttk::style lookup . -selectbackground] \ + activeForeground [ttk::style lookup . -selectforeground] + + ttk::style map . -foreground [list disabled $colors(-disabledfg)] + + option add *font [ttk::style lookup . -font] + option add *Menu.selectcolor $colors(-fg) + option add *Menu.background #0e0e0f + + } +} diff --git a/gui_data/sv_ttk/theme/dark.tcl b/gui_data/sv_ttk/theme/dark.tcl new file mode 100644 index 0000000000000000000000000000000000000000..cd6e38813f4c32905a2073ec0d4118edaaaf5f23 --- /dev/null +++ b/gui_data/sv_ttk/theme/dark.tcl @@ -0,0 +1,563 @@ +# Copyright © 2021 rdbende + +# A stunning dark theme for ttk based on Microsoft's Sun Valley visual style + +package require Tk 8.6 + +namespace eval ttk::theme::sun-valley-dark { + variable version 1.0 + package provide ttk::theme::sun-valley-dark $version + + ttk::style theme create sun-valley-dark -parent clam -settings { + proc load_images {imgdir} { + variable images + foreach file [glob -directory $imgdir *.png] { + set images([file tail [file rootname $file]]) \ + [image create photo -file $file -format png] + } + } + + load_images [file join [file dirname [info script]] dark] + + array set colors { + -fg "#F6F6F7" + -bg "#0e0e0f" + -disabledfg "#F6F6F7" + -selectfg "#ffffff" + -selectbg "#2f60d8" + } + + ttk::style layout TButton { + Button.button -children { + Button.padding -children { + Button.label -side left -expand 1 + } + } + } + + ttk::style layout Toolbutton { + Toolbutton.button -children { + Toolbutton.padding -children { + Toolbutton.label -side left -expand 1 + } + } + } + + ttk::style layout TMenubutton { + Menubutton.button -children { + Menubutton.padding -children { + Menubutton.label -side left -expand 1 + Menubutton.indicator -side right -sticky nsew + } + } + } + + ttk::style layout TOptionMenu { + OptionMenu.button -children { + OptionMenu.padding -children { + OptionMenu.label -side left -expand 0 + OptionMenu.indicator -side right -sticky nsew + } + } + } + + ttk::style layout Accent.TButton { + AccentButton.button -children { + AccentButton.padding -children { + AccentButton.label -side left -expand 1 + } + } + } + + ttk::style layout Titlebar.TButton { + TitlebarButton.button -children { + TitlebarButton.padding -children { + TitlebarButton.label -side left -expand 1 + } + } + } + + ttk::style layout Close.Titlebar.TButton { + CloseButton.button -children { + CloseButton.padding -children { + CloseButton.label -side left -expand 1 + } + } + } + + ttk::style layout TCheckbutton { + Checkbutton.button -children { + Checkbutton.padding -children { + Checkbutton.indicator -side left + Checkbutton.label -side right -expand 1 + } + } + } + + ttk::style layout Switch.TCheckbutton { + Switch.button -children { + Switch.padding -children { + Switch.indicator -side left + Switch.label -side right -expand 1 + } + } + } + + ttk::style layout 
Toggle.TButton { + ToggleButton.button -children { + ToggleButton.padding -children { + ToggleButton.label -side left -expand 1 + } + } + } + + ttk::style layout TRadiobutton { + Radiobutton.button -children { + Radiobutton.padding -children { + Radiobutton.indicator -side left + Radiobutton.label -side right -expand 1 + } + } + } + + ttk::style layout Vertical.TScrollbar { + Vertical.Scrollbar.trough -sticky ns -children { + Vertical.Scrollbar.uparrow -side top + Vertical.Scrollbar.downarrow -side bottom + Vertical.Scrollbar.thumb -expand 1 + } + } + + ttk::style layout Horizontal.TScrollbar { + Horizontal.Scrollbar.trough -sticky ew -children { + Horizontal.Scrollbar.leftarrow -side left + Horizontal.Scrollbar.rightarrow -side right + Horizontal.Scrollbar.thumb -expand 1 + } + } + + ttk::style layout TSeparator { + TSeparator.separator -sticky nsew + } + + + # # Modify the TCombobox style + # ttk::style configure TCombobox -borderwidth 3 + + # # Define the layout of the ThickBorder.TCombobox + # ttk::style layout ThickBorder.TCombobox { + # Combobox.field -sticky nsew -children { + # Combobox.padding -expand 1 -sticky nsew -children { + # Combobox.textarea -sticky nsew + # } + # } + # null -side right -sticky ns -children { + # Combobox.arrow -sticky nsew + # } + # } + + # # Use a canvas as the parent of the combobox and create a custom border + # canvas .c -width 200 -height 30 -highlightthickness 0 + # canvas .c create rectangle 2 2 198 28 -width 3 -outline black + # pack .c + # ttk::combobox .c.cbox -values {"Option 1" "Option 2" "Option 3"} -style ThickBorder.TCombobox + # .c create window 100 15 -window .c.cbox + + ttk::style layout TCombobox { + Combobox.field -sticky nsew -children { + Combobox.padding -expand 1 -sticky nsew -children { + Combobox.textarea -sticky nsew + } + } + null -side right -sticky ns -children { + Combobox.arrow -sticky nsew + } + } + + ttk::style layout TSpinbox { + Spinbox.field -sticky nsew -children { + Spinbox.padding -expand 1 -sticky nsew -children { + Spinbox.textarea -sticky nsew + } + + } + null -side right -sticky nsew -children { + Spinbox.uparrow -side left -sticky nsew + Spinbox.downarrow -side right -sticky nsew + } + } + + ttk::style layout Card.TFrame { + Card.field { + Card.padding -expand 1 + } + } + + ttk::style layout TLabelframe { + Labelframe.border { + Labelframe.padding -expand 1 -children { + Labelframe.label -side left + } + } + } + + ttk::style layout TNotebook { + Notebook.border -children { + TNotebook.Tab -expand 1 + Notebook.client -sticky nsew + } + } + + ttk::style layout Treeview.Item { + Treeitem.padding -sticky nsew -children { + Treeitem.image -side left -sticky {} + Treeitem.indicator -side left -sticky {} + Treeitem.text -side left -sticky {} + } + } + + # Button + ttk::style configure TButton -padding {8 4} -anchor center -foreground $colors(-fg) + + ttk::style map TButton -foreground \ + [list disabled #7a7a7a \ + pressed #d0d0d0] + + ttk::style element create Button.button image \ + [list $images(button-rest) \ + {selected disabled} $images(button-disabled) \ + disabled $images(button-disabled) \ + selected $images(button-rest) \ + pressed $images(button-pressed) \ + active $images(button-hover) \ + ] -border 4 -sticky nsew + + # Toolbutton + ttk::style configure Toolbutton -padding {8 4} -anchor center + + ttk::style element create Toolbutton.button image \ + [list $images(empty) \ + {selected disabled} $images(button-disabled) \ + selected $images(button-rest) \ + pressed $images(button-pressed) \ + active 
$images(button-hover) \ + ] -border 4 -sticky nsew + + # Menubutton + ttk::style configure TMenubutton -padding {8 4 0 4} + + ttk::style element create Menubutton.button \ + image [list $images(button-rest) \ + disabled $images(button-disabled) \ + pressed $images(button-pressed) \ + active $images(button-hover) \ + ] -border 4 -sticky nsew + + ttk::style element create Menubutton.indicator image $images(arrow-down) -width 28 -sticky {} + + # OptionMenu + ttk::style configure TOptionMenu -padding {8 4 0 4} + ttk::style configure OptionMenudropdown -borderwidth 0 -relief ridge + + ttk::style element create OptionMenu.button \ + image [list $images(button-rest) \ + disabled $images(button-disabled) \ + pressed $images(button-pressed) \ + active $images(button-hover) \ + ] -border 0 -sticky nsew + + ttk::style element create OptionMenu.indicator image $images(arrow-down) -width 28 -sticky {} + + # Accent.TButton + ttk::style configure Accent.TButton -padding {8 4} -anchor center -foreground #ffffff + + ttk::style map Accent.TButton -foreground \ + [list pressed #25536a \ + disabled #a5a5a5] + + ttk::style element create AccentButton.button image \ + [list $images(button-accent-rest) \ + {selected disabled} $images(button-accent-disabled) \ + disabled $images(button-accent-disabled) \ + selected $images(button-accent-rest) \ + pressed $images(button-accent-pressed) \ + active $images(button-accent-hover) \ + ] -border 4 -sticky nsew + + # Titlebar.TButton + ttk::style configure Titlebar.TButton -padding {8 4} -anchor center -foreground #ffffff + + ttk::style map Titlebar.TButton -foreground \ + [list disabled #6f6f6f \ + pressed #d1d1d1 \ + active #ffffff] + + ttk::style element create TitlebarButton.button image \ + [list $images(empty) \ + disabled $images(empty) \ + pressed $images(button-titlebar-pressed) \ + active $images(button-titlebar-hover) \ + ] -border 4 -sticky nsew + + # Close.Titlebar.TButton + ttk::style configure Close.Titlebar.TButton -padding {8 4} -anchor center -foreground #ffffff + + ttk::style map Close.Titlebar.TButton -foreground \ + [list disabled #6f6f6f \ + pressed #e8bfbb \ + active #ffffff] + + ttk::style element create CloseButton.button image \ + [list $images(empty) \ + disabled $images(empty) \ + pressed $images(button-close-pressed) \ + active $images(button-close-hover) \ + ] -border 4 -sticky nsew + + # Checkbutton + ttk::style configure TCheckbutton -padding 2 + + ttk::style element create Checkbutton.indicator image \ + [list $images(check-unsel-rest) \ + {alternate disabled} $images(check-tri-disabled) \ + {selected disabled} $images(check-disabled) \ + disabled $images(check-unsel-disabled) \ + {pressed alternate} $images(check-tri-hover) \ + {active alternate} $images(check-tri-hover) \ + alternate $images(check-tri-rest) \ + {pressed selected} $images(check-hover) \ + {active selected} $images(check-hover) \ + selected $images(check-rest) \ + {pressed !selected} $images(check-unsel-pressed) \ + active $images(check-unsel-hover) \ + ] -width 26 -sticky w + + # Switch.TCheckbutton + ttk::style element create Switch.indicator image \ + [list $images(switch-off-rest) \ + {selected disabled} $images(switch-on-disabled) \ + disabled $images(switch-off-disabled) \ + {pressed selected} $images(switch-on-pressed) \ + {active selected} $images(switch-on-hover) \ + selected $images(switch-on-rest) \ + {pressed !selected} $images(switch-off-pressed) \ + active $images(switch-off-hover) \ + ] -width 46 -sticky w + + # Toggle.TButton + ttk::style configure 
Toggle.TButton -padding {8 4 8 4} -anchor center -foreground $colors(-fg) + + ttk::style map Toggle.TButton -foreground \ + [list {selected disabled} #a5a5a5 \ + {selected pressed} #d0d0d0 \ + selected #ffffff \ + pressed #25536a \ + disabled #7a7a7a + ] + + + ttk::style element create ToggleButton.button image \ + [list $images(button-rest) \ + {selected disabled} $images(button-accent-disabled) \ + disabled $images(button-disabled) \ + {pressed selected} $images(button-rest) \ + {active selected} $images(button-accent-hover) \ + selected $images(button-accent-rest) \ + {pressed !selected} $images(button-accent-rest) \ + active $images(button-hover) \ + ] -border 4 -sticky nsew + + # Radiobutton + ttk::style configure TRadiobutton -padding 0 + + ttk::style element create Radiobutton.indicator image \ + [list $images(radio-unsel-rest) \ + {selected disabled} $images(radio-disabled) \ + disabled $images(radio-unsel-disabled) \ + {pressed selected} $images(radio-pressed) \ + {active selected} $images(radio-hover) \ + selected $images(radio-rest) \ + {pressed !selected} $images(radio-unsel-pressed) \ + active $images(radio-unsel-hover) \ + ] -width 20 -sticky w + + ttk::style configure Menu.TRadiobutton -padding 0 + + ttk::style element create Menu.Radiobutton.indicator image \ + [list $images(radio-unsel-rest) \ + {selected disabled} $images(radio-disabled) \ + disabled $images(radio-unsel-disabled) \ + {pressed selected} $images(radio-pressed) \ + {active selected} $images(radio-hover) \ + selected $images(radio-rest) \ + {pressed !selected} $images(radio-unsel-pressed) \ + active $images(radio-unsel-hover) \ + ] -width 20 -sticky w + + # Scrollbar + + #ttk::style layout Vertical.TScrollbar + + ttk::style element create Horizontal.Scrollbar.trough image $images(scroll-hor-trough) -sticky ew -border 0 + ttk::style element create Horizontal.Scrollbar.thumb image $images(scroll-hor-thumb) -sticky ew -border 3 + + ttk::style element create Horizontal.Scrollbar.rightarrow image $images(scroll-right) -sticky {} -width 13 + ttk::style element create Horizontal.Scrollbar.leftarrow image $images(scroll-left) -sticky {} -width 13 + + ttk::style element create Vertical.Scrollbar.trough image $images(scroll-vert-trough) -sticky ns -border 0 + ttk::style element create Vertical.Scrollbar.thumb image $images(scroll-vert-thumb) -sticky ns -border 3 + + ttk::style element create Vertical.Scrollbar.uparrow image $images(scroll-up) -sticky {} -height 13 + ttk::style element create Vertical.Scrollbar.downarrow image $images(scroll-down) -sticky {} -height 13 + + # Scale + ttk::style element create Horizontal.Scale.trough image $images(scale-trough-hor) \ + -border 5 -padding 0 + + ttk::style element create Vertical.Scale.trough image $images(scale-trough-vert) \ + -border 5 -padding 0 + + ttk::style element create Scale.slider \ + image [list $images(scale-thumb-rest) \ + disabled $images(scale-thumb-disabled) \ + pressed $images(scale-thumb-pressed) \ + active $images(scale-thumb-hover) \ + ] -sticky {} + + # Progressbar + ttk::style element create Horizontal.Progressbar.trough image $images(progress-trough-hor) \ + -border 1 -sticky ew + + ttk::style element create Horizontal.Progressbar.pbar image $images(progress-pbar-hor) \ + -border 2 -sticky ew + + ttk::style element create Vertical.Progressbar.trough image $images(progress-trough-vert) \ + -border 1 -sticky ns + + ttk::style element create Vertical.Progressbar.pbar image $images(progress-pbar-vert) \ + -border 2 -sticky ns + + # Entry + ttk::style 
configure TEntry -foreground $colors(-fg) + + ttk::style map TEntry -foreground \ + [list disabled #757575 \ + pressed #cfcfcf + ] + + ttk::style element create Entry.field \ + image [list $images(entry-rest) \ + {focus hover !invalid} $images(entry-focus) \ + invalid $images(entry-invalid) \ + disabled $images(entry-disabled) \ + {focus !invalid} $images(entry-focus) \ + hover $images(entry-hover) \ + ] -border 5 -padding 8 -sticky nsew + + # Combobox + ttk::style configure TCombobox -foreground $colors(-fg) + + ttk::style map TCombobox -foreground \ + [list disabled #757575 \ + pressed #cfcfcf + ] + + ttk::style configure TCombobox -foreground $colors(-fg) + ttk::style configure ComboboxPopdownFrame -borderwidth 3 -relief solid + + ttk::style map TCombobox -selectbackground [list \ + {readonly hover} $colors(-selectbg) \ + {readonly focus} $colors(-selectbg) \ + ] -selectforeground [list \ + {readonly hover} $colors(-selectfg) \ + {readonly focus} $colors(-selectfg) \ + ] + + ttk::style element create Combobox.field \ + image [list $images(button-rest) \ + {readonly disabled} $images(button-disabled) \ + {readonly pressed} $images(button-rest) \ + {readonly hover} $images(button-hover) \ + readonly $images(button-rest) \ + invalid $images(entry-invalid) \ + disabled $images(combo-disabled) \ + focus $images(entry-focus) \ + hover $images(button-hover) \ + ] -border 5 -padding 8 -sticky nsew + + ttk::style element create Combobox.arrow image $images(arrow-down) -width 35 -sticky {} + + # Spinbox + ttk::style configure TSpinbox -foreground $colors(-fg) + + ttk::style map TSpinbox -foreground \ + [list disabled #757575 \ + pressed #cfcfcf + ] + + ttk::style element create Spinbox.field \ + image [list $images(entry-rest) \ + invalid $images(entry-invalid) \ + disabled $images(entry-disabled) \ + focus $images(entry-focus) \ + hover $images(entry-hover) \ + ] -border 5 -padding {8 8 54 8} -sticky nsew + + ttk::style element create Spinbox.uparrow image $images(arrow-up) -width 35 -sticky {} + ttk::style element create Spinbox.downarrow image $images(arrow-down) -width 35 -sticky {} + + # Sizegrip + ttk::style element create Sizegrip.sizegrip image $images(sizegrip) \ + -sticky nsew + + # Separator + ttk::style element create TSeparator.separator image $images(separator) + + # Card + ttk::style element create Card.field image $images(card) \ + -border 10 -padding 4 -sticky nsew + + # Labelframe + ttk::style element create Labelframe.border image $images(card) \ + -border 5 -padding 4 -sticky nsew + + # Notebook + ttk::style configure TNotebook -padding 1 + + ttk::style element create Notebook.border \ + image $images(notebook-border) -border 5 -padding 5 + + ttk::style element create Notebook.client image $images(notebook) + + ttk::style element create Notebook.tab \ + image [list $images(tab-rest) \ + selected $images(tab-selected) \ + active $images(tab-hover) \ + ] -border 13 -padding {16 14 16 6} -height 32 + + # Treeview + ttk::style element create Treeview.field image $images(card) \ + -border 5 + + ttk::style element create Treeheading.cell \ + image [list $images(treeheading-rest) \ + pressed $images(treeheading-pressed) \ + active $images(treeheading-hover) + ] -border 5 -padding 15 -sticky nsew + + ttk::style element create Treeitem.indicator \ + image [list $images(arrow-right) \ + user2 $images(empty) \ + user1 $images(arrow-down) \ + ] -width 26 -sticky {} + + ttk::style configure Treeview -background $colors(-bg) -rowheight [expr {[font metrics font -linespace] + 2}] + 
ttk::style map Treeview \ + -background [list selected #292929] \ + -foreground [list selected $colors(-selectfg)] + + # Panedwindow + # Insane hack to remove clam's ugly sash + ttk::style configure Sash -gripcount 0 + } +} \ No newline at end of file diff --git a/gui_data/sv_ttk/theme/dark/arrow-down.png b/gui_data/sv_ttk/theme/dark/arrow-down.png new file mode 100644 index 0000000000000000000000000000000000000000..2b0a9d8d4da574230214360781df04f68754ddd5 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/arrow-down.png differ diff --git a/gui_data/sv_ttk/theme/dark/arrow-right.png b/gui_data/sv_ttk/theme/dark/arrow-right.png new file mode 100644 index 0000000000000000000000000000000000000000..2638d885eb9be2833b009ef5b88081a4b22b34b2 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/arrow-right.png differ diff --git a/gui_data/sv_ttk/theme/dark/arrow-up.png b/gui_data/sv_ttk/theme/dark/arrow-up.png new file mode 100644 index 0000000000000000000000000000000000000000..f935a0d32a699308758f10b1ccee21c92e7c6b64 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/arrow-up.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-accent-disabled.png b/gui_data/sv_ttk/theme/dark/button-accent-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..bf7bd9ba70f9371a9c3d98b8f97039e611018479 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-accent-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-accent-hover.png b/gui_data/sv_ttk/theme/dark/button-accent-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..8aea9dd682773a2a5ed32f6a9c14a4db8f2c00ac Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-accent-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-accent-pressed.png b/gui_data/sv_ttk/theme/dark/button-accent-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..edc1114e5e03094ebe8796dd528ca5fb932ec9eb Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-accent-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-accent-rest.png b/gui_data/sv_ttk/theme/dark/button-accent-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..75e64f84d1f8e6cf20777de748c2cf54e53e9faf Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-accent-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-close-hover.png b/gui_data/sv_ttk/theme/dark/button-close-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..6fc0c00cc7d742753dc71c0add0fb32fc7756b8c Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-close-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-close-pressed.png b/gui_data/sv_ttk/theme/dark/button-close-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..6023dc1435a0288cf824533d496312b237fdf8b8 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-close-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-disabled.png b/gui_data/sv_ttk/theme/dark/button-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..71bb0a8f261f00a67737cc58c47511c236d80c41 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-hover.png b/gui_data/sv_ttk/theme/dark/button-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..20413753ca1b8d669bd5594a9c82da521a95b65d Binary files /dev/null and 
b/gui_data/sv_ttk/theme/dark/button-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-pressed.png b/gui_data/sv_ttk/theme/dark/button-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..42701498d18f5361bf3cff92033cabc845565217 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-rest.png b/gui_data/sv_ttk/theme/dark/button-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..128f5f68e4a2d4902842c4e73999094247c8f28f Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-titlebar-hover.png b/gui_data/sv_ttk/theme/dark/button-titlebar-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..fcb37512f76d28becbc7ece0f26be88ee16892d2 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-titlebar-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/button-titlebar-pressed.png b/gui_data/sv_ttk/theme/dark/button-titlebar-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..2ed0623e8d5bed5e8a24d960b49a3a9e7c6f5ddd Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/button-titlebar-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/card.png b/gui_data/sv_ttk/theme/dark/card.png new file mode 100644 index 0000000000000000000000000000000000000000..eaac11c57163d48359431f46d921f14658d79595 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/card.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-disabled.png b/gui_data/sv_ttk/theme/dark/check-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..f766ebafa3c9fa5b7dcf944e0a8c14961cb9c35a Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-hover.png b/gui_data/sv_ttk/theme/dark/check-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..59358d4b32bee4f9bc58e6104e80d8f902365f28 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-pressed.png b/gui_data/sv_ttk/theme/dark/check-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..02ee6affa06c7e5cfdfc0bae44810dbd76a9ab3b Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-rest.png b/gui_data/sv_ttk/theme/dark/check-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..aa8dc67a880e899d6526dfa5198967e78b8b122e Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-tri-disabled.png b/gui_data/sv_ttk/theme/dark/check-tri-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..a9d31c760240a0deceaf99ab0f3a622de75aab08 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-tri-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-tri-hover.png b/gui_data/sv_ttk/theme/dark/check-tri-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..ed218a0e3c8c18531195f7e33d384183cb8d1738 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-tri-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-tri-pressed.png b/gui_data/sv_ttk/theme/dark/check-tri-pressed.png new file mode 100644 index 
0000000000000000000000000000000000000000..68d7a993b3eec568eb268c4133d7cba9cf9b9508 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-tri-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-tri-rest.png b/gui_data/sv_ttk/theme/dark/check-tri-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..26edcdb13c66fa429b933f7af6a8ae04a2cc17a2 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-tri-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-unsel-disabled.png b/gui_data/sv_ttk/theme/dark/check-unsel-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..9f4be2293be707fe6d3d64b782de6d6795e4f848 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-unsel-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-unsel-hover.png b/gui_data/sv_ttk/theme/dark/check-unsel-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..a96a0dbd0a2d9d027a809809d61555d597c9193e Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-unsel-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-unsel-pressed.png b/gui_data/sv_ttk/theme/dark/check-unsel-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..26767b8a3e7f367e06f4cf5ebef1a1297d62280f Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-unsel-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/check-unsel-rest.png b/gui_data/sv_ttk/theme/dark/check-unsel-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..128f5f68e4a2d4902842c4e73999094247c8f28f Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/check-unsel-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/combo-disabled.png b/gui_data/sv_ttk/theme/dark/combo-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..9f4be2293be707fe6d3d64b782de6d6795e4f848 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/combo-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/empty.png b/gui_data/sv_ttk/theme/dark/empty.png new file mode 100644 index 0000000000000000000000000000000000000000..22183634d5e36298e12ed067750da6c7d2fcdea9 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/empty.png differ diff --git a/gui_data/sv_ttk/theme/dark/entry-disabled.png b/gui_data/sv_ttk/theme/dark/entry-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..1d9bce84ddb0c228a3ed5e90364601f5b1eefed2 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/entry-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/entry-focus.png b/gui_data/sv_ttk/theme/dark/entry-focus.png new file mode 100644 index 0000000000000000000000000000000000000000..58999e476e9af4e7b401bd558c9c85be4b3deeb7 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/entry-focus.png differ diff --git a/gui_data/sv_ttk/theme/dark/entry-hover.png b/gui_data/sv_ttk/theme/dark/entry-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..6b93830ab5908d7af96c755acd111761524e0f4e Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/entry-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/entry-invalid.png b/gui_data/sv_ttk/theme/dark/entry-invalid.png new file mode 100644 index 0000000000000000000000000000000000000000..7304b247cd136a2042d9ef347406655172be6537 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/entry-invalid.png differ diff --git a/gui_data/sv_ttk/theme/dark/entry-rest-combo.png 
b/gui_data/sv_ttk/theme/dark/entry-rest-combo.png new file mode 100644 index 0000000000000000000000000000000000000000..3f32bba56213a1da4122229c5269abf4ceaa82c6 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/entry-rest-combo.png differ diff --git a/gui_data/sv_ttk/theme/dark/entry-rest.png b/gui_data/sv_ttk/theme/dark/entry-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..e8767526ae057387f873d60cc1ec1ae578b77cec Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/entry-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/notebook-border.png b/gui_data/sv_ttk/theme/dark/notebook-border.png new file mode 100644 index 0000000000000000000000000000000000000000..0827a074e2330c873964133f877788a1fb4cbeb1 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/notebook-border.png differ diff --git a/gui_data/sv_ttk/theme/dark/notebook.png b/gui_data/sv_ttk/theme/dark/notebook.png new file mode 100644 index 0000000000000000000000000000000000000000..15c05f87c1761bf7070eda68c5c2225852343e1a Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/notebook.png differ diff --git a/gui_data/sv_ttk/theme/dark/progress-pbar-hor.png b/gui_data/sv_ttk/theme/dark/progress-pbar-hor.png new file mode 100644 index 0000000000000000000000000000000000000000..f8035f8394d2299bb2ca03f3ff8116d081e4c70c Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/progress-pbar-hor.png differ diff --git a/gui_data/sv_ttk/theme/dark/progress-pbar-vert.png b/gui_data/sv_ttk/theme/dark/progress-pbar-vert.png new file mode 100644 index 0000000000000000000000000000000000000000..3d0cb29758f0c6b066031b946818eeff9731dba4 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/progress-pbar-vert.png differ diff --git a/gui_data/sv_ttk/theme/dark/progress-trough-hor.png b/gui_data/sv_ttk/theme/dark/progress-trough-hor.png new file mode 100644 index 0000000000000000000000000000000000000000..9fe480766ed57accc392f94ead242a1e3761d5d2 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/progress-trough-hor.png differ diff --git a/gui_data/sv_ttk/theme/dark/progress-trough-vert.png b/gui_data/sv_ttk/theme/dark/progress-trough-vert.png new file mode 100644 index 0000000000000000000000000000000000000000..22a8c1c640586e6d8af59d584ff97089b570fec8 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/progress-trough-vert.png differ diff --git a/gui_data/sv_ttk/theme/dark/radio-disabled.png b/gui_data/sv_ttk/theme/dark/radio-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..b724f3a2229656060532fab32c3c6fc908bdaa5d Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/radio-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/radio-hover.png b/gui_data/sv_ttk/theme/dark/radio-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..982334519f74e3784c04e96014b954959ad0a1b8 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/radio-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/radio-pressed.png b/gui_data/sv_ttk/theme/dark/radio-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..ed89533169d16c5068da1ce9f77ffe6f8438850a Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/radio-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/radio-rest.png b/gui_data/sv_ttk/theme/dark/radio-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..ef891d151a4807c35408bfbf52c00528b62378d5 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/radio-rest.png differ diff --git 
a/gui_data/sv_ttk/theme/dark/radio-unsel-disabled.png b/gui_data/sv_ttk/theme/dark/radio-unsel-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..0b4142bb363e4aac1f14a347ca2d99a298d822c8 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/radio-unsel-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/radio-unsel-hover.png b/gui_data/sv_ttk/theme/dark/radio-unsel-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..7feda0b45b355e52771e482c53726ab0e5e2c407 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/radio-unsel-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/radio-unsel-pressed.png b/gui_data/sv_ttk/theme/dark/radio-unsel-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..7a76749185e8ee823a95bc3f0f62ff0e7f1a9abe Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/radio-unsel-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/radio-unsel-rest.png b/gui_data/sv_ttk/theme/dark/radio-unsel-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..f311983f8ec0da215b1c99b3a85856b0acf6f9d7 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/radio-unsel-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/scale-thumb-disabled.png b/gui_data/sv_ttk/theme/dark/scale-thumb-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..329e2630ccf0f3c63c91ac9b0fc72067c90a72ac Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scale-thumb-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/scale-thumb-hover.png b/gui_data/sv_ttk/theme/dark/scale-thumb-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..84e3d18a5b3ebb5f8ec1ecdfdba948b1ed1ad35c Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scale-thumb-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/scale-thumb-pressed.png b/gui_data/sv_ttk/theme/dark/scale-thumb-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..5d9925e79f6680ecd5aec1878f8fb5c7505c68be Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scale-thumb-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/scale-thumb-rest.png b/gui_data/sv_ttk/theme/dark/scale-thumb-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..740437b5bffab5e23b5b9f5a115a94c72deefc62 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scale-thumb-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/scale-trough-hor.png b/gui_data/sv_ttk/theme/dark/scale-trough-hor.png new file mode 100644 index 0000000000000000000000000000000000000000..264046235b72a383c4434f516ac310ab74a49f02 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scale-trough-hor.png differ diff --git a/gui_data/sv_ttk/theme/dark/scale-trough-vert.png b/gui_data/sv_ttk/theme/dark/scale-trough-vert.png new file mode 100644 index 0000000000000000000000000000000000000000..205fed89725e9c6399776707dff139f02916abc3 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scale-trough-vert.png differ diff --git a/gui_data/sv_ttk/theme/dark/scroll-down.png b/gui_data/sv_ttk/theme/dark/scroll-down.png new file mode 100644 index 0000000000000000000000000000000000000000..4c0e24fa06c2478235e21ac90290d52cbe893c0a Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scroll-down.png differ diff --git a/gui_data/sv_ttk/theme/dark/scroll-hor-thumb.png b/gui_data/sv_ttk/theme/dark/scroll-hor-thumb.png new file mode 100644 index 
0000000000000000000000000000000000000000..795a88a783f0f550e52469479672e0a238e86d5f Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scroll-hor-thumb.png differ diff --git a/gui_data/sv_ttk/theme/dark/scroll-hor-trough.png b/gui_data/sv_ttk/theme/dark/scroll-hor-trough.png new file mode 100644 index 0000000000000000000000000000000000000000..89d04035b472f791ab57dc0443a8b7f70b39d3e1 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scroll-hor-trough.png differ diff --git a/gui_data/sv_ttk/theme/dark/scroll-left.png b/gui_data/sv_ttk/theme/dark/scroll-left.png new file mode 100644 index 0000000000000000000000000000000000000000..f43538b8ac001aac048d41cdd92f2a349649fda9 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scroll-left.png differ diff --git a/gui_data/sv_ttk/theme/dark/scroll-right.png b/gui_data/sv_ttk/theme/dark/scroll-right.png new file mode 100644 index 0000000000000000000000000000000000000000..a56511f0fc18526f223fc850507831ca7bec0dbb Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scroll-right.png differ diff --git a/gui_data/sv_ttk/theme/dark/scroll-up.png b/gui_data/sv_ttk/theme/dark/scroll-up.png new file mode 100644 index 0000000000000000000000000000000000000000..7ddba7fff7796e7f279467b5fc1acbfbe765ad53 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scroll-up.png differ diff --git a/gui_data/sv_ttk/theme/dark/scroll-vert-thumb.png b/gui_data/sv_ttk/theme/dark/scroll-vert-thumb.png new file mode 100644 index 0000000000000000000000000000000000000000..0c3f067a3b3092053057dddee1566fd9c65a8fbf Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scroll-vert-thumb.png differ diff --git a/gui_data/sv_ttk/theme/dark/scroll-vert-trough.png b/gui_data/sv_ttk/theme/dark/scroll-vert-trough.png new file mode 100644 index 0000000000000000000000000000000000000000..52fd58f3049e992f600fa9c37d5cc3768c83493d Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/scroll-vert-trough.png differ diff --git a/gui_data/sv_ttk/theme/dark/separator.png b/gui_data/sv_ttk/theme/dark/separator.png new file mode 100644 index 0000000000000000000000000000000000000000..6e01f551a104e787194301b25e8d12128bb520f6 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/separator.png differ diff --git a/gui_data/sv_ttk/theme/dark/sizegrip.png b/gui_data/sv_ttk/theme/dark/sizegrip.png new file mode 100644 index 0000000000000000000000000000000000000000..7080c04c67807e08452264809cbacf35b89a0503 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/sizegrip.png differ diff --git a/gui_data/sv_ttk/theme/dark/switch-off-disabled.png b/gui_data/sv_ttk/theme/dark/switch-off-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..4032c6128b8db875f5c8b3b67ff1a200458ff095 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/switch-off-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/switch-off-hover.png b/gui_data/sv_ttk/theme/dark/switch-off-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..5a136bd3f4c6fd280134f300e5cf50136e217b05 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/switch-off-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/switch-off-pressed.png b/gui_data/sv_ttk/theme/dark/switch-off-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..040e2ea30c96c8fcf103abdb749a706ec9b01975 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/switch-off-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/switch-off-rest.png 
b/gui_data/sv_ttk/theme/dark/switch-off-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..6c31bb235da79718ef3684ee86a922d44fa470a6 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/switch-off-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/switch-on-disabled.png b/gui_data/sv_ttk/theme/dark/switch-on-disabled.png new file mode 100644 index 0000000000000000000000000000000000000000..c0d67c567752d96e322b3f3dcea9cd95bbf8efec Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/switch-on-disabled.png differ diff --git a/gui_data/sv_ttk/theme/dark/switch-on-hover.png b/gui_data/sv_ttk/theme/dark/switch-on-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..fd4de9497e12ce541db5e02ffdc02a640dca8f0e Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/switch-on-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/switch-on-pressed.png b/gui_data/sv_ttk/theme/dark/switch-on-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..00e87c68a1b37cde51142d964e4ac1374a69de4f Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/switch-on-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/switch-on-rest.png b/gui_data/sv_ttk/theme/dark/switch-on-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..52a19ea658446caede4fb410192b05bdc78353ef Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/switch-on-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/tab-hover.png b/gui_data/sv_ttk/theme/dark/tab-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..43a113b35c4019897390acd452cda5aab135512a Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/tab-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/tab-rest.png b/gui_data/sv_ttk/theme/dark/tab-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..e8a6f3f61bb7a3aa8bcca71abd5cdefa87f9f801 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/tab-rest.png differ diff --git a/gui_data/sv_ttk/theme/dark/tab-selected.png b/gui_data/sv_ttk/theme/dark/tab-selected.png new file mode 100644 index 0000000000000000000000000000000000000000..eb7b211bbffe6a523e7dd947c1d06af4ce33aa3e Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/tab-selected.png differ diff --git a/gui_data/sv_ttk/theme/dark/treeheading-hover.png b/gui_data/sv_ttk/theme/dark/treeheading-hover.png new file mode 100644 index 0000000000000000000000000000000000000000..beaaf1353fe62c3607118cbabce26b78ec5ceea0 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/treeheading-hover.png differ diff --git a/gui_data/sv_ttk/theme/dark/treeheading-pressed.png b/gui_data/sv_ttk/theme/dark/treeheading-pressed.png new file mode 100644 index 0000000000000000000000000000000000000000..9cd311dc6cf1afeee2f326ea640dfa216e6d8738 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/treeheading-pressed.png differ diff --git a/gui_data/sv_ttk/theme/dark/treeheading-rest.png b/gui_data/sv_ttk/theme/dark/treeheading-rest.png new file mode 100644 index 0000000000000000000000000000000000000000..374ed4992d9893965a9489a665e09b9c04ab91c7 Binary files /dev/null and b/gui_data/sv_ttk/theme/dark/treeheading-rest.png differ diff --git a/gui_data/tkinterdnd2/TkinterDnD.py b/gui_data/tkinterdnd2/TkinterDnD.py new file mode 100644 index 0000000000000000000000000000000000000000..8bae5c22fc941e61dbafef89dd5b102367408b36 --- /dev/null +++ b/gui_data/tkinterdnd2/TkinterDnD.py @@ -0,0 +1,294 @@ +'''Python wrapper for the tkdnd tk 
extension. + +The tkdnd extension provides an interface to native, platform specific +drag and drop mechanisms. Under Unix the drag & drop protocol in use is +the XDND protocol version 5 (also used by the Qt toolkit, and the KDE and +GNOME desktops). Under Windows, the OLE2 drag & drop interfaces are used. +Under Macintosh, the Cocoa drag and drop interfaces are used. + +Once the TkinterDnD2 package is installed, it is safe to do: + +from TkinterDnD2 import * + +This will add the classes TkinterDnD.Tk and TkinterDnD.TixTk to the global +namespace, plus the following constants: +PRIVATE, NONE, ASK, COPY, MOVE, LINK, REFUSE_DROP, +DND_TEXT, DND_FILES, DND_ALL, CF_UNICODETEXT, CF_TEXT, CF_HDROP, +FileGroupDescriptor, FileGroupDescriptorW + +Drag and drop for the application can then be enabled by using one of the +classes TkinterDnD.Tk() or (in case the tix extension shall be used) +TkinterDnD.TixTk() as application main window instead of a regular +tkinter.Tk() window. This will add the drag-and-drop specific methods to the +Tk window and all its descendants. +''' + + +import tkinter +from tkinter import tix + +TkdndVersion = None +ARM = 'arm' + +def _require(tkroot): + '''Internal function.''' + global TkdndVersion + try: + import os.path + import platform + + if platform.system()=="Darwin": + tkdnd_platform_rep = "osx_arm" if platform.processor() == ARM or ARM in platform.platform() else "osx64" + elif platform.system()=="Linux": + tkdnd_platform_rep = "linux64" + elif platform.system()=="Windows": + tkdnd_platform_rep = "win64" + else: + raise RuntimeError('Plaform not supported.') + + module_path = os.path.join(os.path.dirname(__file__), 'tkdnd', tkdnd_platform_rep) + tkroot.tk.call('lappend', 'auto_path', module_path) + TkdndVersion = tkroot.tk.call('package', 'require', 'tkdnd') + except tkinter.TclError: + raise RuntimeError('Unable to load tkdnd library.') + return TkdndVersion + +class DnDEvent: + """Internal class. + Container for the properties of a drag-and-drop event, similar to a + normal tkinter.Event. + An instance of the DnDEvent class has the following attributes: + action (string) + actions (tuple) + button (int) + code (string) + codes (tuple) + commonsourcetypes (tuple) + commontargettypes (tuple) + data (string) + name (string) + types (tuple) + modifiers (tuple) + supportedsourcetypes (tuple) + sourcetypes (tuple) + type (string) + supportedtargettypes (tuple) + widget (widget instance) + x_root (int) + y_root (int) + Depending on the type of DnD event however, not all attributes may be set. 
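The TkinterDnD module docstring above describes the intended entry point: construct the application's main window with TkinterDnD.Tk() instead of a plain tkinter.Tk() so that the drag-and-drop methods become available on the window and all of its descendants. As a minimal, illustrative sketch (not part of the patch itself; the `gui_data.tkinterdnd2` import path and the `<<Drop>>` virtual-event name are assumptions based on the vendored tkdnd documentation), a window that accepts dropped files might look like this:

```
from gui_data.tkinterdnd2 import TkinterDnD, DND_FILES

root = TkinterDnD.Tk()   # use this in place of tkinter.Tk()
root.title("Drop files here")

def on_drop(event):
    # event.data carries the dropped path(s); splitlist copes with
    # brace-quoted names that contain spaces
    for path in root.tk.splitlist(event.data):
        print("dropped:", path)
    return event.action

root.drop_target_register(DND_FILES)   # accept the DND_Files type
root.dnd_bind('<<Drop>>', on_drop)     # fired when something is dropped
root.mainloop()
```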
+ """ + pass + +class DnDWrapper: + '''Internal class.''' + # some of the percent substitutions need to be enclosed in braces + # so we can use splitlist() to convert them into tuples + _subst_format_dnd = ('%A', '%a', '%b', '%C', '%c', '%CST', + '%CTT', '%D', '%e', '%L', '%m', '%ST', + '%T', '%t', '%TT', '%W', '%X', '%Y') + _subst_format_str_dnd = " ".join(_subst_format_dnd) + #print('_subst_format_dnd: ', _subst_format_dnd) + tkinter.BaseWidget._subst_format_dnd = _subst_format_dnd + tkinter.BaseWidget._subst_format_str_dnd = _subst_format_str_dnd + + def _substitute_dnd(self, *args): + """Internal function.""" + if len(args) != len(self._subst_format_dnd): + return args + def getint_event(s): + try: + return int(s) + except ValueError: + return s + def splitlist_event(s): + try: + return self.tk.splitlist(s) + except ValueError: + return s + # valid percent substitutions for DnD event types + # (tested with tkdnd-2.8 on debian jessie): + # <> : %W, %X, %Y %e, %t + # <> : %A, %W, %e + # <> : all except : %D (always empty) + # <> : all except %D (always empty) + # <> :all except %D (always empty) + # <> : all + A, a, b, C, c, CST, CTT, D, e, L, m, ST, T, t, TT, W, X, Y = args + ev = DnDEvent() + ev.action = A + ev.actions = splitlist_event(a) + ev.button = getint_event(b) + ev.code = C + ev.codes = splitlist_event(c) + ev.commonsourcetypes = splitlist_event(CST) + ev.commontargettypes = splitlist_event(CTT) + ev.data = D + ev.name = e + ev.types = splitlist_event(L) + ev.modifiers = splitlist_event(m) + ev.supportedsourcetypes = splitlist_event(ST) + ev.sourcetypes = splitlist_event(t) + ev.type = T + ev.supportedtargettypes = splitlist_event(TT) + try: + ev.widget = self.nametowidget(W) + except KeyError: + ev.widget = W + ev.x_root = getint_event(X) + ev.y_root = getint_event(Y) + + + + return (ev,) + tkinter.BaseWidget._substitute_dnd = _substitute_dnd + + def _dnd_bind(self, what, sequence, func, add, needcleanup=True): + """Internal function.""" + if isinstance(func, str): + self.tk.call(what + (sequence, func)) + elif func: + funcid = self._register(func, self._substitute_dnd, needcleanup) + # FIXME: why doesn't the "return 'break'" mechanism work here?? + #cmd = ('%sif {"[%s %s]" == "break"} break\n' % (add and '+' or '', + # funcid, self._subst_format_str_dnd)) + cmd = '%s%s %s' %(add and '+' or '', funcid, + self._subst_format_str_dnd) + self.tk.call(what + (sequence, cmd)) + return funcid + elif sequence: + return self.tk.call(what + (sequence,)) + else: + return self.tk.splitlist(self.tk.call(what)) + tkinter.BaseWidget._dnd_bind = _dnd_bind + + def dnd_bind(self, sequence=None, func=None, add=None): + '''Bind to this widget at drag and drop event SEQUENCE a call + to function FUNC. + SEQUENCE may be one of the following: + <>, <>, <>, <>, + <>, <>, <> . + The callbacks for the > events, with the exception of + <>, should always return an action (i.e. one of COPY, + MOVE, LINK, ASK or PRIVATE). + The callback for the <> event must return a tuple + containing three elements: the drop action(s) supported by the + drag source, the format type(s) that the data can be dropped as and + finally the data that shall be dropped. Each of these three elements + may be a tuple of strings or a single string.''' + return self._dnd_bind(('bind', self._w), sequence, func, add) + tkinter.BaseWidget.dnd_bind = dnd_bind + + def drag_source_register(self, button=None, *dndtypes): + '''This command will register SELF as a drag source. + A drag source is a widget than can start a drag action. 
This command + can be executed multiple times on a widget. + When SELF is registered as a drag source, optional DNDTYPES can be + provided. These DNDTYPES will be provided during a drag action, and + it can contain platform independent or platform specific types. + Platform independent are DND_Text for dropping text portions and + DND_Files for dropping a list of files (which can contain one or + multiple files) on SELF. However, these types are + indicative/informative. SELF can initiate a drag action with even a + different type list. Finally, button is the mouse button that will be + used for starting the drag action. It can have any of the values 1 + (left mouse button), 2 (middle mouse button - wheel) and 3 + (right mouse button). If button is not specified, it defaults to 1.''' + # hack to fix a design bug from the first version + if button is None: + button = 1 + else: + try: + button = int(button) + except ValueError: + # no button defined, button is actually + # something like DND_TEXT + dndtypes = (button,) + dndtypes + button = 1 + self.tk.call( + 'tkdnd::drag_source', 'register', self._w, dndtypes, button) + tkinter.BaseWidget.drag_source_register = drag_source_register + + def drag_source_unregister(self): + '''This command will stop SELF from being a drag source. Thus, window + will stop receiving events related to drag operations. It is an error + to use this command for a window that has not been registered as a + drag source with drag_source_register().''' + self.tk.call('tkdnd::drag_source', 'unregister', self._w) + tkinter.BaseWidget.drag_source_unregister = drag_source_unregister + + def drop_target_register(self, *dndtypes): + '''This command will register SELF as a drop target. A drop target is + a widget than can accept a drop action. This command can be executed + multiple times on a widget. When SELF is registered as a drop target, + optional DNDTYPES can be provided. These types list can contain one or + more types that SELF will accept during a drop action, and it can + contain platform independent or platform specific types. Platform + independent are DND_Text for dropping text portions and DND_Files for + dropping a list of files (which can contain one or multiple files) on + SELF.''' + self.tk.call('tkdnd::drop_target', 'register', self._w, dndtypes) + tkinter.BaseWidget.drop_target_register = drop_target_register + + def drop_target_unregister(self): + '''This command will stop SELF from being a drop target. Thus, SELF + will stop receiving events related to drop operations. It is an error + to use this command for a window that has not been registered as a + drop target with drop_target_register().''' + self.tk.call('tkdnd::drop_target', 'unregister', self._w) + tkinter.BaseWidget.drop_target_unregister = drop_target_unregister + + def platform_independent_types(self, *dndtypes): + '''This command will accept a list of types that can contain platform + independnent or platform specific types. A new list will be returned, + where each platform specific type in DNDTYPES will be substituted by + one or more platform independent types. Thus, the returned list may + have more elements than DNDTYPES.''' + return self.tk.split(self.tk.call( + 'tkdnd::platform_independent_types', dndtypes)) + tkinter.BaseWidget.platform_independent_types = platform_independent_types + + def platform_specific_types(self, *dndtypes): + '''This command will accept a list of types that can contain platform + independnent or platform specific types. 
A new list will be returned, + where each platform independent type in DNDTYPES will be substituted + by one or more platform specific types. Thus, the returned list may + have more elements than DNDTYPES.''' + return self.tk.split(self.tk.call( + 'tkdnd::platform_specific_types', dndtypes)) + tkinter.BaseWidget.platform_specific_types = platform_specific_types + + def get_dropfile_tempdir(self): + '''This command will return the temporary directory used by TkDND for + storing temporary files. When the package is loaded, this temporary + directory will be initialised to a proper directory according to the + operating system. This default initial value can be changed to be the + value of the following environmental variables: + TKDND_TEMP_DIR, TEMP, TMP.''' + return self.tk.call('tkdnd::GetDropFileTempDirectory') + tkinter.BaseWidget.get_dropfile_tempdir = get_dropfile_tempdir + + def set_dropfile_tempdir(self, tempdir): + '''This command will change the temporary directory used by TkDND for + storing temporary files to TEMPDIR.''' + self.tk.call('tkdnd::SetDropFileTempDirectory', tempdir) + tkinter.BaseWidget.set_dropfile_tempdir = set_dropfile_tempdir + +####################################################################### +#### The main window classes that enable Drag & Drop for #### +#### themselves and all their descendant widgets: #### +####################################################################### + +class Tk(tkinter.Tk, DnDWrapper): + '''Creates a new instance of a tkinter.Tk() window; all methods of the + DnDWrapper class apply to this window and all its descendants.''' + def __init__(self, *args, **kw): + tkinter.Tk.__init__(self, *args, **kw) + self.TkdndVersion = _require(self) + +class TixTk(tix.Tk, DnDWrapper): + '''Creates a new instance of a tix.Tk() window; all methods of the + DnDWrapper class apply to this window and all its descendants.''' + def __init__(self, *args, **kw): + tix.Tk.__init__(self, *args, **kw) + self.TkdndVersion = _require(self) diff --git a/gui_data/tkinterdnd2/__init__.py b/gui_data/tkinterdnd2/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..17f037dd669b966490d32d0fb218dfe75b187713 --- /dev/null +++ b/gui_data/tkinterdnd2/__init__.py @@ -0,0 +1,25 @@ +# dnd actions +PRIVATE = 'private' +NONE = 'none' +ASK = 'ask' +COPY = 'copy' +MOVE = 'move' +LINK = 'link' +REFUSE_DROP = 'refuse_drop' + +# dnd types +DND_TEXT = 'DND_Text' +DND_FILES = 'DND_Files' +DND_ALL = '*' +CF_UNICODETEXT = 'CF_UNICODETEXT' +CF_TEXT = 'CF_TEXT' +CF_HDROP = 'CF_HDROP' + +FileGroupDescriptor = 'FileGroupDescriptor - FileContents'# ?? +FileGroupDescriptorW = 'FileGroupDescriptorW - FileContents'# ?? + +from . 
import TkinterDnD +from .TkinterDnD import Tk +from .TkinterDnD import TixTk + + diff --git a/gui_data/tkinterdnd2/__pycache__/TkinterDnD.cpython-310.pyc b/gui_data/tkinterdnd2/__pycache__/TkinterDnD.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00f3b200d8c4e2574034c9f2b33991612ac95e20 Binary files /dev/null and b/gui_data/tkinterdnd2/__pycache__/TkinterDnD.cpython-310.pyc differ diff --git a/gui_data/tkinterdnd2/__pycache__/__init__.cpython-310.pyc b/gui_data/tkinterdnd2/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f6c47e7f5fcf3c621a8396e0daec8ae9df2d5f9 Binary files /dev/null and b/gui_data/tkinterdnd2/__pycache__/__init__.cpython-310.pyc differ diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/libtkdnd2.9.2.so b/gui_data/tkinterdnd2/tkdnd/linux64/libtkdnd2.9.2.so new file mode 100644 index 0000000000000000000000000000000000000000..03dbd1bb1f3ed549879f17db070a3d3b6f42538c Binary files /dev/null and b/gui_data/tkinterdnd2/tkdnd/linux64/libtkdnd2.9.2.so differ diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/pkgIndex.tcl b/gui_data/tkinterdnd2/tkdnd/linux64/pkgIndex.tcl new file mode 100644 index 0000000000000000000000000000000000000000..9edcc8f2b4049c40b38f59c18de7b8525f3d7246 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/linux64/pkgIndex.tcl @@ -0,0 +1,10 @@ +# +# Tcl package index file +# +package ifneeded tkdnd 2.9.2 \ + "source \{$dir/tkdnd.tcl\} ; \ + tkdnd::initialise \{$dir\} libtkdnd2.9.2.so tkdnd" + +package ifneeded tkdnd::utils 2.9.2 \ + "source \{$dir/tkdnd_utils.tcl\} ; \ + package provide tkdnd::utils 2.9.2" diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd.tcl b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd.tcl new file mode 100644 index 0000000000000000000000000000000000000000..12d1dd289de6b78e83922a1b1653ef6165dc70db --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd.tcl @@ -0,0 +1,469 @@ +# +# tkdnd.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. 
THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +package require Tk + +namespace eval ::tkdnd { + variable _topw ".drag" + variable _tabops + variable _state + variable _x0 + variable _y0 + variable _platform_namespace + variable _drop_file_temp_dir + variable _auto_update 1 + variable _dx 3 ;# The difference in pixels before a drag is initiated. + variable _dy 3 ;# The difference in pixels before a drag is initiated. + + variable _windowingsystem + + bind TkDND_Drag1 {tkdnd::_begin_drag press 1 %W %s %X %Y %x %y} + bind TkDND_Drag1 {tkdnd::_begin_drag motion 1 %W %s %X %Y %x %y} + bind TkDND_Drag2 {tkdnd::_begin_drag press 2 %W %s %X %Y %x %y} + bind TkDND_Drag2 {tkdnd::_begin_drag motion 2 %W %s %X %Y %x %y} + bind TkDND_Drag3 {tkdnd::_begin_drag press 3 %W %s %X %Y %x %y} + bind TkDND_Drag3 {tkdnd::_begin_drag motion 3 %W %s %X %Y %x %y} + + # ---------------------------------------------------------------------------- + # Command tkdnd::initialise: Initialise the TkDND package. + # ---------------------------------------------------------------------------- + proc initialise { dir PKG_LIB_FILE PACKAGE_NAME} { + variable _platform_namespace + variable _drop_file_temp_dir + variable _windowingsystem + global env + + switch [tk windowingsystem] { + x11 { + set _windowingsystem x11 + } + win32 - + windows { + set _windowingsystem windows + } + aqua { + set _windowingsystem aqua + } + default { + error "unknown Tk windowing system" + } + } + + ## Get User's home directory: We try to locate the proper path from a set of + ## environmental variables... + foreach var {HOME HOMEPATH USERPROFILE ALLUSERSPROFILE APPDATA} { + if {[info exists env($var)]} { + if {[file isdirectory $env($var)]} { + set UserHomeDir $env($var) + break + } + } + } + + ## Should use [tk windowingsystem] instead of tcl platform array: + ## OS X returns "unix," but that's not useful because it has its own + ## windowing system, aqua + ## Under windows we have to also combine HOMEDRIVE & HOMEPATH... + if {![info exists UserHomeDir] && + [string equal $_windowingsystem windows] && + [info exists env(HOMEDRIVE)] && [info exists env(HOMEPATH)]} { + if {[file isdirectory $env(HOMEDRIVE)$env(HOMEPATH)]} { + set UserHomeDir $env(HOMEDRIVE)$env(HOMEPATH) + } + } + ## Have we located the needed path? + if {![info exists UserHomeDir]} { + set UserHomeDir [pwd] + } + set UserHomeDir [file normalize $UserHomeDir] + + ## Try to locate a temporary directory... 
+ foreach var {TKDND_TEMP_DIR TEMP TMP} { + if {[info exists env($var)]} { + if {[file isdirectory $env($var)] && [file writable $env($var)]} { + set _drop_file_temp_dir $env($var) + break + } + } + } + if {![info exists _drop_file_temp_dir]} { + foreach _dir [list "$UserHomeDir/Local Settings/Temp" \ + "$UserHomeDir/AppData/Local/Temp" \ + /tmp \ + C:/WINDOWS/Temp C:/Temp C:/tmp \ + D:/WINDOWS/Temp D:/Temp D:/tmp] { + if {[file isdirectory $_dir] && [file writable $_dir]} { + set _drop_file_temp_dir $_dir + break + } + } + } + if {![info exists _drop_file_temp_dir]} { + set _drop_file_temp_dir $UserHomeDir + } + set _drop_file_temp_dir [file native $_drop_file_temp_dir] + + source $dir/tkdnd_generic.tcl + switch $_windowingsystem { + x11 { + source $dir/tkdnd_unix.tcl + set _platform_namespace xdnd + } + win32 - + windows { + source $dir/tkdnd_windows.tcl + set _platform_namespace olednd + } + aqua { + source $dir/tkdnd_macosx.tcl + set _platform_namespace macdnd + } + default { + error "unknown Tk windowing system" + } + } + load $dir/$PKG_LIB_FILE $PACKAGE_NAME + source $dir/tkdnd_compat.tcl + ${_platform_namespace}::initialise + };# initialise + + proc GetDropFileTempDirectory { } { + variable _drop_file_temp_dir + return $_drop_file_temp_dir + } + proc SetDropFileTempDirectory { dir } { + variable _drop_file_temp_dir + set _drop_file_temp_dir $dir + } + +};# namespace ::tkdnd + +# ---------------------------------------------------------------------------- +# Command tkdnd::drag_source +# ---------------------------------------------------------------------------- +proc ::tkdnd::drag_source { mode path { types {} } { event 1 } + { tagprefix TkDND_Drag } } { + set tags [bindtags $path] + set idx [lsearch $tags ${tagprefix}$event] + switch -- $mode { + register { + if { $idx != -1 } { + ## No need to do anything! 
+ # bindtags $path [lreplace $tags $idx $idx ${tagprefix}$event] + } else { + bindtags $path [linsert $tags 1 ${tagprefix}$event] + } + _drag_source_update_types $path $types + } + unregister { + if { $idx != -1 } { + bindtags $path [lreplace $tags $idx $idx] + } + } + } +};# tkdnd::drag_source + +proc ::tkdnd::_drag_source_update_types { path types } { + set types [platform_specific_types $types] + set old_types [bind $path <>] + foreach type $types { + if {[lsearch $old_types $type] < 0} {lappend old_types $type} + } + bind $path <> $old_types +};# ::tkdnd::_drag_source_update_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::drop_target +# ---------------------------------------------------------------------------- +proc ::tkdnd::drop_target { mode path { types {} } } { + variable _windowingsystem + set types [platform_specific_types $types] + switch -- $mode { + register { + switch $_windowingsystem { + x11 { + _register_types $path [winfo toplevel $path] $types + } + win32 - + windows { + _RegisterDragDrop $path + bind $path {+ tkdnd::_RevokeDragDrop %W} + } + aqua { + macdnd::registerdragwidget [winfo toplevel $path] $types + } + default { + error "unknown Tk windowing system" + } + } + set old_types [bind $path <>] + set new_types {} + foreach type $types { + if {[lsearch -exact $old_types $type] < 0} {lappend new_types $type} + } + if {[llength $new_types]} { + bind $path <> [concat $old_types $new_types] + } + } + unregister { + switch $_windowingsystem { + x11 { + } + win32 - + windows { + _RevokeDragDrop $path + } + aqua { + error todo + } + default { + error "unknown Tk windowing system" + } + } + bind $path <> {} + } + } +};# tkdnd::drop_target + +# ---------------------------------------------------------------------------- +# Command tkdnd::_begin_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_begin_drag { event button source state X Y x y } { + variable _x0 + variable _y0 + variable _state + + switch -- $event { + press { + set _x0 $X + set _y0 $Y + set _state "press" + } + motion { + if { ![info exists _state] } { + # This is just extra protection. There seem to be + # rare cases where the motion comes before the press. + return + } + if { [string equal $_state "press"] } { + variable _dx + variable _dy + if { abs($_x0-$X) > ${_dx} || abs($_y0-$Y) > ${_dy} } { + set _state "done" + _init_drag $button $source $state $X $Y $x $y + } + } + } + } +};# tkdnd::_begin_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::_init_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_init_drag { button source state rootX rootY X Y } { + # Call the <> binding. 
+ set cmd [bind $source <>] + # puts "CMD: $cmd" + if {[string length $cmd]} { + set cmd [string map [list %W $source %X $rootX %Y $rootY %x $X %y $Y \ + %S $state %e <> %A \{\} %% % \ + %t [bind $source <>]] $cmd] + set code [catch {uplevel \#0 $cmd} info options] + # puts "CODE: $code ---- $info" + switch -exact -- $code { + 0 {} + 3 - 4 { + # FRINK: nocheck + return + } + default { + return -options $options $info + } + } + + set len [llength $info] + if {$len == 3} { + foreach { actions types _data } $info { break } + set types [platform_specific_types $types] + set data [list] + foreach type $types { + lappend data $_data + } + unset _data + } elseif {$len == 2} { + foreach { actions _data } $info { break } + set data [list]; set types [list] + foreach {t d} $_data { + foreach t [platform_specific_types $t] { + lappend types $t; lappend data $d + } + } + unset _data t d + } else { + if {$len == 1 && [string equal [lindex $actions 0] "refuse_drop"]} { + return + } + error "not enough items in the result of the <>\ + event binding. Either 2 or 3 items are expected. The command + executed was: \"$cmd\"\nResult was: \"$info\"" + } + set action refuse_drop + variable _windowingsystem + # puts "Source: \"$source\"" + # puts "Types: \"[join $types {", "}]\"" + # puts "Actions: \"[join $actions {", "}]\"" + # puts "Button: \"$button\"" + # puts "Data: \"[string range $data 0 100]\"" + switch $_windowingsystem { + x11 { + set action [xdnd::_dodragdrop $source $actions $types $data $button] + } + win32 - + windows { + set action [_DoDragDrop $source $actions $types $data $button] + } + aqua { + set action [macdnd::dodragdrop $source $actions $types $data $button] + } + default { + error "unknown Tk windowing system" + } + } + ## Call _end_drag to notify the widget of the result of the drag + ## operation... + _end_drag $button $source {} $action {} $data {} $state $rootX $rootY $X $Y + } +};# tkdnd::_init_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::_end_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_end_drag { button source target action type data result + state rootX rootY X Y } { + set rootX 0 + set rootY 0 + # Call the <> binding. + set cmd [bind $source <>] + if {[string length $cmd]} { + set cmd [string map [list %W $source %X $rootX %Y $rootY %x $X %y $Y %% % \ + %S $state %e <> %A \{$action\}] $cmd] + set info [uplevel \#0 $cmd] + # if { $info != "" } { + # variable _windowingsystem + # foreach { actions types data } $info { break } + # set types [platform_specific_types $types] + # switch $_windowingsystem { + # x11 { + # error "dragging from Tk widgets not yet supported" + # } + # win32 - + # windows { + # set action [_DoDragDrop $source $actions $types $data $button] + # } + # aqua { + # macdnd::dodragdrop $source $actions $types $data + # } + # default { + # error "unknown Tk windowing system" + # } + # } + # ## Call _end_drag to notify the widget of the result of the drag + # ## operation... 
+ # _end_drag $button $source {} $action {} $data {} $state $rootX $rootY + # } + } +};# tkdnd::_end_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_specific_types { types } { + variable _platform_namespace + ${_platform_namespace}::platform_specific_types $types +}; # tkdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + variable _platform_namespace + ${_platform_namespace}::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_specific_type { type } { + variable _platform_namespace + ${_platform_namespace}::platform_specific_type $type +}; # tkdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_type { type } { + variable _platform_namespace + ${_platform_namespace}::platform_independent_type $type +}; # tkdnd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::bytes_to_string +# ---------------------------------------------------------------------------- +proc ::tkdnd::bytes_to_string { bytes } { + set string {} + foreach byte $bytes { + append string [binary format c $byte] + } + return $string +};# tkdnd::bytes_to_string + +# ---------------------------------------------------------------------------- +# Command tkdnd::urn_unquote +# ---------------------------------------------------------------------------- +proc ::tkdnd::urn_unquote {url} { + set result "" + set start 0 + while {[regexp -start $start -indices {%[0-9a-fA-F]{2}} $url match]} { + foreach {first last} $match break + append result [string range $url $start [expr {$first - 1}]] + append result [format %c 0x[string range $url [incr first] $last]] + set start [incr last] + } + append result [string range $url $start end] + return [encoding convertfrom utf-8 $result] +};# tkdnd::urn_unquote diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_compat.tcl b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_compat.tcl new file mode 100644 index 0000000000000000000000000000000000000000..efc96f7bb2fe74a9bafd1e79681c275c8ea0f8fc --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_compat.tcl @@ -0,0 +1,160 @@ +# +# tkdnd_compat.tcl -- +# +# This file implements some utility procedures, to support older versions +# of the TkDND package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval compat { + +};# namespace compat + +# ---------------------------------------------------------------------------- +# Command ::dnd +# ---------------------------------------------------------------------------- +proc ::dnd {method window args} { + switch $method { + bindtarget { + switch [llength $args] { + 0 {return [tkdnd::compat::bindtarget0 $window]} + 1 {return [tkdnd::compat::bindtarget1 $window [lindex $args 0]]} + 2 {return [tkdnd::compat::bindtarget2 $window [lindex $args 0] \ + [lindex $args 1]]} + 3 {return [tkdnd::compat::bindtarget3 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2]]} + 4 {return [tkdnd::compat::bindtarget4 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2] [lindex $args 3]]} + } + } + cleartarget { + return [tkdnd::compat::cleartarget $window] + } + bindsource { + switch [llength $args] { + 0 {return [tkdnd::compat::bindsource0 $window]} + 1 {return [tkdnd::compat::bindsource1 $window [lindex $args 0]]} + 2 {return [tkdnd::compat::bindsource2 $window [lindex $args 0] \ + [lindex $args 1]]} + 3 {return [tkdnd::compat::bindsource3 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2]]} + } + } + clearsource { + return [tkdnd::compat::clearsource $window] + } + drag { + return [tkdnd::_init_drag 1 $window "press" 0 0 0 0] + } + } + error "invalid number of arguments!" 
+};# ::dnd + +# ---------------------------------------------------------------------------- +# Command compat::bindtarget +# ---------------------------------------------------------------------------- +proc compat::bindtarget0 {window} { + return [bind $window <>] +};# compat::bindtarget0 + +proc compat::bindtarget1 {window type} { + return [bindtarget2 $window $type ] +};# compat::bindtarget1 + +proc compat::bindtarget2 {window type event} { + switch $event { + {return [bind $window <>]} + {return [bind $window <>]} + {return [bind $window <>]} + {return [bind $window <>]} + } +};# compat::bindtarget2 + +proc compat::bindtarget3 {window type event script} { + set type [normalise_type $type] + ::tkdnd::drop_target register $window [list $type] + switch $event { + {return [bind $window <> $script]} + {return [bind $window <> $script]} + {return [bind $window <> $script]} + {return [bind $window <> $script]} + } +};# compat::bindtarget3 + +proc compat::bindtarget4 {window type event script priority} { + return [bindtarget3 $window $type $event $script] +};# compat::bindtarget4 + +proc compat::normalise_type { type } { + switch $type { + text/plain - + {text/plain;charset=UTF-8} - + Text {return DND_Text} + text/uri-list - + Files {return DND_Files} + default {return $type} + } +};# compat::normalise_type + +# ---------------------------------------------------------------------------- +# Command compat::bindsource +# ---------------------------------------------------------------------------- +proc compat::bindsource0 {window} { + return [bind $window <>] +};# compat::bindsource0 + +proc compat::bindsource1 {window type} { + return [bindsource2 $window $type ] +};# compat::bindsource1 + +proc compat::bindsource2 {window type script} { + set type [normalise_type $type] + ::tkdnd::drag_source register $window $type + bind $window <> "list {copy} {%t} \[$script\]" +};# compat::bindsource2 + +proc compat::bindsource3 {window type script priority} { + return [bindsource2 $window $type $script] +};# compat::bindsource3 + +# ---------------------------------------------------------------------------- +# Command compat::cleartarget +# ---------------------------------------------------------------------------- +proc compat::cleartarget {window} { +};# compat::cleartarget + +# ---------------------------------------------------------------------------- +# Command compat::clearsource +# ---------------------------------------------------------------------------- +proc compat::clearsource {window} { +};# compat::clearsource diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_generic.tcl b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_generic.tcl new file mode 100644 index 0000000000000000000000000000000000000000..698b464fc68e8a2e0e681f5bac32c4c63338f2c3 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_generic.tcl @@ -0,0 +1,520 @@ +# +# tkdnd_generic.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval generic { + variable _types {} + variable _typelist {} + variable _codelist {} + variable _actionlist {} + variable _pressedkeys {} + variable _action {} + variable _common_drag_source_types {} + variable _common_drop_target_types {} + variable _drag_source {} + variable _drop_target {} + + variable _last_mouse_root_x 0 + variable _last_mouse_root_y 0 + + variable _tkdnd2platform + variable _platform2tkdnd + + proc debug {msg} { + puts $msg + };# debug + + proc initialise { } { + };# initialise + + proc initialise_platform_to_tkdnd_types { types } { + variable _platform2tkdnd + variable _tkdnd2platform + set _platform2tkdnd [dict create {*}$types] + set _tkdnd2platform [dict create] + foreach type [dict keys $_platform2tkdnd] { + dict lappend _tkdnd2platform [dict get $_platform2tkdnd $type] $type + } + };# initialise_platform_to_tkdnd_types + + proc initialise_tkdnd_to_platform_types { types } { + variable _tkdnd2platform + set _tkdnd2platform [dict create {*}$types] + };# initialise_tkdnd_to_platform_types + +};# namespace generic + +# ---------------------------------------------------------------------------- +# Command generic::HandleEnter +# ---------------------------------------------------------------------------- +proc generic::HandleEnter { drop_target drag_source typelist codelist + actionlist pressedkeys } { + variable _typelist; set _typelist $typelist + variable _pressedkeys; set _pressedkeys $pressedkeys + variable _action; set _action refuse_drop + variable _common_drag_source_types; set _common_drag_source_types {} + variable _common_drop_target_types; set _common_drop_target_types {} + variable _actionlist + variable _drag_source; set _drag_source $drag_source + variable _drop_target; set _drop_target {} + variable _actionlist; set _actionlist $actionlist + variable _codelist set _codelist $codelist + + variable _last_mouse_root_x; set _last_mouse_root_x 0 + variable _last_mouse_root_y; set _last_mouse_root_y 0 + # debug "\n===============================================================" + # debug "generic::HandleEnter: drop_target=$drop_target,\ + # drag_source=$drag_source,\ + # typelist=$typelist" + # debug "generic::HandleEnter: ACTION: default" + return 
default +};# generic::HandleEnter + +# ---------------------------------------------------------------------------- +# Command generic::HandlePosition +# ---------------------------------------------------------------------------- +proc generic::HandlePosition { drop_target drag_source pressedkeys + rootX rootY { time 0 } } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + + # debug "generic::HandlePosition: drop_target=$drop_target,\ + # _drop_target=$_drop_target, rootX=$rootX, rootY=$rootY" + + if {![info exists _drag_source] && ![string length $_drag_source]} { + # debug "generic::HandlePosition: no or empty _drag_source:\ + # return refuse_drop" + return refuse_drop + } + + if {$drag_source ne "" && $drag_source ne $_drag_source} { + debug "generic position event from unexpected source: $_drag_source\ + != $drag_source" + return refuse_drop + } + + set _pressedkeys $pressedkeys + + ## Does the new drop target support any of our new types? + # foreach {common_drag_source_types common_drop_target_types} \ + # [GetWindowCommonTypes $drop_target $_typelist] {break} + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [FindWindowWithCommonTypes $drop_target $_typelist] {break} + set data [GetDroppedData $time] + + # debug "\t($_drop_target) -> ($drop_target)" + if {$drop_target != $_drop_target} { + if {[string length $_drop_target]} { + ## Call the <> event. + # debug "\t<> on $_drop_target" + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A \{$_action\} %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D \{\} %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + uplevel \#0 $cmd + } + } + set _drop_target $drop_target + set _action refuse_drop + + if {[llength $common_drag_source_types]} { + set _action [lindex $_actionlist 0] + set _common_drag_source_types $common_drag_source_types + set _common_drop_target_types $common_drop_target_types + ## Drop target supports at least one type. Send a <>. 
+ # puts "<> -> $drop_target" + set cmd [bind $drop_target <>] + if {[string length $cmd]} { + focus $drop_target + set cmd [string map [list %W $drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + } + } + } + + set _drop_target {} + if {[llength $common_drag_source_types]} { + set _common_drag_source_types $common_drag_source_types + set _common_drop_target_types $common_drop_target_types + set _drop_target $drop_target + ## Drop target supports at least one type. Send a <>. + set cmd [bind $drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + } + # Return values: copy, move, link, ask, private, refuse_drop, default + # debug "generic::HandlePosition: ACTION: $_action" + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action +};# generic::HandlePosition + +# ---------------------------------------------------------------------------- +# Command generic::HandleLeave +# ---------------------------------------------------------------------------- +proc generic::HandleLeave { } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + variable _last_mouse_root_x + variable _last_mouse_root_y + if {![info exists _drop_target]} {set _drop_target {}} + # debug "generic::HandleLeave: _drop_target=$_drop_target" + if {[info exists _drop_target] && [string length $_drop_target]} { + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target \ + %X $_last_mouse_root_x %Y $_last_mouse_root_y \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A \{$_action\} %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D \{\} %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + } + foreach var {_types _typelist _actionlist _pressedkeys _action + _common_drag_source_types _common_drop_target_types + 
_drag_source _drop_target} { + set $var {} + } +};# generic::HandleLeave + +# ---------------------------------------------------------------------------- +# Command generic::HandleDrop +# ---------------------------------------------------------------------------- +proc generic::HandleDrop {drop_target drag_source pressedkeys rootX rootY time } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + variable _last_mouse_root_x + variable _last_mouse_root_y + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + + set _pressedkeys $pressedkeys + + # puts "generic::HandleDrop: $time" + + if {![info exists _drag_source] && ![string length $_drag_source]} { + return refuse_drop + } + if {![info exists _drop_target] && ![string length $_drop_target]} { + return refuse_drop + } + if {![llength $_common_drag_source_types]} {return refuse_drop} + ## Get the dropped data. + set data [GetDroppedData $time] + ## Try to select the most specific <> event. + foreach type [concat $_common_drag_source_types $_common_drop_target_types] { + set type [platform_independent_type $type] + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + # Return values: copy, move, link, ask, private, refuse_drop + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action + } + } + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + # Return values: copy, move, link, ask, private, refuse_drop + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action +};# generic::HandleDrop + +# ---------------------------------------------------------------------------- +# Command generic::GetWindowCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetWindowCommonTypes { win typelist } { + set types [bind $win <>] + # debug ">> Accepted types: $win $_types" + set common_drag_source_types {} + set common_drop_target_types {} + if {[llength $types]} { + ## Examine the drop target types, to find at least one 
match with the drag + ## source types... + set supported_types [supported_types $typelist] + foreach type $types { + foreach matched [lsearch -glob -all -inline $supported_types $type] { + ## Drop target supports this type. + lappend common_drag_source_types $matched + lappend common_drop_target_types $type + } + } + } + list $common_drag_source_types $common_drop_target_types +};# generic::GetWindowCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::FindWindowWithCommonTypes +# ---------------------------------------------------------------------------- +proc generic::FindWindowWithCommonTypes { win typelist } { + set toplevel [winfo toplevel $win] + while {![string equal $win $toplevel]} { + foreach {common_drag_source_types common_drop_target_types} \ + [GetWindowCommonTypes $win $typelist] {break} + if {[llength $common_drag_source_types]} { + return [list $win $common_drag_source_types $common_drop_target_types] + } + set win [winfo parent $win] + } + ## We have reached the toplevel, which may be also a target (SF Bug #30) + foreach {common_drag_source_types common_drop_target_types} \ + [GetWindowCommonTypes $win $typelist] {break} + if {[llength $common_drag_source_types]} { + return [list $win $common_drag_source_types $common_drop_target_types] + } + return { {} {} {} } +};# generic::FindWindowWithCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::GetDroppedData +# ---------------------------------------------------------------------------- +proc generic::GetDroppedData { time } { + variable _dropped_data + return $_dropped_data +};# generic::GetDroppedData + +# ---------------------------------------------------------------------------- +# Command generic::SetDroppedData +# ---------------------------------------------------------------------------- +proc generic::SetDroppedData { data } { + variable _dropped_data + set _dropped_data $data +};# generic::SetDroppedData + +# ---------------------------------------------------------------------------- +# Command generic::GetDragSource +# ---------------------------------------------------------------------------- +proc generic::GetDragSource { } { + variable _drag_source + return $_drag_source +};# generic::GetDragSource + +# ---------------------------------------------------------------------------- +# Command generic::GetDropTarget +# ---------------------------------------------------------------------------- +proc generic::GetDropTarget { } { + variable _drop_target + return $_drop_target +};# generic::GetDropTarget + +# ---------------------------------------------------------------------------- +# Command generic::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetDragSourceCommonTypes { } { + variable _common_drag_source_types + return $_common_drag_source_types +};# generic::GetDragSourceCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::GetDropTargetCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetDropTargetCommonTypes { } { + variable _common_drag_source_types + return $_common_drag_source_types +};# generic::GetDropTargetCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::platform_specific_types +# 
---------------------------------------------------------------------------- +proc generic::platform_specific_types { types } { + set new_types {} + foreach type $types { + set new_types [concat $new_types [platform_specific_type $type]] + } + return $new_types +}; # generic::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command generic::platform_specific_type +# ---------------------------------------------------------------------------- +proc generic::platform_specific_type { type } { + variable _tkdnd2platform + if {[dict exists $_tkdnd2platform $type]} { + return [dict get $_tkdnd2platform $type] + } + list $type +}; # generic::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + set new_types {} + foreach type $types { + set new_types [concat $new_types [platform_independent_type $type]] + } + return $new_types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command generic::platform_independent_type +# ---------------------------------------------------------------------------- +proc generic::platform_independent_type { type } { + variable _platform2tkdnd + if {[dict exists $_platform2tkdnd $type]} { + return [dict get $_platform2tkdnd $type] + } + return $type +}; # generic::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command generic::supported_types +# ---------------------------------------------------------------------------- +proc generic::supported_types { types } { + set new_types {} + foreach type $types { + if {[supported_type $type]} {lappend new_types $type} + } + return $new_types +}; # generic::supported_types + +# ---------------------------------------------------------------------------- +# Command generic::supported_type +# ---------------------------------------------------------------------------- +proc generic::supported_type { type } { + variable _platform2tkdnd + if {[dict exists $_platform2tkdnd $type]} { + return 1 + } + return 0 +}; # generic::supported_type diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_macosx.tcl b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_macosx.tcl new file mode 100644 index 0000000000000000000000000000000000000000..307f6da2e94286d01dc9e068fffebe46de3c43f3 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_macosx.tcl @@ -0,0 +1,144 @@ +# +# tkdnd_macosx.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. + +# This software is copyrighted by: +# Georgios Petasis, Athens, Greece. +# e-mail: petasisg@yahoo.gr, petasis@iit.demokritos.gr +# +# Mac portions (c) 2009 Kevin Walzer/WordTech Communications LLC, +# kw@codebykevin.com +# +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. 
+# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +#basic API for Mac Drag and Drop + +#two data types supported: strings and file paths + +#two commands at C level: ::tkdnd::macdnd::registerdragwidget, ::tkdnd::macdnd::unregisterdragwidget + +#data retrieval mechanism: text or file paths are copied from drag clipboard to system clipboard and retrieved via [clipboard get]; array of file paths is converted to single tab-separated string, can be split into Tcl list + +if {[tk windowingsystem] eq "aqua" && "AppKit" ni [winfo server .]} { + error {TkAqua Cocoa required} +} + +namespace eval macdnd { + + proc initialise { } { + ## Mapping from platform types to TkDND types... + ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + NSPasteboardTypeString DND_Text \ + NSFilenamesPboardType DND_Files \ + NSPasteboardTypeHTML DND_HTML \ + ] + };# initialise + +};# namespace macdnd + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleEnter +# ---------------------------------------------------------------------------- +proc macdnd::HandleEnter { path drag_source typelist { data {} } } { + variable _pressedkeys + variable _actionlist + set _pressedkeys 1 + set _actionlist { copy move link ask private } + ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleEnter $path $drag_source $typelist $typelist \ + $_actionlist $_pressedkeys +};# macdnd::HandleEnter + +# ---------------------------------------------------------------------------- +# Command macdnd::HandlePosition +# ---------------------------------------------------------------------------- +proc macdnd::HandlePosition { drop_target rootX rootY {drag_source {}} } { + variable _pressedkeys + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + ::tkdnd::generic::HandlePosition $drop_target $drag_source \ + $_pressedkeys $rootX $rootY +};# macdnd::HandlePosition + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleLeave +# ---------------------------------------------------------------------------- +proc macdnd::HandleLeave { args } { + ::tkdnd::generic::HandleLeave +};# macdnd::HandleLeave + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleDrop +# ---------------------------------------------------------------------------- +proc macdnd::HandleDrop { drop_target data args } { + variable _pressedkeys + variable _last_mouse_root_x + variable _last_mouse_root_y + ## Get the dropped data... 
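+  ## On aqua the dropped data is delivered in the $data argument by the C
+  ## layer, so it is simply forwarded to the generic layer before the drop
+  ## is dispatched.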
+ ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleDrop {} {} $_pressedkeys \ + $_last_mouse_root_x $_last_mouse_root_y 0 +};# macdnd::HandleDrop + +# ---------------------------------------------------------------------------- +# Command macdnd::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc macdnd::GetDragSourceCommonTypes { } { + ::tkdnd::generic::GetDragSourceCommonTypes +};# macdnd::GetDragSourceCommonTypes + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc macdnd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # macdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc macdnd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # macdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc macdnd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # macdnd::platform_independent_type diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_unix.tcl b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_unix.tcl new file mode 100644 index 0000000000000000000000000000000000000000..56d17c4db718274df4b3b7a14f0d8e055a1002b6 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_unix.tcl @@ -0,0 +1,810 @@ +# +# tkdnd_unix.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval xdnd { + variable _dragging 0 + + proc initialise { } { + ## Mapping from platform types to TkDND types... + ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + text/plain\;charset=utf-8 DND_Text \ + UTF8_STRING DND_Text \ + text/plain DND_Text \ + STRING DND_Text \ + TEXT DND_Text \ + COMPOUND_TEXT DND_Text \ + text/uri-list DND_Files \ + text/html\;charset=utf-8 DND_HTML \ + text/html DND_HTML \ + application/x-color DND_Color \ + ] + };# initialise + +};# namespace xdnd + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndEnter { path drag_source typelist time { data {} } } { + variable _pressedkeys + variable _actionlist + variable _typelist + set _pressedkeys 1 + set _actionlist { copy move link ask private } + set _typelist $typelist + # puts "xdnd::HandleXdndEnter: $time" + ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleEnter $path $drag_source $typelist $typelist \ + $_actionlist $_pressedkeys +};# xdnd::HandleXdndEnter + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndPosition +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndPosition { drop_target rootX rootY time {drag_source {}} } { + variable _pressedkeys + variable _typelist + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + # puts "xdnd::HandleXdndPosition: $time" + ## Get the dropped data... + catch { + ::tkdnd::generic::SetDroppedData [GetPositionData $drop_target $_typelist $time] + } + ::tkdnd::generic::HandlePosition $drop_target $drag_source \ + $_pressedkeys $rootX $rootY +};# xdnd::HandleXdndPosition + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndLeave +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndLeave { } { + ::tkdnd::generic::HandleLeave +};# xdnd::HandleXdndLeave + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndDrop +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndDrop { time } { + variable _pressedkeys + variable _last_mouse_root_x + variable _last_mouse_root_y + ## Get the dropped data... 
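+  ## Under XDND the drop message itself carries no data: GetDroppedData
+  ## fetches it from the XdndSelection selection, trying each negotiated
+  ## common type in turn, before the drop is dispatched to the generic layer.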
+ ::tkdnd::generic::SetDroppedData [GetDroppedData \ + [::tkdnd::generic::GetDragSource] [::tkdnd::generic::GetDropTarget] \ + [::tkdnd::generic::GetDragSourceCommonTypes] $time] + ::tkdnd::generic::HandleDrop {} {} $_pressedkeys \ + $_last_mouse_root_x $_last_mouse_root_y $time +};# xdnd::HandleXdndDrop + +# ---------------------------------------------------------------------------- +# Command xdnd::GetPositionData +# ---------------------------------------------------------------------------- +proc xdnd::GetPositionData { drop_target typelist time } { + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [::tkdnd::generic::FindWindowWithCommonTypes $drop_target $typelist] {break} + GetDroppedData [::tkdnd::generic::GetDragSource] $drop_target \ + $common_drag_source_types $time +};# xdnd::GetPositionData + +# ---------------------------------------------------------------------------- +# Command xdnd::GetDroppedData +# ---------------------------------------------------------------------------- +proc xdnd::GetDroppedData { _drag_source _drop_target _common_drag_source_types time } { + if {![llength $_common_drag_source_types]} { + error "no common data types between the drag source and drop target widgets" + } + ## Is drag source in this application? + if {[catch {winfo pathname -displayof $_drop_target $_drag_source} p]} { + set _use_tk_selection 0 + } else { + set _use_tk_selection 1 + } + foreach type $_common_drag_source_types { + # puts "TYPE: $type ($_drop_target)" + # _get_selection $_drop_target $time $type + if {$_use_tk_selection} { + if {![catch { + selection get -displayof $_drop_target -selection XdndSelection \ + -type $type + } result options]} { + return [normalise_data $type $result] + } + } else { + # puts "_selection_get -displayof $_drop_target -selection XdndSelection \ + # -type $type -time $time" + #after 100 [list focus -force $_drop_target] + #after 50 [list raise [winfo toplevel $_drop_target]] + if {![catch { + _selection_get -displayof $_drop_target -selection XdndSelection \ + -type $type -time $time + } result options]} { + return [normalise_data $type $result] + } + } + } + return -options $options $result +};# xdnd::GetDroppedData + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc xdnd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # xdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc xdnd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # xdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc xdnd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type 
$type +}; # xdnd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command xdnd::_normalise_data +# ---------------------------------------------------------------------------- +proc xdnd::normalise_data { type data } { + # Tk knows how to interpret the following types: + # STRING, TEXT, COMPOUND_TEXT + # UTF8_STRING + # Else, it returns a list of 8 or 32 bit numbers... + switch -glob $type { + STRING - UTF8_STRING - TEXT - COMPOUND_TEXT {return $data} + text/html { + if {[catch { + encoding convertfrom unicode $data + } string]} { + set string $data + } + return [string map {\r\n \n} $string] + } + text/html\;charset=utf-8 - + text/plain\;charset=utf-8 - + text/plain { + if {[catch { + encoding convertfrom utf-8 [tkdnd::bytes_to_string $data] + } string]} { + set string $data + } + return [string map {\r\n \n} $string] + } + text/uri-list* { + if {[catch { + encoding convertfrom utf-8 [tkdnd::bytes_to_string $data] + } string]} { + set string $data + } + ## Get rid of \r\n + set string [string trim [string map {\r\n \n} $string]] + set files {} + foreach quoted_file [split $string] { + set file [tkdnd::urn_unquote $quoted_file] + switch -glob $file { + \#* {} + file://* {lappend files [string range $file 7 end]} + ftp://* - + https://* - + http://* {lappend files $quoted_file} + default {lappend files $file} + } + } + return $files + } + application/x-color { + return $data + } + text/x-moz-url - + application/q-iconlist - + default {return $data} + } +}; # xdnd::normalise_data + +############################################################################# +## +## XDND drag implementation +## +############################################################################# + +# ---------------------------------------------------------------------------- +# Command xdnd::_selection_ownership_lost +# ---------------------------------------------------------------------------- +proc xdnd::_selection_ownership_lost {} { + variable _dragging + set _dragging 0 +};# _selection_ownership_lost + +# ---------------------------------------------------------------------------- +# Command xdnd::_dodragdrop +# ---------------------------------------------------------------------------- +proc xdnd::_dodragdrop { source actions types data button } { + variable _dragging + + # puts "xdnd::_dodragdrop: source: $source, actions: $actions, types: $types,\ + # data: \"$data\", button: $button" + if {$_dragging} { + ## We are in the middle of another drag operation... 
+ error "another drag operation in progress" + } + + variable _dodragdrop_drag_source $source + variable _dodragdrop_drop_target 0 + variable _dodragdrop_drop_target_proxy 0 + variable _dodragdrop_actions $actions + variable _dodragdrop_action_descriptions $actions + variable _dodragdrop_actions_len [llength $actions] + variable _dodragdrop_types $types + variable _dodragdrop_types_len [llength $types] + variable _dodragdrop_data $data + variable _dodragdrop_transfer_data {} + variable _dodragdrop_button $button + variable _dodragdrop_time 0 + variable _dodragdrop_default_action refuse_drop + variable _dodragdrop_waiting_status 0 + variable _dodragdrop_drop_target_accepts_drop 0 + variable _dodragdrop_drop_target_accepts_action refuse_drop + variable _dodragdrop_current_cursor $_dodragdrop_default_action + variable _dodragdrop_drop_occured 0 + variable _dodragdrop_selection_requestor 0 + + ## + ## If we have more than 3 types, the property XdndTypeList must be set on + ## the drag source widget... + ## + if {$_dodragdrop_types_len > 3} { + _announce_type_list $_dodragdrop_drag_source $_dodragdrop_types + } + + ## + ## Announce the actions & their descriptions on the XdndActionList & + ## XdndActionDescription properties... + ## + _announce_action_list $_dodragdrop_drag_source $_dodragdrop_actions \ + $_dodragdrop_action_descriptions + + ## + ## Arrange selection handlers for our drag source, and all the supported types + ## + registerSelectionHandler $source $types + + ## + ## Step 1: When a drag begins, the source takes ownership of XdndSelection. + ## + selection own -command ::tkdnd::xdnd::_selection_ownership_lost \ + -selection XdndSelection $source + set _dragging 1 + + ## Grab the mouse pointer... + _grab_pointer $source $_dodragdrop_default_action + + ## Register our generic event handler... + # The generic event callback will report events by modifying variable + # ::xdnd::_dodragdrop_event: a dict with event information will be set as + # the value of the variable... + _register_generic_event_handler + + ## Set a timeout for debugging purposes... + # after 60000 {set ::tkdnd::xdnd::_dragging 0} + + tkwait variable ::tkdnd::xdnd::_dragging + _SendXdndLeave + + set _dragging 0 + _ungrab_pointer $source + _unregister_generic_event_handler + catch {selection clear -selection XdndSelection} + unregisterSelectionHandler $source $types + return $_dodragdrop_drop_target_accepts_action +};# xdnd::_dodragdrop + +# ---------------------------------------------------------------------------- +# Command xdnd::_process_drag_events +# ---------------------------------------------------------------------------- +proc xdnd::_process_drag_events {event} { + # The return value from proc is normally 0. 
A non-zero return value indicates + # that the event is not to be handled further; that is, proc has done all + # processing that is to be allowed for the event + variable _dragging + if {!$_dragging} {return 0} + # puts $event + + variable _dodragdrop_time + set time [dict get $event time] + set type [dict get $event type] + if {$time < $_dodragdrop_time && ![string equal $type SelectionRequest]} { + return 0 + } + set _dodragdrop_time $time + + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_default_action + switch $type { + MotionNotify { + set rootx [dict get $event x_root] + set rooty [dict get $event y_root] + set window [_find_drop_target_window $_dodragdrop_drag_source \ + $rootx $rooty] + if {[string length $window]} { + ## Examine the modifiers to suggest an action... + set _dodragdrop_default_action [_default_action $event] + ## Is it a Tk widget? + # set path [winfo containing $rootx $rooty] + # puts "Window under mouse: $window ($path)" + if {$_dodragdrop_drop_target != $window} { + ## Send XdndLeave to $_dodragdrop_drop_target + _SendXdndLeave + ## Is there a proxy? If not, _find_drop_target_proxy returns the + ## target window, so we always get a valid "proxy". + set proxy [_find_drop_target_proxy $_dodragdrop_drag_source $window] + ## Send XdndEnter to $window + _SendXdndEnter $window $proxy + ## Send XdndPosition to $_dodragdrop_drop_target + _SendXdndPosition $rootx $rooty $_dodragdrop_default_action + } else { + ## Send XdndPosition to $_dodragdrop_drop_target + _SendXdndPosition $rootx $rooty $_dodragdrop_default_action + } + } else { + ## No window under the mouse. Send XdndLeave to $_dodragdrop_drop_target + _SendXdndLeave + } + } + ButtonPress { + } + ButtonRelease { + variable _dodragdrop_button + set button [dict get $event button] + if {$button == $_dodragdrop_button} { + ## The button that initiated the drag was released. Trigger drop... + _SendXdndDrop + } + return 1 + } + KeyPress { + } + KeyRelease { + set keysym [dict get $event keysym] + switch $keysym { + Escape { + ## The user has pressed escape. Abort... 
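+          ## Clearing _dragging releases the "tkwait variable" in _dodragdrop,
+          ## which then sends XdndLeave and tears the drag operation down.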
+ if {$_dragging} {set _dragging 0} + } + } + } + SelectionRequest { + variable _dodragdrop_selection_requestor + variable _dodragdrop_selection_property + variable _dodragdrop_selection_selection + variable _dodragdrop_selection_target + variable _dodragdrop_selection_time + set _dodragdrop_selection_requestor [dict get $event requestor] + set _dodragdrop_selection_property [dict get $event property] + set _dodragdrop_selection_selection [dict get $event selection] + set _dodragdrop_selection_target [dict get $event target] + set _dodragdrop_selection_time $time + return 0 + } + default { + return 0 + } + } + return 0 +};# _process_drag_events + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndEnter {window proxy} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_types + variable _dodragdrop_waiting_status + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_target > 0} _SendXdndLeave + if {$_dodragdrop_drop_occured} return + set _dodragdrop_drop_target $window + set _dodragdrop_drop_target_proxy $proxy + set _dodragdrop_waiting_status 0 + if {$_dodragdrop_drop_target < 1} return + # puts "XdndEnter: $_dodragdrop_drop_target $_dodragdrop_drop_target_proxy" + _send_XdndEnter $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy $_dodragdrop_types +};# xdnd::_SendXdndEnter + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndPosition +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndPosition {rootx rooty action} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_waiting_status + ## Arrange a new XdndPosition, to be send periodically... 
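+  ## A 200 ms "after" timer re-sends the last position while the pointer is
+  ## idle; it is cancelled and re-armed on every call to this proc.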
+ variable _dodragdrop_xdnd_position_heartbeat + catch {after cancel $_dodragdrop_xdnd_position_heartbeat} + set _dodragdrop_xdnd_position_heartbeat [after 200 \ + [list ::tkdnd::xdnd::_SendXdndPosition $rootx $rooty $action]] + if {$_dodragdrop_waiting_status} {return} + # puts "XdndPosition: $_dodragdrop_drop_target $rootx $rooty $action" + _send_XdndPosition $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy $rootx $rooty $action + set _dodragdrop_waiting_status 1 +};# xdnd::_SendXdndPosition + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndStatus +# ---------------------------------------------------------------------------- +proc xdnd::_HandleXdndStatus {event} { + variable _dodragdrop_drop_target + variable _dodragdrop_waiting_status + + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + set _dodragdrop_waiting_status 0 + foreach key {target accept want_position action x y w h} { + set $key [dict get $event $key] + } + set _dodragdrop_drop_target_accepts_drop $accept + set _dodragdrop_drop_target_accepts_action $action + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + _update_cursor + # puts "XdndStatus: $event" +};# xdnd::_HandleXdndStatus + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndFinished +# ---------------------------------------------------------------------------- +proc xdnd::_HandleXdndFinished {event} { + variable _dodragdrop_xdnd_finished_event_after_id + catch {after cancel $_dodragdrop_xdnd_finished_event_after_id} + set _dodragdrop_xdnd_finished_event_after_id {} + variable _dodragdrop_drop_target + set _dodragdrop_drop_target 0 + variable _dragging + if {$_dragging} {set _dragging 0} + + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + if {[dict size $event]} { + foreach key {target accept action} { + set $key [dict get $event $key] + } + set _dodragdrop_drop_target_accepts_drop $accept + set _dodragdrop_drop_target_accepts_action $action + } else { + set _dodragdrop_drop_target_accepts_drop 0 + } + if {!$_dodragdrop_drop_target_accepts_drop} { + set _dodragdrop_drop_target_accepts_action refuse_drop + } + # puts "XdndFinished: $event" +};# xdnd::_HandleXdndFinished + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndLeave +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndLeave {} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_target_proxy + # puts "XdndLeave: $_dodragdrop_drop_target" + _send_XdndLeave $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy + set _dodragdrop_drop_target 0 + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + set _dodragdrop_drop_target_accepts_drop 0 + set _dodragdrop_drop_target_accepts_action refuse_drop + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + _update_cursor +};# xdnd::_SendXdndLeave + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndDrop +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndDrop {} 
{ + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} { + ## The mouse has been released over a widget that does not accept drops. + _HandleXdndFinished {} + return + } + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} {return} + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + + set _dodragdrop_drop_occured 1 + _update_cursor clock + + if {!$_dodragdrop_drop_target_accepts_drop} { + _SendXdndLeave + _HandleXdndFinished {} + return + } + # puts "XdndDrop: $_dodragdrop_drop_target" + variable _dodragdrop_drop_timestamp + set _dodragdrop_drop_timestamp [_send_XdndDrop \ + $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy] + set _dodragdrop_drop_target 0 + # puts "XdndDrop: $_dodragdrop_drop_target" + ## Arrange a timeout for receiving XdndFinished... + variable _dodragdrop_xdnd_finished_event_after_id + set _dodragdrop_xdnd_finished_event_after_id \ + [after 10000 [list ::tkdnd::xdnd::_HandleXdndFinished {}]] +};# xdnd::_SendXdndDrop + +# ---------------------------------------------------------------------------- +# Command xdnd::_update_cursor +# ---------------------------------------------------------------------------- +proc xdnd::_update_cursor { {cursor {}}} { + # puts "_update_cursor $cursor" + variable _dodragdrop_current_cursor + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + + if {![string length $cursor]} { + set cursor refuse_drop + if {$_dodragdrop_drop_target_accepts_drop} { + set cursor $_dodragdrop_drop_target_accepts_action + } + } + if {![string equal $cursor $_dodragdrop_current_cursor]} { + _set_pointer_cursor $_dodragdrop_drag_source $cursor + set _dodragdrop_current_cursor $cursor + } +};# xdnd::_update_cursor + +# ---------------------------------------------------------------------------- +# Command xdnd::_default_action +# ---------------------------------------------------------------------------- +proc xdnd::_default_action {event} { + variable _dodragdrop_actions + variable _dodragdrop_actions_len + if {$_dodragdrop_actions_len == 1} {return [lindex $_dodragdrop_actions 0]} + + set alt [dict get $event Alt] + set shift [dict get $event Shift] + set control [dict get $event Control] + + if {$shift && $control && [lsearch $_dodragdrop_actions link] != -1} { + return link + } elseif {$control && [lsearch $_dodragdrop_actions copy] != -1} { + return copy + } elseif {$shift && [lsearch $_dodragdrop_actions move] != -1} { + return move + } elseif {$alt && [lsearch $_dodragdrop_actions link] != -1} { + return link + } + return default +};# xdnd::_default_action + +# ---------------------------------------------------------------------------- +# Command xdnd::getFormatForType +# ---------------------------------------------------------------------------- +proc xdnd::getFormatForType {type} { + switch -glob [string tolower $type] { + text/plain\;charset=utf-8 - + text/html\;charset=utf-8 - + utf8_string {set format UTF8_STRING} + text/html - + text/plain - + string - + text - + compound_text {set format STRING} + text/uri-list* {set format UTF8_STRING} + application/x-color {set format $type} + default {set format $type} + } + return $format +};# xdnd::getFormatForType + +# ---------------------------------------------------------------------------- +# Command xdnd::registerSelectionHandler +# 
---------------------------------------------------------------------------- +proc xdnd::registerSelectionHandler {source types} { + foreach type $types { + selection handle -selection XdndSelection \ + -type $type \ + -format [getFormatForType $type] \ + $source [list ::tkdnd::xdnd::_SendData $type] + } +};# xdnd::registerSelectionHandler + +# ---------------------------------------------------------------------------- +# Command xdnd::unregisterSelectionHandler +# ---------------------------------------------------------------------------- +proc xdnd::unregisterSelectionHandler {source types} { + foreach type $types { + catch { + selection handle -selection XdndSelection \ + -type $type \ + -format [getFormatForType $type] \ + $source {} + } + } +};# xdnd::unregisterSelectionHandler + +# ---------------------------------------------------------------------------- +# Command xdnd::_convert_to_unsigned +# ---------------------------------------------------------------------------- +proc xdnd::_convert_to_unsigned {data format} { + switch $format { + 8 { set mask 0xff } + 16 { set mask 0xffff } + 32 { set mask 0xffffff } + default {error "unsupported format $format"} + } + ## Convert signed integer into unsigned... + set d [list] + foreach num $data { + lappend d [expr { $num & $mask }] + } + return $d +};# xdnd::_convert_to_unsigned + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendData +# ---------------------------------------------------------------------------- +proc xdnd::_SendData {type offset bytes args} { + variable _dodragdrop_drag_source + variable _dodragdrop_types + variable _dodragdrop_data + variable _dodragdrop_transfer_data + + ## The variable _dodragdrop_data contains a list of data, one for each + ## type in the _dodragdrop_types variable. We have to search types, and find + ## the corresponding entry in the _dodragdrop_data list. + set index [lsearch $_dodragdrop_types $type] + if {$index < 0} { + error "unable to locate data suitable for type \"$type\"" + } + set typed_data [lindex $_dodragdrop_data $index] + set format 8 + if {$offset == 0} { + ## Prepare the data to be transferred... + switch -glob $type { + text/plain* - UTF8_STRING - STRING - TEXT - COMPOUND_TEXT { + binary scan [encoding convertto utf-8 $typed_data] \ + c* _dodragdrop_transfer_data + set _dodragdrop_transfer_data \ + [_convert_to_unsigned $_dodragdrop_transfer_data $format] + } + text/uri-list* { + set files [list] + foreach file $typed_data { + switch -glob $file { + *://* {lappend files $file} + default {lappend files file://$file} + } + } + binary scan [encoding convertto utf-8 "[join $files \r\n]\r\n"] \ + c* _dodragdrop_transfer_data + set _dodragdrop_transfer_data \ + [_convert_to_unsigned $_dodragdrop_transfer_data $format] + } + application/x-color { + set format 16 + ## Try to understand the provided data: we accept a standard Tk colour, + ## or a list of 3 values (red green blue) or a list of 4 values + ## (red green blue opacity). + switch [llength $typed_data] { + 1 { set color [winfo rgb $_dodragdrop_drag_source $typed_data] + lappend color 65535 } + 3 { set color $typed_data; lappend color 65535 } + 4 { set color $typed_data } + default {error "unknown color data: \"$typed_data\""} + } + ## Convert the 4 elements into 16 bit values... 
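+          ## Each channel is emitted as a 0x%04X hex string, matching the
+          ## 16-bit format announced for application/x-color above.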
+ set _dodragdrop_transfer_data [list] + foreach c $color { + lappend _dodragdrop_transfer_data [format 0x%04X $c] + } + } + default { + set format 32 + binary scan $typed_data c* _dodragdrop_transfer_data + } + } + } + + ## + ## Data has been split into bytes. Count the bytes requested, and return them + ## + set data [lrange $_dodragdrop_transfer_data $offset [expr {$offset+$bytes-1}]] + switch $format { + 8 { + set data [encoding convertfrom utf-8 [binary format c* $data]] + } + 16 { + variable _dodragdrop_selection_requestor + if {$_dodragdrop_selection_requestor} { + ## Tk selection cannot process this format (only 8 & 32 supported). + ## Call our XChangeProperty... + set numItems [llength $data] + variable _dodragdrop_selection_property + variable _dodragdrop_selection_selection + variable _dodragdrop_selection_target + variable _dodragdrop_selection_time + XChangeProperty $_dodragdrop_drag_source \ + $_dodragdrop_selection_requestor \ + $_dodragdrop_selection_property \ + $_dodragdrop_selection_target \ + $format \ + $_dodragdrop_selection_time \ + $data $numItems + return -code break + } + } + 32 { + } + default { + error "unsupported format $format" + } + } + # puts "SendData: $type $offset $bytes $args ($typed_data)" + # puts " $data" + return $data +};# xdnd::_SendData diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_utils.tcl b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_utils.tcl new file mode 100644 index 0000000000000000000000000000000000000000..ee961ddb1ca29b383496111eadc2ccdce7776b08 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_utils.tcl @@ -0,0 +1,252 @@ +# +# tkdnd_utils.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. 
+# + +package require tkdnd +namespace eval ::tkdnd { + namespace eval utils { + };# namespace ::tkdnd::utils + namespace eval text { + variable _drag_tag tkdnd::drag::selection::tag + variable _state {} + variable _drag_source_widget {} + variable _drop_target_widget {} + variable _now_dragging 0 + };# namespace ::tkdnd::text +};# namespace ::tkdnd + +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag clear 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag motion 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_TextAutoScan %W %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag reset 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag clear 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag motion 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag reset 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag clear 3 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag motion 3 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag reset 3 %W %s %X %Y %x %y} + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::drag_source +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::drag_source { mode path { types DND_Text } { event 1 } { tagprefix TkDND_Drag_Text } { tag sel } } { + switch -exact -- $mode { + register { + $path tag bind $tag \ + "tkdnd::text::_begin_drag press ${event} %W %s %X %Y %x %y" + ## Set a binding to the widget, to put selection as data... + bind $path <> "::tkdnd::text::DragInitCmd $path {%t} $tag" + ## Set a binding to the widget, to remove selection if action is move... + bind $path <> "::tkdnd::text::DragEndCmd $path %A $tag" + } + unregister { + $path tag bind $tag {} + bind $path <> {} + bind $path <> {} + } + } + ::tkdnd::drag_source $mode $path $types $event $tagprefix +};# ::tkdnd::text::drag_source + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::drop_target +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::drop_target { mode path { types DND_Text } } { + switch -exact -- $mode { + register { + bind $path <> "::tkdnd::text::DropPosition $path %X %Y %A %a %m" + bind $path <> "::tkdnd::text::Drop $path %D %X %Y %A %a %m" + } + unregister { + bind $path <> {} + bind $path <> {} + bind $path <> {} + bind $path <> {} + } + } + ::tkdnd::drop_target $mode $path $types +};# ::tkdnd::text::drop_target + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DragInitCmd +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DragInitCmd { path { types DND_Text } { tag sel } { actions { copy move } } } { + ## Save the selection indices... 
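+  ## The drag_source/drop_target helpers above are typically used together;
+  ## an illustrative sketch (the text widget name .t is hypothetical):
+  ##   package require tkdnd::utils
+  ##   tkdnd::text::drag_source register .t
+  ##   tkdnd::text::drop_target register .t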
+ variable _drag_source_widget + variable _drop_target_widget + set _drag_source_widget $path + set _drop_target_widget {} + _save_selection $path $tag + list $actions $types [$path get $tag.first $tag.last] +};# ::tkdnd::text::DragInitCmd + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DragEndCmd +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DragEndCmd { path action { tag sel } } { + variable _drag_source_widget + variable _drop_target_widget + set _drag_source_widget {} + set _drop_target_widget {} + _restore_selection $path $tag + switch -exact -- $action { + move { + ## Delete the original selected text... + variable _selection_first + variable _selection_last + $path delete $_selection_first $_selection_last + } + } +};# ::tkdnd::text::DragEndCmd + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DropPosition +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DropPosition { path X Y action actions keys} { + variable _drag_source_widget + variable _drop_target_widget + set _drop_target_widget $path + ## This check is primitive, a more accurate one is needed! + if {$path eq $_drag_source_widget} { + ## This is a drag within the same widget! Set action to move... + if {"move" in $actions} {set action move} + } + incr X -[winfo rootx $path] + incr Y -[winfo rooty $path] + $path mark set insert @$X,$Y; update + return $action +};# ::tkdnd::text::DropPosition + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::Drop +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::Drop { path data X Y action actions keys } { + incr X -[winfo rootx $path] + incr Y -[winfo rooty $path] + $path mark set insert @$X,$Y + $path insert [$path index insert] $data + return $action +};# ::tkdnd::text::Drop + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_save_selection +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_save_selection { path tag} { + variable _drag_tag + variable _selection_first + variable _selection_last + variable _selection_tag $tag + set _selection_first [$path index $tag.first] + set _selection_last [$path index $tag.last] + $path tag add $_drag_tag $_selection_first $_selection_last + $path tag configure $_drag_tag \ + -background [$path tag cget $tag -background] \ + -foreground [$path tag cget $tag -foreground] +};# tkdnd::text::_save_selection + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_restore_selection +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_restore_selection { path tag} { + variable _drag_tag + variable _selection_first + variable _selection_last + $path tag delete $_drag_tag + $path tag remove $tag 0.0 end + #$path tag add $tag $_selection_first $_selection_last +};# tkdnd::text::_restore_selection + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_begin_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_begin_drag { event button source state X Y x y } { + variable _drop_target_widget + variable _state + # puts 
"::tkdnd::text::_begin_drag $event $button $source $state $X $Y $x $y" + + switch -exact -- $event { + clear { + switch -exact -- $_state { + press { + ## Do not execute other bindings, as they will erase selection... + return -code break + } + } + set _state clear + } + motion { + variable _now_dragging + if {$_now_dragging} {return -code break} + if { [string equal $_state "press"] } { + variable _x0; variable _y0 + if { abs($_x0-$X) > ${::tkdnd::_dx} || abs($_y0-$Y) > ${::tkdnd::_dy} } { + set _state "done" + set _drop_target_widget {} + set _now_dragging 1 + set code [catch { + ::tkdnd::_init_drag $button $source $state $X $Y $x $y + } info options] + set _drop_target_widget {} + set _now_dragging 0 + if {$code != 0} { + ## Something strange occurred... + return -options $options $info + } + } + return -code break + } + set _state clear + } + press { + variable _x0; variable _y0 + set _x0 $X + set _y0 $Y + set _state "press" + } + reset { + set _state {} + } + } + if {$source eq $_drop_target_widget} {return -code break} + return -code continue +};# tkdnd::text::_begin_drag + +proc tkdnd::text::_TextAutoScan {w x y} { + variable _now_dragging + if {$_now_dragging} {return -code break} + return -code continue +};# tkdnd::text::_TextAutoScan diff --git a/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_windows.tcl b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_windows.tcl new file mode 100644 index 0000000000000000000000000000000000000000..a1d01f3a2c438eaf3f676437d4d4ba89b3ba64f0 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/linux64/tkdnd_windows.tcl @@ -0,0 +1,167 @@ +# +# tkdnd_windows.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval olednd { + + proc initialise { } { + ## Mapping from platform types to TkDND types... 
+ ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + CF_UNICODETEXT DND_Text \ + CF_TEXT DND_Text \ + CF_HDROP DND_Files \ + UniformResourceLocator DND_URL \ + CF_HTML DND_HTML \ + {HTML Format} DND_HTML \ + CF_RTF DND_RTF \ + CF_RTFTEXT DND_RTF \ + {Rich Text Format} DND_RTF \ + ] + # FileGroupDescriptorW DND_Files \ + # FileGroupDescriptor DND_Files \ + + ## Mapping from TkDND types to platform types... + ::tkdnd::generic::initialise_tkdnd_to_platform_types [list \ + DND_Text {CF_UNICODETEXT CF_TEXT} \ + DND_Files {CF_HDROP} \ + DND_URL {UniformResourceLocator UniformResourceLocatorW} \ + DND_HTML {CF_HTML {HTML Format}} \ + DND_RTF {CF_RTF CF_RTFTEXT {Rich Text Format}} \ + ] + };# initialise + +};# namespace olednd + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragEnter +# ---------------------------------------------------------------------------- +proc olednd::HandleDragEnter { drop_target typelist actionlist pressedkeys + rootX rootY codelist { data {} } } { + ::tkdnd::generic::SetDroppedData $data + focus $drop_target + ::tkdnd::generic::HandleEnter $drop_target 0 $typelist \ + $codelist $actionlist $pressedkeys + set action [::tkdnd::generic::HandlePosition $drop_target {} \ + $pressedkeys $rootX $rootY] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDragEnter + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragOver +# ---------------------------------------------------------------------------- +proc olednd::HandleDragOver { drop_target pressedkeys rootX rootY } { + set action [::tkdnd::generic::HandlePosition $drop_target {} \ + $pressedkeys $rootX $rootY] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDragOver + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragLeave +# ---------------------------------------------------------------------------- +proc olednd::HandleDragLeave { drop_target } { + ::tkdnd::generic::HandleLeave + if {$::tkdnd::_auto_update} {update idletasks} +};# olednd::HandleDragLeave + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDrop +# ---------------------------------------------------------------------------- +proc olednd::HandleDrop { drop_target pressedkeys rootX rootY type data } { + ::tkdnd::generic::SetDroppedData [normalise_data $type $data] + set action [::tkdnd::generic::HandleDrop $drop_target {} \ + $pressedkeys $rootX $rootY 0] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDrop + +# ---------------------------------------------------------------------------- +# Command olednd::GetDataType +# ---------------------------------------------------------------------------- +proc olednd::GetDataType { drop_target typelist } { + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [::tkdnd::generic::FindWindowWithCommonTypes $drop_target $typelist] {break} + lindex $common_drag_source_types 0 +};# olednd::GetDataType + +# ---------------------------------------------------------------------------- +# Command olednd::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc olednd::GetDragSourceCommonTypes { drop_target } { + ::tkdnd::generic::GetDragSourceCommonTypes +};# olednd::GetDragSourceCommonTypes + +# 
---------------------------------------------------------------------------- +# Command olednd::platform_specific_types +# ---------------------------------------------------------------------------- +proc olednd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # olednd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command olednd::platform_specific_type +# ---------------------------------------------------------------------------- +proc olednd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # olednd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command olednd::platform_independent_type +# ---------------------------------------------------------------------------- +proc olednd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # olednd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command olednd::normalise_data +# ---------------------------------------------------------------------------- +proc olednd::normalise_data { type data } { + switch [lindex [::tkdnd::generic::platform_independent_type $type] 0] { + DND_Text {return $data} + DND_Files {return $data} + DND_HTML {return [encoding convertfrom utf-8 $data]} + default {return $data} + } +}; # olednd::normalise_data diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/libtkdnd2.9.2.dylib b/gui_data/tkinterdnd2/tkdnd/osx64/libtkdnd2.9.2.dylib new file mode 100644 index 0000000000000000000000000000000000000000..2f511c423e109f80e7511c768cfb81d16a1cd65a Binary files /dev/null and b/gui_data/tkinterdnd2/tkdnd/osx64/libtkdnd2.9.2.dylib differ diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/pkgIndex.tcl b/gui_data/tkinterdnd2/tkdnd/osx64/pkgIndex.tcl new file mode 100644 index 0000000000000000000000000000000000000000..d46e91c53205b270f70be7c124dc7715ba93bed9 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx64/pkgIndex.tcl @@ -0,0 +1,10 @@ +# +# Tcl package index file +# +package ifneeded tkdnd 2.9.2 \ + "source \{$dir/tkdnd.tcl\} ; \ + tkdnd::initialise \{$dir\} libtkdnd2.9.2.dylib tkdnd" + +package ifneeded tkdnd::utils 2.9.2 \ + "source \{$dir/tkdnd_utils.tcl\} ; \ + package provide tkdnd::utils 2.9.2" diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd.tcl b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd.tcl new file mode 100644 index 0000000000000000000000000000000000000000..12d1dd289de6b78e83922a1b1653ef6165dc70db --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd.tcl @@ -0,0 +1,469 @@ +# +# tkdnd.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +package require Tk + +namespace eval ::tkdnd { + variable _topw ".drag" + variable _tabops + variable _state + variable _x0 + variable _y0 + variable _platform_namespace + variable _drop_file_temp_dir + variable _auto_update 1 + variable _dx 3 ;# The difference in pixels before a drag is initiated. + variable _dy 3 ;# The difference in pixels before a drag is initiated. + + variable _windowingsystem + + bind TkDND_Drag1 {tkdnd::_begin_drag press 1 %W %s %X %Y %x %y} + bind TkDND_Drag1 {tkdnd::_begin_drag motion 1 %W %s %X %Y %x %y} + bind TkDND_Drag2 {tkdnd::_begin_drag press 2 %W %s %X %Y %x %y} + bind TkDND_Drag2 {tkdnd::_begin_drag motion 2 %W %s %X %Y %x %y} + bind TkDND_Drag3 {tkdnd::_begin_drag press 3 %W %s %X %Y %x %y} + bind TkDND_Drag3 {tkdnd::_begin_drag motion 3 %W %s %X %Y %x %y} + + # ---------------------------------------------------------------------------- + # Command tkdnd::initialise: Initialise the TkDND package. + # ---------------------------------------------------------------------------- + proc initialise { dir PKG_LIB_FILE PACKAGE_NAME} { + variable _platform_namespace + variable _drop_file_temp_dir + variable _windowingsystem + global env + + switch [tk windowingsystem] { + x11 { + set _windowingsystem x11 + } + win32 - + windows { + set _windowingsystem windows + } + aqua { + set _windowingsystem aqua + } + default { + error "unknown Tk windowing system" + } + } + + ## Get User's home directory: We try to locate the proper path from a set of + ## environmental variables... + foreach var {HOME HOMEPATH USERPROFILE ALLUSERSPROFILE APPDATA} { + if {[info exists env($var)]} { + if {[file isdirectory $env($var)]} { + set UserHomeDir $env($var) + break + } + } + } + + ## Should use [tk windowingsystem] instead of tcl platform array: + ## OS X returns "unix," but that's not useful because it has its own + ## windowing system, aqua + ## Under windows we have to also combine HOMEDRIVE & HOMEPATH... 
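+    ## If none of the variables above yielded a directory, HOMEDRIVE+HOMEPATH
+    ## is tried on Windows and the current working directory is the last
+    ## resort before the path is normalised.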
+ if {![info exists UserHomeDir] && + [string equal $_windowingsystem windows] && + [info exists env(HOMEDRIVE)] && [info exists env(HOMEPATH)]} { + if {[file isdirectory $env(HOMEDRIVE)$env(HOMEPATH)]} { + set UserHomeDir $env(HOMEDRIVE)$env(HOMEPATH) + } + } + ## Have we located the needed path? + if {![info exists UserHomeDir]} { + set UserHomeDir [pwd] + } + set UserHomeDir [file normalize $UserHomeDir] + + ## Try to locate a temporary directory... + foreach var {TKDND_TEMP_DIR TEMP TMP} { + if {[info exists env($var)]} { + if {[file isdirectory $env($var)] && [file writable $env($var)]} { + set _drop_file_temp_dir $env($var) + break + } + } + } + if {![info exists _drop_file_temp_dir]} { + foreach _dir [list "$UserHomeDir/Local Settings/Temp" \ + "$UserHomeDir/AppData/Local/Temp" \ + /tmp \ + C:/WINDOWS/Temp C:/Temp C:/tmp \ + D:/WINDOWS/Temp D:/Temp D:/tmp] { + if {[file isdirectory $_dir] && [file writable $_dir]} { + set _drop_file_temp_dir $_dir + break + } + } + } + if {![info exists _drop_file_temp_dir]} { + set _drop_file_temp_dir $UserHomeDir + } + set _drop_file_temp_dir [file native $_drop_file_temp_dir] + + source $dir/tkdnd_generic.tcl + switch $_windowingsystem { + x11 { + source $dir/tkdnd_unix.tcl + set _platform_namespace xdnd + } + win32 - + windows { + source $dir/tkdnd_windows.tcl + set _platform_namespace olednd + } + aqua { + source $dir/tkdnd_macosx.tcl + set _platform_namespace macdnd + } + default { + error "unknown Tk windowing system" + } + } + load $dir/$PKG_LIB_FILE $PACKAGE_NAME + source $dir/tkdnd_compat.tcl + ${_platform_namespace}::initialise + };# initialise + + proc GetDropFileTempDirectory { } { + variable _drop_file_temp_dir + return $_drop_file_temp_dir + } + proc SetDropFileTempDirectory { dir } { + variable _drop_file_temp_dir + set _drop_file_temp_dir $dir + } + +};# namespace ::tkdnd + +# ---------------------------------------------------------------------------- +# Command tkdnd::drag_source +# ---------------------------------------------------------------------------- +proc ::tkdnd::drag_source { mode path { types {} } { event 1 } + { tagprefix TkDND_Drag } } { + set tags [bindtags $path] + set idx [lsearch $tags ${tagprefix}$event] + switch -- $mode { + register { + if { $idx != -1 } { + ## No need to do anything! 
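# ----------------------------------------------------------------------------
# Usage sketch (illustrative; widget name and type are placeholders):
# registering a drag source only inserts the TkDND_Drag1 tag into the widget's
# bindtags, so the class-level press/motion bindings defined above can start
# the drag, and records the offered types on the widget.
#
#   ::tkdnd::drag_source register .listbox DND_Files
#   bindtags .listbox   ;# -> .listbox TkDND_Drag1 Listbox . all
# ----------------------------------------------------------------------------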
+ # bindtags $path [lreplace $tags $idx $idx ${tagprefix}$event] + } else { + bindtags $path [linsert $tags 1 ${tagprefix}$event] + } + _drag_source_update_types $path $types + } + unregister { + if { $idx != -1 } { + bindtags $path [lreplace $tags $idx $idx] + } + } + } +};# tkdnd::drag_source + +proc ::tkdnd::_drag_source_update_types { path types } { + set types [platform_specific_types $types] + set old_types [bind $path <>] + foreach type $types { + if {[lsearch $old_types $type] < 0} {lappend old_types $type} + } + bind $path <> $old_types +};# ::tkdnd::_drag_source_update_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::drop_target +# ---------------------------------------------------------------------------- +proc ::tkdnd::drop_target { mode path { types {} } } { + variable _windowingsystem + set types [platform_specific_types $types] + switch -- $mode { + register { + switch $_windowingsystem { + x11 { + _register_types $path [winfo toplevel $path] $types + } + win32 - + windows { + _RegisterDragDrop $path + bind $path {+ tkdnd::_RevokeDragDrop %W} + } + aqua { + macdnd::registerdragwidget [winfo toplevel $path] $types + } + default { + error "unknown Tk windowing system" + } + } + set old_types [bind $path <>] + set new_types {} + foreach type $types { + if {[lsearch -exact $old_types $type] < 0} {lappend new_types $type} + } + if {[llength $new_types]} { + bind $path <> [concat $old_types $new_types] + } + } + unregister { + switch $_windowingsystem { + x11 { + } + win32 - + windows { + _RevokeDragDrop $path + } + aqua { + error todo + } + default { + error "unknown Tk windowing system" + } + } + bind $path <> {} + } + } +};# tkdnd::drop_target + +# ---------------------------------------------------------------------------- +# Command tkdnd::_begin_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_begin_drag { event button source state X Y x y } { + variable _x0 + variable _y0 + variable _state + + switch -- $event { + press { + set _x0 $X + set _y0 $Y + set _state "press" + } + motion { + if { ![info exists _state] } { + # This is just extra protection. There seem to be + # rare cases where the motion comes before the press. + return + } + if { [string equal $_state "press"] } { + variable _dx + variable _dy + if { abs($_x0-$X) > ${_dx} || abs($_y0-$Y) > ${_dy} } { + set _state "done" + _init_drag $button $source $state $X $Y $x $y + } + } + } + } +};# tkdnd::_begin_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::_init_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_init_drag { button source state rootX rootY X Y } { + # Call the <> binding. 
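# ----------------------------------------------------------------------------
# Sketch of the binding evaluated below (illustrative; <<DragInitCmd>> is the
# standard TkDND virtual event name and the widget is a placeholder). For a
# widget registered with ::tkdnd::drag_source, the handler must return either
# {actions types data} or {actions {type data ...}}; returning just
# refuse_drop cancels the drag.
#
#   bind .listbox <<DragInitCmd>> {
#       list {copy move} DND_Files [list /tmp/track.wav]
#   }
# ----------------------------------------------------------------------------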
+ set cmd [bind $source <>] + # puts "CMD: $cmd" + if {[string length $cmd]} { + set cmd [string map [list %W $source %X $rootX %Y $rootY %x $X %y $Y \ + %S $state %e <> %A \{\} %% % \ + %t [bind $source <>]] $cmd] + set code [catch {uplevel \#0 $cmd} info options] + # puts "CODE: $code ---- $info" + switch -exact -- $code { + 0 {} + 3 - 4 { + # FRINK: nocheck + return + } + default { + return -options $options $info + } + } + + set len [llength $info] + if {$len == 3} { + foreach { actions types _data } $info { break } + set types [platform_specific_types $types] + set data [list] + foreach type $types { + lappend data $_data + } + unset _data + } elseif {$len == 2} { + foreach { actions _data } $info { break } + set data [list]; set types [list] + foreach {t d} $_data { + foreach t [platform_specific_types $t] { + lappend types $t; lappend data $d + } + } + unset _data t d + } else { + if {$len == 1 && [string equal [lindex $actions 0] "refuse_drop"]} { + return + } + error "not enough items in the result of the <>\ + event binding. Either 2 or 3 items are expected. The command + executed was: \"$cmd\"\nResult was: \"$info\"" + } + set action refuse_drop + variable _windowingsystem + # puts "Source: \"$source\"" + # puts "Types: \"[join $types {", "}]\"" + # puts "Actions: \"[join $actions {", "}]\"" + # puts "Button: \"$button\"" + # puts "Data: \"[string range $data 0 100]\"" + switch $_windowingsystem { + x11 { + set action [xdnd::_dodragdrop $source $actions $types $data $button] + } + win32 - + windows { + set action [_DoDragDrop $source $actions $types $data $button] + } + aqua { + set action [macdnd::dodragdrop $source $actions $types $data $button] + } + default { + error "unknown Tk windowing system" + } + } + ## Call _end_drag to notify the widget of the result of the drag + ## operation... + _end_drag $button $source {} $action {} $data {} $state $rootX $rootY $X $Y + } +};# tkdnd::_init_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::_end_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_end_drag { button source target action type data result + state rootX rootY X Y } { + set rootX 0 + set rootY 0 + # Call the <> binding. + set cmd [bind $source <>] + if {[string length $cmd]} { + set cmd [string map [list %W $source %X $rootX %Y $rootY %x $X %y $Y %% % \ + %S $state %e <> %A \{$action\}] $cmd] + set info [uplevel \#0 $cmd] + # if { $info != "" } { + # variable _windowingsystem + # foreach { actions types data } $info { break } + # set types [platform_specific_types $types] + # switch $_windowingsystem { + # x11 { + # error "dragging from Tk widgets not yet supported" + # } + # win32 - + # windows { + # set action [_DoDragDrop $source $actions $types $data $button] + # } + # aqua { + # macdnd::dodragdrop $source $actions $types $data + # } + # default { + # error "unknown Tk windowing system" + # } + # } + # ## Call _end_drag to notify the widget of the result of the drag + # ## operation... 
+ # _end_drag $button $source {} $action {} $data {} $state $rootX $rootY + # } + } +};# tkdnd::_end_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_specific_types { types } { + variable _platform_namespace + ${_platform_namespace}::platform_specific_types $types +}; # tkdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + variable _platform_namespace + ${_platform_namespace}::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_specific_type { type } { + variable _platform_namespace + ${_platform_namespace}::platform_specific_type $type +}; # tkdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_type { type } { + variable _platform_namespace + ${_platform_namespace}::platform_independent_type $type +}; # tkdnd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::bytes_to_string +# ---------------------------------------------------------------------------- +proc ::tkdnd::bytes_to_string { bytes } { + set string {} + foreach byte $bytes { + append string [binary format c $byte] + } + return $string +};# tkdnd::bytes_to_string + +# ---------------------------------------------------------------------------- +# Command tkdnd::urn_unquote +# ---------------------------------------------------------------------------- +proc ::tkdnd::urn_unquote {url} { + set result "" + set start 0 + while {[regexp -start $start -indices {%[0-9a-fA-F]{2}} $url match]} { + foreach {first last} $match break + append result [string range $url $start [expr {$first - 1}]] + append result [format %c 0x[string range $url [incr first] $last]] + set start [incr last] + } + append result [string range $url $start end] + return [encoding convertfrom utf-8 $result] +};# tkdnd::urn_unquote diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_compat.tcl b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_compat.tcl new file mode 100644 index 0000000000000000000000000000000000000000..efc96f7bb2fe74a9bafd1e79681c275c8ea0f8fc --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_compat.tcl @@ -0,0 +1,160 @@ +# +# tkdnd_compat.tcl -- +# +# This file implements some utility procedures, to support older versions +# of the TkDND package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval compat { + +};# namespace compat + +# ---------------------------------------------------------------------------- +# Command ::dnd +# ---------------------------------------------------------------------------- +proc ::dnd {method window args} { + switch $method { + bindtarget { + switch [llength $args] { + 0 {return [tkdnd::compat::bindtarget0 $window]} + 1 {return [tkdnd::compat::bindtarget1 $window [lindex $args 0]]} + 2 {return [tkdnd::compat::bindtarget2 $window [lindex $args 0] \ + [lindex $args 1]]} + 3 {return [tkdnd::compat::bindtarget3 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2]]} + 4 {return [tkdnd::compat::bindtarget4 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2] [lindex $args 3]]} + } + } + cleartarget { + return [tkdnd::compat::cleartarget $window] + } + bindsource { + switch [llength $args] { + 0 {return [tkdnd::compat::bindsource0 $window]} + 1 {return [tkdnd::compat::bindsource1 $window [lindex $args 0]]} + 2 {return [tkdnd::compat::bindsource2 $window [lindex $args 0] \ + [lindex $args 1]]} + 3 {return [tkdnd::compat::bindsource3 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2]]} + } + } + clearsource { + return [tkdnd::compat::clearsource $window] + } + drag { + return [tkdnd::_init_drag 1 $window "press" 0 0 0 0] + } + } + error "invalid number of arguments!" 
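# ----------------------------------------------------------------------------
# Usage sketch (illustrative) of the legacy calls this dispatcher accepts;
# widget names are placeholders, and <Drop> / %D are the old-style TkDND 1.x
# event and substitution names that the compat procs below map onto the new
# <<Drop:...>> bindings.
#
#   dnd bindtarget .text text/uri-list <Drop> {puts "dropped: %D"}
#   dnd bindsource .text text/plain {string trim [.text get 1.0 end]}
# ----------------------------------------------------------------------------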
+};# ::dnd + +# ---------------------------------------------------------------------------- +# Command compat::bindtarget +# ---------------------------------------------------------------------------- +proc compat::bindtarget0 {window} { + return [bind $window <>] +};# compat::bindtarget0 + +proc compat::bindtarget1 {window type} { + return [bindtarget2 $window $type ] +};# compat::bindtarget1 + +proc compat::bindtarget2 {window type event} { + switch $event { + {return [bind $window <>]} + {return [bind $window <>]} + {return [bind $window <>]} + {return [bind $window <>]} + } +};# compat::bindtarget2 + +proc compat::bindtarget3 {window type event script} { + set type [normalise_type $type] + ::tkdnd::drop_target register $window [list $type] + switch $event { + {return [bind $window <> $script]} + {return [bind $window <> $script]} + {return [bind $window <> $script]} + {return [bind $window <> $script]} + } +};# compat::bindtarget3 + +proc compat::bindtarget4 {window type event script priority} { + return [bindtarget3 $window $type $event $script] +};# compat::bindtarget4 + +proc compat::normalise_type { type } { + switch $type { + text/plain - + {text/plain;charset=UTF-8} - + Text {return DND_Text} + text/uri-list - + Files {return DND_Files} + default {return $type} + } +};# compat::normalise_type + +# ---------------------------------------------------------------------------- +# Command compat::bindsource +# ---------------------------------------------------------------------------- +proc compat::bindsource0 {window} { + return [bind $window <>] +};# compat::bindsource0 + +proc compat::bindsource1 {window type} { + return [bindsource2 $window $type ] +};# compat::bindsource1 + +proc compat::bindsource2 {window type script} { + set type [normalise_type $type] + ::tkdnd::drag_source register $window $type + bind $window <> "list {copy} {%t} \[$script\]" +};# compat::bindsource2 + +proc compat::bindsource3 {window type script priority} { + return [bindsource2 $window $type $script] +};# compat::bindsource3 + +# ---------------------------------------------------------------------------- +# Command compat::cleartarget +# ---------------------------------------------------------------------------- +proc compat::cleartarget {window} { +};# compat::cleartarget + +# ---------------------------------------------------------------------------- +# Command compat::clearsource +# ---------------------------------------------------------------------------- +proc compat::clearsource {window} { +};# compat::clearsource diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_generic.tcl b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_generic.tcl new file mode 100644 index 0000000000000000000000000000000000000000..698b464fc68e8a2e0e681f5bac32c4c63338f2c3 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_generic.tcl @@ -0,0 +1,520 @@ +# +# tkdnd_generic.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval generic { + variable _types {} + variable _typelist {} + variable _codelist {} + variable _actionlist {} + variable _pressedkeys {} + variable _action {} + variable _common_drag_source_types {} + variable _common_drop_target_types {} + variable _drag_source {} + variable _drop_target {} + + variable _last_mouse_root_x 0 + variable _last_mouse_root_y 0 + + variable _tkdnd2platform + variable _platform2tkdnd + + proc debug {msg} { + puts $msg + };# debug + + proc initialise { } { + };# initialise + + proc initialise_platform_to_tkdnd_types { types } { + variable _platform2tkdnd + variable _tkdnd2platform + set _platform2tkdnd [dict create {*}$types] + set _tkdnd2platform [dict create] + foreach type [dict keys $_platform2tkdnd] { + dict lappend _tkdnd2platform [dict get $_platform2tkdnd $type] $type + } + };# initialise_platform_to_tkdnd_types + + proc initialise_tkdnd_to_platform_types { types } { + variable _tkdnd2platform + set _tkdnd2platform [dict create {*}$types] + };# initialise_tkdnd_to_platform_types + +};# namespace generic + +# ---------------------------------------------------------------------------- +# Command generic::HandleEnter +# ---------------------------------------------------------------------------- +proc generic::HandleEnter { drop_target drag_source typelist codelist + actionlist pressedkeys } { + variable _typelist; set _typelist $typelist + variable _pressedkeys; set _pressedkeys $pressedkeys + variable _action; set _action refuse_drop + variable _common_drag_source_types; set _common_drag_source_types {} + variable _common_drop_target_types; set _common_drop_target_types {} + variable _actionlist + variable _drag_source; set _drag_source $drag_source + variable _drop_target; set _drop_target {} + variable _actionlist; set _actionlist $actionlist + variable _codelist set _codelist $codelist + + variable _last_mouse_root_x; set _last_mouse_root_x 0 + variable _last_mouse_root_y; set _last_mouse_root_y 0 + # debug "\n===============================================================" + # debug "generic::HandleEnter: drop_target=$drop_target,\ + # drag_source=$drag_source,\ + # typelist=$typelist" + # debug "generic::HandleEnter: ACTION: default" + return 
default +};# generic::HandleEnter + +# ---------------------------------------------------------------------------- +# Command generic::HandlePosition +# ---------------------------------------------------------------------------- +proc generic::HandlePosition { drop_target drag_source pressedkeys + rootX rootY { time 0 } } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + + # debug "generic::HandlePosition: drop_target=$drop_target,\ + # _drop_target=$_drop_target, rootX=$rootX, rootY=$rootY" + + if {![info exists _drag_source] && ![string length $_drag_source]} { + # debug "generic::HandlePosition: no or empty _drag_source:\ + # return refuse_drop" + return refuse_drop + } + + if {$drag_source ne "" && $drag_source ne $_drag_source} { + debug "generic position event from unexpected source: $_drag_source\ + != $drag_source" + return refuse_drop + } + + set _pressedkeys $pressedkeys + + ## Does the new drop target support any of our new types? + # foreach {common_drag_source_types common_drop_target_types} \ + # [GetWindowCommonTypes $drop_target $_typelist] {break} + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [FindWindowWithCommonTypes $drop_target $_typelist] {break} + set data [GetDroppedData $time] + + # debug "\t($_drop_target) -> ($drop_target)" + if {$drop_target != $_drop_target} { + if {[string length $_drop_target]} { + ## Call the <> event. + # debug "\t<> on $_drop_target" + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A \{$_action\} %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D \{\} %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + uplevel \#0 $cmd + } + } + set _drop_target $drop_target + set _action refuse_drop + + if {[llength $common_drag_source_types]} { + set _action [lindex $_actionlist 0] + set _common_drag_source_types $common_drag_source_types + set _common_drop_target_types $common_drop_target_types + ## Drop target supports at least one type. Send a <>. 
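# ----------------------------------------------------------------------------
# Sketch of a drop-target binding driven by the substitutions assembled here
# (illustrative; <<Drop>> is the standard TkDND virtual event, the widget and
# helper proc are placeholders). Whatever the bound script returns becomes
# the current action, so it should end with one of copy, move, link, ask,
# private or refuse_drop.
#
#   proc on_drop {w data} { $w insert end $data; return copy }
#   ::tkdnd::drop_target register .text DND_Text
#   bind .text <<Drop>> {on_drop %W %D}
# ----------------------------------------------------------------------------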
+ # puts "<> -> $drop_target" + set cmd [bind $drop_target <>] + if {[string length $cmd]} { + focus $drop_target + set cmd [string map [list %W $drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + } + } + } + + set _drop_target {} + if {[llength $common_drag_source_types]} { + set _common_drag_source_types $common_drag_source_types + set _common_drop_target_types $common_drop_target_types + set _drop_target $drop_target + ## Drop target supports at least one type. Send a <>. + set cmd [bind $drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + } + # Return values: copy, move, link, ask, private, refuse_drop, default + # debug "generic::HandlePosition: ACTION: $_action" + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action +};# generic::HandlePosition + +# ---------------------------------------------------------------------------- +# Command generic::HandleLeave +# ---------------------------------------------------------------------------- +proc generic::HandleLeave { } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + variable _last_mouse_root_x + variable _last_mouse_root_y + if {![info exists _drop_target]} {set _drop_target {}} + # debug "generic::HandleLeave: _drop_target=$_drop_target" + if {[info exists _drop_target] && [string length $_drop_target]} { + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target \ + %X $_last_mouse_root_x %Y $_last_mouse_root_y \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A \{$_action\} %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D \{\} %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + } + foreach var {_types _typelist _actionlist _pressedkeys _action + _common_drag_source_types _common_drop_target_types + 
_drag_source _drop_target} { + set $var {} + } +};# generic::HandleLeave + +# ---------------------------------------------------------------------------- +# Command generic::HandleDrop +# ---------------------------------------------------------------------------- +proc generic::HandleDrop {drop_target drag_source pressedkeys rootX rootY time } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + variable _last_mouse_root_x + variable _last_mouse_root_y + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + + set _pressedkeys $pressedkeys + + # puts "generic::HandleDrop: $time" + + if {![info exists _drag_source] && ![string length $_drag_source]} { + return refuse_drop + } + if {![info exists _drop_target] && ![string length $_drop_target]} { + return refuse_drop + } + if {![llength $_common_drag_source_types]} {return refuse_drop} + ## Get the dropped data. + set data [GetDroppedData $time] + ## Try to select the most specific <> event. + foreach type [concat $_common_drag_source_types $_common_drop_target_types] { + set type [platform_independent_type $type] + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + # Return values: copy, move, link, ask, private, refuse_drop + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action + } + } + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + # Return values: copy, move, link, ask, private, refuse_drop + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action +};# generic::HandleDrop + +# ---------------------------------------------------------------------------- +# Command generic::GetWindowCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetWindowCommonTypes { win typelist } { + set types [bind $win <>] + # debug ">> Accepted types: $win $_types" + set common_drag_source_types {} + set common_drop_target_types {} + if {[llength $types]} { + ## Examine the drop target types, to find at least one 
match with the drag + ## source types... + set supported_types [supported_types $typelist] + foreach type $types { + foreach matched [lsearch -glob -all -inline $supported_types $type] { + ## Drop target supports this type. + lappend common_drag_source_types $matched + lappend common_drop_target_types $type + } + } + } + list $common_drag_source_types $common_drop_target_types +};# generic::GetWindowCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::FindWindowWithCommonTypes +# ---------------------------------------------------------------------------- +proc generic::FindWindowWithCommonTypes { win typelist } { + set toplevel [winfo toplevel $win] + while {![string equal $win $toplevel]} { + foreach {common_drag_source_types common_drop_target_types} \ + [GetWindowCommonTypes $win $typelist] {break} + if {[llength $common_drag_source_types]} { + return [list $win $common_drag_source_types $common_drop_target_types] + } + set win [winfo parent $win] + } + ## We have reached the toplevel, which may be also a target (SF Bug #30) + foreach {common_drag_source_types common_drop_target_types} \ + [GetWindowCommonTypes $win $typelist] {break} + if {[llength $common_drag_source_types]} { + return [list $win $common_drag_source_types $common_drop_target_types] + } + return { {} {} {} } +};# generic::FindWindowWithCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::GetDroppedData +# ---------------------------------------------------------------------------- +proc generic::GetDroppedData { time } { + variable _dropped_data + return $_dropped_data +};# generic::GetDroppedData + +# ---------------------------------------------------------------------------- +# Command generic::SetDroppedData +# ---------------------------------------------------------------------------- +proc generic::SetDroppedData { data } { + variable _dropped_data + set _dropped_data $data +};# generic::SetDroppedData + +# ---------------------------------------------------------------------------- +# Command generic::GetDragSource +# ---------------------------------------------------------------------------- +proc generic::GetDragSource { } { + variable _drag_source + return $_drag_source +};# generic::GetDragSource + +# ---------------------------------------------------------------------------- +# Command generic::GetDropTarget +# ---------------------------------------------------------------------------- +proc generic::GetDropTarget { } { + variable _drop_target + return $_drop_target +};# generic::GetDropTarget + +# ---------------------------------------------------------------------------- +# Command generic::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetDragSourceCommonTypes { } { + variable _common_drag_source_types + return $_common_drag_source_types +};# generic::GetDragSourceCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::GetDropTargetCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetDropTargetCommonTypes { } { + variable _common_drag_source_types + return $_common_drag_source_types +};# generic::GetDropTargetCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::platform_specific_types +# 
---------------------------------------------------------------------------- +proc generic::platform_specific_types { types } { + set new_types {} + foreach type $types { + set new_types [concat $new_types [platform_specific_type $type]] + } + return $new_types +}; # generic::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command generic::platform_specific_type +# ---------------------------------------------------------------------------- +proc generic::platform_specific_type { type } { + variable _tkdnd2platform + if {[dict exists $_tkdnd2platform $type]} { + return [dict get $_tkdnd2platform $type] + } + list $type +}; # generic::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + set new_types {} + foreach type $types { + set new_types [concat $new_types [platform_independent_type $type]] + } + return $new_types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command generic::platform_independent_type +# ---------------------------------------------------------------------------- +proc generic::platform_independent_type { type } { + variable _platform2tkdnd + if {[dict exists $_platform2tkdnd $type]} { + return [dict get $_platform2tkdnd $type] + } + return $type +}; # generic::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command generic::supported_types +# ---------------------------------------------------------------------------- +proc generic::supported_types { types } { + set new_types {} + foreach type $types { + if {[supported_type $type]} {lappend new_types $type} + } + return $new_types +}; # generic::supported_types + +# ---------------------------------------------------------------------------- +# Command generic::supported_type +# ---------------------------------------------------------------------------- +proc generic::supported_type { type } { + variable _platform2tkdnd + if {[dict exists $_platform2tkdnd $type]} { + return 1 + } + return 0 +}; # generic::supported_type diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_macosx.tcl b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_macosx.tcl new file mode 100644 index 0000000000000000000000000000000000000000..307f6da2e94286d01dc9e068fffebe46de3c43f3 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_macosx.tcl @@ -0,0 +1,144 @@ +# +# tkdnd_macosx.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. + +# This software is copyrighted by: +# Georgios Petasis, Athens, Greece. +# e-mail: petasisg@yahoo.gr, petasis@iit.demokritos.gr +# +# Mac portions (c) 2009 Kevin Walzer/WordTech Communications LLC, +# kw@codebykevin.com +# +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. 
+# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +#basic API for Mac Drag and Drop + +#two data types supported: strings and file paths + +#two commands at C level: ::tkdnd::macdnd::registerdragwidget, ::tkdnd::macdnd::unregisterdragwidget + +#data retrieval mechanism: text or file paths are copied from drag clipboard to system clipboard and retrieved via [clipboard get]; array of file paths is converted to single tab-separated string, can be split into Tcl list + +if {[tk windowingsystem] eq "aqua" && "AppKit" ni [winfo server .]} { + error {TkAqua Cocoa required} +} + +namespace eval macdnd { + + proc initialise { } { + ## Mapping from platform types to TkDND types... + ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + NSPasteboardTypeString DND_Text \ + NSFilenamesPboardType DND_Files \ + NSPasteboardTypeHTML DND_HTML \ + ] + };# initialise + +};# namespace macdnd + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleEnter +# ---------------------------------------------------------------------------- +proc macdnd::HandleEnter { path drag_source typelist { data {} } } { + variable _pressedkeys + variable _actionlist + set _pressedkeys 1 + set _actionlist { copy move link ask private } + ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleEnter $path $drag_source $typelist $typelist \ + $_actionlist $_pressedkeys +};# macdnd::HandleEnter + +# ---------------------------------------------------------------------------- +# Command macdnd::HandlePosition +# ---------------------------------------------------------------------------- +proc macdnd::HandlePosition { drop_target rootX rootY {drag_source {}} } { + variable _pressedkeys + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + ::tkdnd::generic::HandlePosition $drop_target $drag_source \ + $_pressedkeys $rootX $rootY +};# macdnd::HandlePosition + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleLeave +# ---------------------------------------------------------------------------- +proc macdnd::HandleLeave { args } { + ::tkdnd::generic::HandleLeave +};# macdnd::HandleLeave + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleDrop +# ---------------------------------------------------------------------------- +proc macdnd::HandleDrop { drop_target data args } { + variable _pressedkeys + variable _last_mouse_root_x + variable _last_mouse_root_y + ## Get the dropped data... 
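# ----------------------------------------------------------------------------
# Rough call sequence (illustrative; widget path, pasteboard type and data are
# placeholders) that the compiled aqua extension is expected to make for one
# incoming drop, with the generic layer doing the event dispatch:
#
#   ::tkdnd::macdnd::HandleEnter    .text {} NSFilenamesPboardType {/tmp/track.wav}
#   ::tkdnd::macdnd::HandlePosition .text 400 300
#   ::tkdnd::macdnd::HandleDrop     .text {/tmp/track.wav}
# ----------------------------------------------------------------------------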
+ ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleDrop {} {} $_pressedkeys \ + $_last_mouse_root_x $_last_mouse_root_y 0 +};# macdnd::HandleDrop + +# ---------------------------------------------------------------------------- +# Command macdnd::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc macdnd::GetDragSourceCommonTypes { } { + ::tkdnd::generic::GetDragSourceCommonTypes +};# macdnd::GetDragSourceCommonTypes + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc macdnd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # macdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc macdnd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # macdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc macdnd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # macdnd::platform_independent_type diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_unix.tcl b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_unix.tcl new file mode 100644 index 0000000000000000000000000000000000000000..56d17c4db718274df4b3b7a14f0d8e055a1002b6 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_unix.tcl @@ -0,0 +1,810 @@ +# +# tkdnd_unix.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval xdnd { + variable _dragging 0 + + proc initialise { } { + ## Mapping from platform types to TkDND types... + ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + text/plain\;charset=utf-8 DND_Text \ + UTF8_STRING DND_Text \ + text/plain DND_Text \ + STRING DND_Text \ + TEXT DND_Text \ + COMPOUND_TEXT DND_Text \ + text/uri-list DND_Files \ + text/html\;charset=utf-8 DND_HTML \ + text/html DND_HTML \ + application/x-color DND_Color \ + ] + };# initialise + +};# namespace xdnd + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndEnter { path drag_source typelist time { data {} } } { + variable _pressedkeys + variable _actionlist + variable _typelist + set _pressedkeys 1 + set _actionlist { copy move link ask private } + set _typelist $typelist + # puts "xdnd::HandleXdndEnter: $time" + ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleEnter $path $drag_source $typelist $typelist \ + $_actionlist $_pressedkeys +};# xdnd::HandleXdndEnter + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndPosition +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndPosition { drop_target rootX rootY time {drag_source {}} } { + variable _pressedkeys + variable _typelist + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + # puts "xdnd::HandleXdndPosition: $time" + ## Get the dropped data... + catch { + ::tkdnd::generic::SetDroppedData [GetPositionData $drop_target $_typelist $time] + } + ::tkdnd::generic::HandlePosition $drop_target $drag_source \ + $_pressedkeys $rootX $rootY +};# xdnd::HandleXdndPosition + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndLeave +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndLeave { } { + ::tkdnd::generic::HandleLeave +};# xdnd::HandleXdndLeave + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndDrop +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndDrop { time } { + variable _pressedkeys + variable _last_mouse_root_x + variable _last_mouse_root_y + ## Get the dropped data... 
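# ----------------------------------------------------------------------------
# Sketch (illustrative) of the mapping installed by xdnd::initialise above:
# a TkDND type expands to every X11 target it covers, and any of those
# targets folds back to the TkDND name.
#
#   ::tkdnd::generic::platform_specific_type DND_Files
#   ;# -> text/uri-list
#   ::tkdnd::generic::platform_specific_type DND_Text
#   ;# -> text/plain;charset=utf-8 UTF8_STRING text/plain STRING TEXT COMPOUND_TEXT
#   ::tkdnd::generic::platform_independent_type UTF8_STRING
#   ;# -> DND_Text
# ----------------------------------------------------------------------------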
+ ::tkdnd::generic::SetDroppedData [GetDroppedData \ + [::tkdnd::generic::GetDragSource] [::tkdnd::generic::GetDropTarget] \ + [::tkdnd::generic::GetDragSourceCommonTypes] $time] + ::tkdnd::generic::HandleDrop {} {} $_pressedkeys \ + $_last_mouse_root_x $_last_mouse_root_y $time +};# xdnd::HandleXdndDrop + +# ---------------------------------------------------------------------------- +# Command xdnd::GetPositionData +# ---------------------------------------------------------------------------- +proc xdnd::GetPositionData { drop_target typelist time } { + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [::tkdnd::generic::FindWindowWithCommonTypes $drop_target $typelist] {break} + GetDroppedData [::tkdnd::generic::GetDragSource] $drop_target \ + $common_drag_source_types $time +};# xdnd::GetPositionData + +# ---------------------------------------------------------------------------- +# Command xdnd::GetDroppedData +# ---------------------------------------------------------------------------- +proc xdnd::GetDroppedData { _drag_source _drop_target _common_drag_source_types time } { + if {![llength $_common_drag_source_types]} { + error "no common data types between the drag source and drop target widgets" + } + ## Is drag source in this application? + if {[catch {winfo pathname -displayof $_drop_target $_drag_source} p]} { + set _use_tk_selection 0 + } else { + set _use_tk_selection 1 + } + foreach type $_common_drag_source_types { + # puts "TYPE: $type ($_drop_target)" + # _get_selection $_drop_target $time $type + if {$_use_tk_selection} { + if {![catch { + selection get -displayof $_drop_target -selection XdndSelection \ + -type $type + } result options]} { + return [normalise_data $type $result] + } + } else { + # puts "_selection_get -displayof $_drop_target -selection XdndSelection \ + # -type $type -time $time" + #after 100 [list focus -force $_drop_target] + #after 50 [list raise [winfo toplevel $_drop_target]] + if {![catch { + _selection_get -displayof $_drop_target -selection XdndSelection \ + -type $type -time $time + } result options]} { + return [normalise_data $type $result] + } + } + } + return -options $options $result +};# xdnd::GetDroppedData + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc xdnd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # xdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc xdnd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # xdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc xdnd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type 
$type +}; # xdnd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command xdnd::_normalise_data +# ---------------------------------------------------------------------------- +proc xdnd::normalise_data { type data } { + # Tk knows how to interpret the following types: + # STRING, TEXT, COMPOUND_TEXT + # UTF8_STRING + # Else, it returns a list of 8 or 32 bit numbers... + switch -glob $type { + STRING - UTF8_STRING - TEXT - COMPOUND_TEXT {return $data} + text/html { + if {[catch { + encoding convertfrom unicode $data + } string]} { + set string $data + } + return [string map {\r\n \n} $string] + } + text/html\;charset=utf-8 - + text/plain\;charset=utf-8 - + text/plain { + if {[catch { + encoding convertfrom utf-8 [tkdnd::bytes_to_string $data] + } string]} { + set string $data + } + return [string map {\r\n \n} $string] + } + text/uri-list* { + if {[catch { + encoding convertfrom utf-8 [tkdnd::bytes_to_string $data] + } string]} { + set string $data + } + ## Get rid of \r\n + set string [string trim [string map {\r\n \n} $string]] + set files {} + foreach quoted_file [split $string] { + set file [tkdnd::urn_unquote $quoted_file] + switch -glob $file { + \#* {} + file://* {lappend files [string range $file 7 end]} + ftp://* - + https://* - + http://* {lappend files $quoted_file} + default {lappend files $file} + } + } + return $files + } + application/x-color { + return $data + } + text/x-moz-url - + application/q-iconlist - + default {return $data} + } +}; # xdnd::normalise_data + +############################################################################# +## +## XDND drag implementation +## +############################################################################# + +# ---------------------------------------------------------------------------- +# Command xdnd::_selection_ownership_lost +# ---------------------------------------------------------------------------- +proc xdnd::_selection_ownership_lost {} { + variable _dragging + set _dragging 0 +};# _selection_ownership_lost + +# ---------------------------------------------------------------------------- +# Command xdnd::_dodragdrop +# ---------------------------------------------------------------------------- +proc xdnd::_dodragdrop { source actions types data button } { + variable _dragging + + # puts "xdnd::_dodragdrop: source: $source, actions: $actions, types: $types,\ + # data: \"$data\", button: $button" + if {$_dragging} { + ## We are in the middle of another drag operation... 
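# ----------------------------------------------------------------------------
# Worked example (illustrative) of the text/uri-list handling in
# xdnd::normalise_data above: the payload is split, percent-decoded with
# tkdnd::urn_unquote and stripped of its file:// prefix, so drop bindings
# receive plain paths.
#
#   ::tkdnd::xdnd::normalise_data text/uri-list "file:///home/me/My%20Song.wav\r\n"
#   ;# -> {/home/me/My Song.wav}
# ----------------------------------------------------------------------------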
+ error "another drag operation in progress" + } + + variable _dodragdrop_drag_source $source + variable _dodragdrop_drop_target 0 + variable _dodragdrop_drop_target_proxy 0 + variable _dodragdrop_actions $actions + variable _dodragdrop_action_descriptions $actions + variable _dodragdrop_actions_len [llength $actions] + variable _dodragdrop_types $types + variable _dodragdrop_types_len [llength $types] + variable _dodragdrop_data $data + variable _dodragdrop_transfer_data {} + variable _dodragdrop_button $button + variable _dodragdrop_time 0 + variable _dodragdrop_default_action refuse_drop + variable _dodragdrop_waiting_status 0 + variable _dodragdrop_drop_target_accepts_drop 0 + variable _dodragdrop_drop_target_accepts_action refuse_drop + variable _dodragdrop_current_cursor $_dodragdrop_default_action + variable _dodragdrop_drop_occured 0 + variable _dodragdrop_selection_requestor 0 + + ## + ## If we have more than 3 types, the property XdndTypeList must be set on + ## the drag source widget... + ## + if {$_dodragdrop_types_len > 3} { + _announce_type_list $_dodragdrop_drag_source $_dodragdrop_types + } + + ## + ## Announce the actions & their descriptions on the XdndActionList & + ## XdndActionDescription properties... + ## + _announce_action_list $_dodragdrop_drag_source $_dodragdrop_actions \ + $_dodragdrop_action_descriptions + + ## + ## Arrange selection handlers for our drag source, and all the supported types + ## + registerSelectionHandler $source $types + + ## + ## Step 1: When a drag begins, the source takes ownership of XdndSelection. + ## + selection own -command ::tkdnd::xdnd::_selection_ownership_lost \ + -selection XdndSelection $source + set _dragging 1 + + ## Grab the mouse pointer... + _grab_pointer $source $_dodragdrop_default_action + + ## Register our generic event handler... + # The generic event callback will report events by modifying variable + # ::xdnd::_dodragdrop_event: a dict with event information will be set as + # the value of the variable... + _register_generic_event_handler + + ## Set a timeout for debugging purposes... + # after 60000 {set ::tkdnd::xdnd::_dragging 0} + + tkwait variable ::tkdnd::xdnd::_dragging + _SendXdndLeave + + set _dragging 0 + _ungrab_pointer $source + _unregister_generic_event_handler + catch {selection clear -selection XdndSelection} + unregisterSelectionHandler $source $types + return $_dodragdrop_drop_target_accepts_action +};# xdnd::_dodragdrop + +# ---------------------------------------------------------------------------- +# Command xdnd::_process_drag_events +# ---------------------------------------------------------------------------- +proc xdnd::_process_drag_events {event} { + # The return value from proc is normally 0. 
A non-zero return value indicates + # that the event is not to be handled further; that is, proc has done all + # processing that is to be allowed for the event + variable _dragging + if {!$_dragging} {return 0} + # puts $event + + variable _dodragdrop_time + set time [dict get $event time] + set type [dict get $event type] + if {$time < $_dodragdrop_time && ![string equal $type SelectionRequest]} { + return 0 + } + set _dodragdrop_time $time + + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_default_action + switch $type { + MotionNotify { + set rootx [dict get $event x_root] + set rooty [dict get $event y_root] + set window [_find_drop_target_window $_dodragdrop_drag_source \ + $rootx $rooty] + if {[string length $window]} { + ## Examine the modifiers to suggest an action... + set _dodragdrop_default_action [_default_action $event] + ## Is it a Tk widget? + # set path [winfo containing $rootx $rooty] + # puts "Window under mouse: $window ($path)" + if {$_dodragdrop_drop_target != $window} { + ## Send XdndLeave to $_dodragdrop_drop_target + _SendXdndLeave + ## Is there a proxy? If not, _find_drop_target_proxy returns the + ## target window, so we always get a valid "proxy". + set proxy [_find_drop_target_proxy $_dodragdrop_drag_source $window] + ## Send XdndEnter to $window + _SendXdndEnter $window $proxy + ## Send XdndPosition to $_dodragdrop_drop_target + _SendXdndPosition $rootx $rooty $_dodragdrop_default_action + } else { + ## Send XdndPosition to $_dodragdrop_drop_target + _SendXdndPosition $rootx $rooty $_dodragdrop_default_action + } + } else { + ## No window under the mouse. Send XdndLeave to $_dodragdrop_drop_target + _SendXdndLeave + } + } + ButtonPress { + } + ButtonRelease { + variable _dodragdrop_button + set button [dict get $event button] + if {$button == $_dodragdrop_button} { + ## The button that initiated the drag was released. Trigger drop... + _SendXdndDrop + } + return 1 + } + KeyPress { + } + KeyRelease { + set keysym [dict get $event keysym] + switch $keysym { + Escape { + ## The user has pressed escape. Abort... 
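# ----------------------------------------------------------------------------
# Sketch (illustrative; values are placeholders) of the event dictionaries
# this handler consumes; the generic event handler registered in _dodragdrop
# reports each X event as a dict, and the real dicts carry further fields
# such as the modifier state used to pick the default action.
#
#   set ev [dict create type MotionNotify time 12345 x_root 400 y_root 300]
#   ::tkdnd::xdnd::_process_drag_events $ev  ;# sends XdndEnter / XdndPosition
#
#   set ev [dict create type ButtonRelease time 12400 button 1]
#   ::tkdnd::xdnd::_process_drag_events $ev  ;# triggers _SendXdndDrop
# ----------------------------------------------------------------------------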
+ if {$_dragging} {set _dragging 0} + } + } + } + SelectionRequest { + variable _dodragdrop_selection_requestor + variable _dodragdrop_selection_property + variable _dodragdrop_selection_selection + variable _dodragdrop_selection_target + variable _dodragdrop_selection_time + set _dodragdrop_selection_requestor [dict get $event requestor] + set _dodragdrop_selection_property [dict get $event property] + set _dodragdrop_selection_selection [dict get $event selection] + set _dodragdrop_selection_target [dict get $event target] + set _dodragdrop_selection_time $time + return 0 + } + default { + return 0 + } + } + return 0 +};# _process_drag_events + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndEnter {window proxy} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_types + variable _dodragdrop_waiting_status + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_target > 0} _SendXdndLeave + if {$_dodragdrop_drop_occured} return + set _dodragdrop_drop_target $window + set _dodragdrop_drop_target_proxy $proxy + set _dodragdrop_waiting_status 0 + if {$_dodragdrop_drop_target < 1} return + # puts "XdndEnter: $_dodragdrop_drop_target $_dodragdrop_drop_target_proxy" + _send_XdndEnter $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy $_dodragdrop_types +};# xdnd::_SendXdndEnter + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndPosition +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndPosition {rootx rooty action} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_waiting_status + ## Arrange a new XdndPosition, to be send periodically... 
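+  ## (i.e. re-sent every 200 ms via the "after" heartbeat below, so the drop
+  ##  target keeps receiving XdndPosition messages even while the mouse is
+  ##  stationary)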
+ variable _dodragdrop_xdnd_position_heartbeat + catch {after cancel $_dodragdrop_xdnd_position_heartbeat} + set _dodragdrop_xdnd_position_heartbeat [after 200 \ + [list ::tkdnd::xdnd::_SendXdndPosition $rootx $rooty $action]] + if {$_dodragdrop_waiting_status} {return} + # puts "XdndPosition: $_dodragdrop_drop_target $rootx $rooty $action" + _send_XdndPosition $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy $rootx $rooty $action + set _dodragdrop_waiting_status 1 +};# xdnd::_SendXdndPosition + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndStatus +# ---------------------------------------------------------------------------- +proc xdnd::_HandleXdndStatus {event} { + variable _dodragdrop_drop_target + variable _dodragdrop_waiting_status + + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + set _dodragdrop_waiting_status 0 + foreach key {target accept want_position action x y w h} { + set $key [dict get $event $key] + } + set _dodragdrop_drop_target_accepts_drop $accept + set _dodragdrop_drop_target_accepts_action $action + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + _update_cursor + # puts "XdndStatus: $event" +};# xdnd::_HandleXdndStatus + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndFinished +# ---------------------------------------------------------------------------- +proc xdnd::_HandleXdndFinished {event} { + variable _dodragdrop_xdnd_finished_event_after_id + catch {after cancel $_dodragdrop_xdnd_finished_event_after_id} + set _dodragdrop_xdnd_finished_event_after_id {} + variable _dodragdrop_drop_target + set _dodragdrop_drop_target 0 + variable _dragging + if {$_dragging} {set _dragging 0} + + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + if {[dict size $event]} { + foreach key {target accept action} { + set $key [dict get $event $key] + } + set _dodragdrop_drop_target_accepts_drop $accept + set _dodragdrop_drop_target_accepts_action $action + } else { + set _dodragdrop_drop_target_accepts_drop 0 + } + if {!$_dodragdrop_drop_target_accepts_drop} { + set _dodragdrop_drop_target_accepts_action refuse_drop + } + # puts "XdndFinished: $event" +};# xdnd::_HandleXdndFinished + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndLeave +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndLeave {} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_target_proxy + # puts "XdndLeave: $_dodragdrop_drop_target" + _send_XdndLeave $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy + set _dodragdrop_drop_target 0 + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + set _dodragdrop_drop_target_accepts_drop 0 + set _dodragdrop_drop_target_accepts_action refuse_drop + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + _update_cursor +};# xdnd::_SendXdndLeave + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndDrop +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndDrop {} 
{ + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} { + ## The mouse has been released over a widget that does not accept drops. + _HandleXdndFinished {} + return + } + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} {return} + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + + set _dodragdrop_drop_occured 1 + _update_cursor clock + + if {!$_dodragdrop_drop_target_accepts_drop} { + _SendXdndLeave + _HandleXdndFinished {} + return + } + # puts "XdndDrop: $_dodragdrop_drop_target" + variable _dodragdrop_drop_timestamp + set _dodragdrop_drop_timestamp [_send_XdndDrop \ + $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy] + set _dodragdrop_drop_target 0 + # puts "XdndDrop: $_dodragdrop_drop_target" + ## Arrange a timeout for receiving XdndFinished... + variable _dodragdrop_xdnd_finished_event_after_id + set _dodragdrop_xdnd_finished_event_after_id \ + [after 10000 [list ::tkdnd::xdnd::_HandleXdndFinished {}]] +};# xdnd::_SendXdndDrop + +# ---------------------------------------------------------------------------- +# Command xdnd::_update_cursor +# ---------------------------------------------------------------------------- +proc xdnd::_update_cursor { {cursor {}}} { + # puts "_update_cursor $cursor" + variable _dodragdrop_current_cursor + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + + if {![string length $cursor]} { + set cursor refuse_drop + if {$_dodragdrop_drop_target_accepts_drop} { + set cursor $_dodragdrop_drop_target_accepts_action + } + } + if {![string equal $cursor $_dodragdrop_current_cursor]} { + _set_pointer_cursor $_dodragdrop_drag_source $cursor + set _dodragdrop_current_cursor $cursor + } +};# xdnd::_update_cursor + +# ---------------------------------------------------------------------------- +# Command xdnd::_default_action +# ---------------------------------------------------------------------------- +proc xdnd::_default_action {event} { + variable _dodragdrop_actions + variable _dodragdrop_actions_len + if {$_dodragdrop_actions_len == 1} {return [lindex $_dodragdrop_actions 0]} + + set alt [dict get $event Alt] + set shift [dict get $event Shift] + set control [dict get $event Control] + + if {$shift && $control && [lsearch $_dodragdrop_actions link] != -1} { + return link + } elseif {$control && [lsearch $_dodragdrop_actions copy] != -1} { + return copy + } elseif {$shift && [lsearch $_dodragdrop_actions move] != -1} { + return move + } elseif {$alt && [lsearch $_dodragdrop_actions link] != -1} { + return link + } + return default +};# xdnd::_default_action + +# ---------------------------------------------------------------------------- +# Command xdnd::getFormatForType +# ---------------------------------------------------------------------------- +proc xdnd::getFormatForType {type} { + switch -glob [string tolower $type] { + text/plain\;charset=utf-8 - + text/html\;charset=utf-8 - + utf8_string {set format UTF8_STRING} + text/html - + text/plain - + string - + text - + compound_text {set format STRING} + text/uri-list* {set format UTF8_STRING} + application/x-color {set format $type} + default {set format $type} + } + return $format +};# xdnd::getFormatForType + +# ---------------------------------------------------------------------------- +# Command xdnd::registerSelectionHandler +# 
---------------------------------------------------------------------------- +proc xdnd::registerSelectionHandler {source types} { + foreach type $types { + selection handle -selection XdndSelection \ + -type $type \ + -format [getFormatForType $type] \ + $source [list ::tkdnd::xdnd::_SendData $type] + } +};# xdnd::registerSelectionHandler + +# ---------------------------------------------------------------------------- +# Command xdnd::unregisterSelectionHandler +# ---------------------------------------------------------------------------- +proc xdnd::unregisterSelectionHandler {source types} { + foreach type $types { + catch { + selection handle -selection XdndSelection \ + -type $type \ + -format [getFormatForType $type] \ + $source {} + } + } +};# xdnd::unregisterSelectionHandler + +# ---------------------------------------------------------------------------- +# Command xdnd::_convert_to_unsigned +# ---------------------------------------------------------------------------- +proc xdnd::_convert_to_unsigned {data format} { + switch $format { + 8 { set mask 0xff } + 16 { set mask 0xffff } + 32 { set mask 0xffffff } + default {error "unsupported format $format"} + } + ## Convert signed integer into unsigned... + set d [list] + foreach num $data { + lappend d [expr { $num & $mask }] + } + return $d +};# xdnd::_convert_to_unsigned + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendData +# ---------------------------------------------------------------------------- +proc xdnd::_SendData {type offset bytes args} { + variable _dodragdrop_drag_source + variable _dodragdrop_types + variable _dodragdrop_data + variable _dodragdrop_transfer_data + + ## The variable _dodragdrop_data contains a list of data, one for each + ## type in the _dodragdrop_types variable. We have to search types, and find + ## the corresponding entry in the _dodragdrop_data list. + set index [lsearch $_dodragdrop_types $type] + if {$index < 0} { + error "unable to locate data suitable for type \"$type\"" + } + set typed_data [lindex $_dodragdrop_data $index] + set format 8 + if {$offset == 0} { + ## Prepare the data to be transferred... + switch -glob $type { + text/plain* - UTF8_STRING - STRING - TEXT - COMPOUND_TEXT { + binary scan [encoding convertto utf-8 $typed_data] \ + c* _dodragdrop_transfer_data + set _dodragdrop_transfer_data \ + [_convert_to_unsigned $_dodragdrop_transfer_data $format] + } + text/uri-list* { + set files [list] + foreach file $typed_data { + switch -glob $file { + *://* {lappend files $file} + default {lappend files file://$file} + } + } + binary scan [encoding convertto utf-8 "[join $files \r\n]\r\n"] \ + c* _dodragdrop_transfer_data + set _dodragdrop_transfer_data \ + [_convert_to_unsigned $_dodragdrop_transfer_data $format] + } + application/x-color { + set format 16 + ## Try to understand the provided data: we accept a standard Tk colour, + ## or a list of 3 values (red green blue) or a list of 4 values + ## (red green blue opacity). + switch [llength $typed_data] { + 1 { set color [winfo rgb $_dodragdrop_drag_source $typed_data] + lappend color 65535 } + 3 { set color $typed_data; lappend color 65535 } + 4 { set color $typed_data } + default {error "unknown color data: \"$typed_data\""} + } + ## Convert the 4 elements into 16 bit values... 
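+          ## (each channel is emitted as a 16-bit hex word, the usual X11
+          ##  convention for application/x-color data)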
+ set _dodragdrop_transfer_data [list] + foreach c $color { + lappend _dodragdrop_transfer_data [format 0x%04X $c] + } + } + default { + set format 32 + binary scan $typed_data c* _dodragdrop_transfer_data + } + } + } + + ## + ## Data has been split into bytes. Count the bytes requested, and return them + ## + set data [lrange $_dodragdrop_transfer_data $offset [expr {$offset+$bytes-1}]] + switch $format { + 8 { + set data [encoding convertfrom utf-8 [binary format c* $data]] + } + 16 { + variable _dodragdrop_selection_requestor + if {$_dodragdrop_selection_requestor} { + ## Tk selection cannot process this format (only 8 & 32 supported). + ## Call our XChangeProperty... + set numItems [llength $data] + variable _dodragdrop_selection_property + variable _dodragdrop_selection_selection + variable _dodragdrop_selection_target + variable _dodragdrop_selection_time + XChangeProperty $_dodragdrop_drag_source \ + $_dodragdrop_selection_requestor \ + $_dodragdrop_selection_property \ + $_dodragdrop_selection_target \ + $format \ + $_dodragdrop_selection_time \ + $data $numItems + return -code break + } + } + 32 { + } + default { + error "unsupported format $format" + } + } + # puts "SendData: $type $offset $bytes $args ($typed_data)" + # puts " $data" + return $data +};# xdnd::_SendData diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_utils.tcl b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_utils.tcl new file mode 100644 index 0000000000000000000000000000000000000000..ee961ddb1ca29b383496111eadc2ccdce7776b08 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_utils.tcl @@ -0,0 +1,252 @@ +# +# tkdnd_utils.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. 
+# + +package require tkdnd +namespace eval ::tkdnd { + namespace eval utils { + };# namespace ::tkdnd::utils + namespace eval text { + variable _drag_tag tkdnd::drag::selection::tag + variable _state {} + variable _drag_source_widget {} + variable _drop_target_widget {} + variable _now_dragging 0 + };# namespace ::tkdnd::text +};# namespace ::tkdnd + +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag clear 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag motion 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_TextAutoScan %W %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag reset 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag clear 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag motion 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag reset 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag clear 3 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag motion 3 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag reset 3 %W %s %X %Y %x %y} + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::drag_source +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::drag_source { mode path { types DND_Text } { event 1 } { tagprefix TkDND_Drag_Text } { tag sel } } { + switch -exact -- $mode { + register { + $path tag bind $tag \ + "tkdnd::text::_begin_drag press ${event} %W %s %X %Y %x %y" + ## Set a binding to the widget, to put selection as data... + bind $path <> "::tkdnd::text::DragInitCmd $path {%t} $tag" + ## Set a binding to the widget, to remove selection if action is move... + bind $path <> "::tkdnd::text::DragEndCmd $path %A $tag" + } + unregister { + $path tag bind $tag {} + bind $path <> {} + bind $path <> {} + } + } + ::tkdnd::drag_source $mode $path $types $event $tagprefix +};# ::tkdnd::text::drag_source + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::drop_target +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::drop_target { mode path { types DND_Text } } { + switch -exact -- $mode { + register { + bind $path <> "::tkdnd::text::DropPosition $path %X %Y %A %a %m" + bind $path <> "::tkdnd::text::Drop $path %D %X %Y %A %a %m" + } + unregister { + bind $path <> {} + bind $path <> {} + bind $path <> {} + bind $path <> {} + } + } + ::tkdnd::drop_target $mode $path $types +};# ::tkdnd::text::drop_target + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DragInitCmd +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DragInitCmd { path { types DND_Text } { tag sel } { actions { copy move } } } { + ## Save the selection indices... 
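+  ## (A <<DragInitCmd>> handler must return a list of the form
+  ##  {actions types data}; here the dragged data is the text currently
+  ##  covered by the selection tag.)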
+ variable _drag_source_widget + variable _drop_target_widget + set _drag_source_widget $path + set _drop_target_widget {} + _save_selection $path $tag + list $actions $types [$path get $tag.first $tag.last] +};# ::tkdnd::text::DragInitCmd + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DragEndCmd +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DragEndCmd { path action { tag sel } } { + variable _drag_source_widget + variable _drop_target_widget + set _drag_source_widget {} + set _drop_target_widget {} + _restore_selection $path $tag + switch -exact -- $action { + move { + ## Delete the original selected text... + variable _selection_first + variable _selection_last + $path delete $_selection_first $_selection_last + } + } +};# ::tkdnd::text::DragEndCmd + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DropPosition +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DropPosition { path X Y action actions keys} { + variable _drag_source_widget + variable _drop_target_widget + set _drop_target_widget $path + ## This check is primitive, a more accurate one is needed! + if {$path eq $_drag_source_widget} { + ## This is a drag within the same widget! Set action to move... + if {"move" in $actions} {set action move} + } + incr X -[winfo rootx $path] + incr Y -[winfo rooty $path] + $path mark set insert @$X,$Y; update + return $action +};# ::tkdnd::text::DropPosition + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::Drop +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::Drop { path data X Y action actions keys } { + incr X -[winfo rootx $path] + incr Y -[winfo rooty $path] + $path mark set insert @$X,$Y + $path insert [$path index insert] $data + return $action +};# ::tkdnd::text::Drop + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_save_selection +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_save_selection { path tag} { + variable _drag_tag + variable _selection_first + variable _selection_last + variable _selection_tag $tag + set _selection_first [$path index $tag.first] + set _selection_last [$path index $tag.last] + $path tag add $_drag_tag $_selection_first $_selection_last + $path tag configure $_drag_tag \ + -background [$path tag cget $tag -background] \ + -foreground [$path tag cget $tag -foreground] +};# tkdnd::text::_save_selection + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_restore_selection +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_restore_selection { path tag} { + variable _drag_tag + variable _selection_first + variable _selection_last + $path tag delete $_drag_tag + $path tag remove $tag 0.0 end + #$path tag add $tag $_selection_first $_selection_last +};# tkdnd::text::_restore_selection + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_begin_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_begin_drag { event button source state X Y x y } { + variable _drop_target_widget + variable _state + # puts 
"::tkdnd::text::_begin_drag $event $button $source $state $X $Y $x $y" + + switch -exact -- $event { + clear { + switch -exact -- $_state { + press { + ## Do not execute other bindings, as they will erase selection... + return -code break + } + } + set _state clear + } + motion { + variable _now_dragging + if {$_now_dragging} {return -code break} + if { [string equal $_state "press"] } { + variable _x0; variable _y0 + if { abs($_x0-$X) > ${::tkdnd::_dx} || abs($_y0-$Y) > ${::tkdnd::_dy} } { + set _state "done" + set _drop_target_widget {} + set _now_dragging 1 + set code [catch { + ::tkdnd::_init_drag $button $source $state $X $Y $x $y + } info options] + set _drop_target_widget {} + set _now_dragging 0 + if {$code != 0} { + ## Something strange occurred... + return -options $options $info + } + } + return -code break + } + set _state clear + } + press { + variable _x0; variable _y0 + set _x0 $X + set _y0 $Y + set _state "press" + } + reset { + set _state {} + } + } + if {$source eq $_drop_target_widget} {return -code break} + return -code continue +};# tkdnd::text::_begin_drag + +proc tkdnd::text::_TextAutoScan {w x y} { + variable _now_dragging + if {$_now_dragging} {return -code break} + return -code continue +};# tkdnd::text::_TextAutoScan diff --git a/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_windows.tcl b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_windows.tcl new file mode 100644 index 0000000000000000000000000000000000000000..a1d01f3a2c438eaf3f676437d4d4ba89b3ba64f0 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx64/tkdnd_windows.tcl @@ -0,0 +1,167 @@ +# +# tkdnd_windows.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval olednd { + + proc initialise { } { + ## Mapping from platform types to TkDND types... 
+ ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + CF_UNICODETEXT DND_Text \ + CF_TEXT DND_Text \ + CF_HDROP DND_Files \ + UniformResourceLocator DND_URL \ + CF_HTML DND_HTML \ + {HTML Format} DND_HTML \ + CF_RTF DND_RTF \ + CF_RTFTEXT DND_RTF \ + {Rich Text Format} DND_RTF \ + ] + # FileGroupDescriptorW DND_Files \ + # FileGroupDescriptor DND_Files \ + + ## Mapping from TkDND types to platform types... + ::tkdnd::generic::initialise_tkdnd_to_platform_types [list \ + DND_Text {CF_UNICODETEXT CF_TEXT} \ + DND_Files {CF_HDROP} \ + DND_URL {UniformResourceLocator UniformResourceLocatorW} \ + DND_HTML {CF_HTML {HTML Format}} \ + DND_RTF {CF_RTF CF_RTFTEXT {Rich Text Format}} \ + ] + };# initialise + +};# namespace olednd + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragEnter +# ---------------------------------------------------------------------------- +proc olednd::HandleDragEnter { drop_target typelist actionlist pressedkeys + rootX rootY codelist { data {} } } { + ::tkdnd::generic::SetDroppedData $data + focus $drop_target + ::tkdnd::generic::HandleEnter $drop_target 0 $typelist \ + $codelist $actionlist $pressedkeys + set action [::tkdnd::generic::HandlePosition $drop_target {} \ + $pressedkeys $rootX $rootY] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDragEnter + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragOver +# ---------------------------------------------------------------------------- +proc olednd::HandleDragOver { drop_target pressedkeys rootX rootY } { + set action [::tkdnd::generic::HandlePosition $drop_target {} \ + $pressedkeys $rootX $rootY] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDragOver + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragLeave +# ---------------------------------------------------------------------------- +proc olednd::HandleDragLeave { drop_target } { + ::tkdnd::generic::HandleLeave + if {$::tkdnd::_auto_update} {update idletasks} +};# olednd::HandleDragLeave + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDrop +# ---------------------------------------------------------------------------- +proc olednd::HandleDrop { drop_target pressedkeys rootX rootY type data } { + ::tkdnd::generic::SetDroppedData [normalise_data $type $data] + set action [::tkdnd::generic::HandleDrop $drop_target {} \ + $pressedkeys $rootX $rootY 0] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDrop + +# ---------------------------------------------------------------------------- +# Command olednd::GetDataType +# ---------------------------------------------------------------------------- +proc olednd::GetDataType { drop_target typelist } { + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [::tkdnd::generic::FindWindowWithCommonTypes $drop_target $typelist] {break} + lindex $common_drag_source_types 0 +};# olednd::GetDataType + +# ---------------------------------------------------------------------------- +# Command olednd::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc olednd::GetDragSourceCommonTypes { drop_target } { + ::tkdnd::generic::GetDragSourceCommonTypes +};# olednd::GetDragSourceCommonTypes + +# 
---------------------------------------------------------------------------- +# Command olednd::platform_specific_types +# ---------------------------------------------------------------------------- +proc olednd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # olednd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command olednd::platform_specific_type +# ---------------------------------------------------------------------------- +proc olednd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # olednd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command olednd::platform_independent_type +# ---------------------------------------------------------------------------- +proc olednd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # olednd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command olednd::normalise_data +# ---------------------------------------------------------------------------- +proc olednd::normalise_data { type data } { + switch [lindex [::tkdnd::generic::platform_independent_type $type] 0] { + DND_Text {return $data} + DND_Files {return $data} + DND_HTML {return [encoding convertfrom utf-8 $data]} + default {return $data} + } +}; # olednd::normalise_data diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/libtkdnd2.9.3.dylib b/gui_data/tkinterdnd2/tkdnd/osx_arm/libtkdnd2.9.3.dylib new file mode 100644 index 0000000000000000000000000000000000000000..9efdec82229fc40f474900a00c93a862f7038262 Binary files /dev/null and b/gui_data/tkinterdnd2/tkdnd/osx_arm/libtkdnd2.9.3.dylib differ diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/pkgIndex.tcl b/gui_data/tkinterdnd2/tkdnd/osx_arm/pkgIndex.tcl new file mode 100644 index 0000000000000000000000000000000000000000..35b4d1c1a4f61c538ee65f39373093008e334bd5 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx_arm/pkgIndex.tcl @@ -0,0 +1,62 @@ +# +# Tcl package index file +# + +namespace eval ::tkdnd { + ## Check if a debug level must be set... + if {[info exists ::TKDND_DEBUG_LEVEL]} { + variable _debug_level $::TKDND_DEBUG_LEVEL + } elseif {[info exists ::env(TKDND_DEBUG_LEVEL)]} { + variable _debug_level $::env(TKDND_DEBUG_LEVEL) + } else { + variable _debug_level 0 + } + + # ---------------------------------------------------------------------------- + # Command tkdnd::debug_enabled: returns the requested debug level (0 = no debug). + # ---------------------------------------------------------------------------- + proc debug_enabled { {level {}} } { + variable _debug_level + if {$level != {}} { + if {[string is integer -strict $level]} { + set _debug_level $level + } elseif {[string is true $level]} { + set _debug_level 1 + } + } + return $_debug_level + };# debug_enabled + + # ---------------------------------------------------------------------------- + # Command tkdnd::source: source a Tcl fileInitialise the TkDND package. 
+ # ---------------------------------------------------------------------------- + proc source { filename { encoding utf-8 } } { + variable _package_dir + # If in debug mode, enable debug statements... + set dbg_lvl [debug_enabled] + if {$dbg_lvl} { + puts "tkdnd::source (debug level $dbg_lvl) $filename" + set fd [open $filename r] + fconfigure $fd -encoding $encoding + set script [read $fd] + close $fd + set map {} + for {set lvl 0} {$lvl <= $dbg_lvl} {incr lvl} { + lappend map "#DBG$lvl " {} + } + lappend map {#DBG } {} + set script [string map $map $script] + return [eval $script] + } + ::source -encoding $encoding $filename + };# source + +}; # namespace ::tkdnd + +package ifneeded tkdnd 2.9.3 \ + "tkdnd::source \{$dir/tkdnd.tcl\} ; \ + tkdnd::initialise \{$dir\} libtkdnd2.9.3.dylib tkdnd" + +package ifneeded tkdnd::utils 2.9.3 \ + "tkdnd::source \{$dir/tkdnd_utils.tcl\} ; \ + package provide tkdnd::utils 2.9.3" diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd.tcl b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd.tcl new file mode 100644 index 0000000000000000000000000000000000000000..05f62bf8ae8a652a248fddae31a63704f76b912c --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd.tcl @@ -0,0 +1,539 @@ +# +# tkdnd.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +package require Tk + +namespace eval ::tkdnd { + variable _package_dir {} + variable _topw ".drag" + variable _tabops + variable _state + variable _x0 + variable _y0 + variable _platform_namespace + variable _drop_file_temp_dir + variable _auto_update 1 + variable _dx 3 ;# The difference in pixels before a drag is initiated. + variable _dy 3 ;# The difference in pixels before a drag is initiated. 
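+  # (_dx/_dy act as the drag-start threshold used by tkdnd::_begin_drag:
+  #  a drag only begins once the pointer has moved more than this many
+  #  pixels away from the initial button-press position.)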
+ + variable _windowingsystem + + if {[info exists ::TKDND_DEBUG_LEVEL]} { + variable _debug_level $::TKDND_DEBUG_LEVEL + } elseif {[info exists ::env(TKDND_DEBUG_LEVEL)]} { + variable _debug_level $::env(TKDND_DEBUG_LEVEL) + } else { + variable _debug_level 0 + } + + bind TkDND_Drag1 {tkdnd::_begin_drag press 1 %W %s %X %Y %x %y} + bind TkDND_Drag1 {tkdnd::_begin_drag motion 1 %W %s %X %Y %x %y} + bind TkDND_Drag2 {tkdnd::_begin_drag press 2 %W %s %X %Y %x %y} + bind TkDND_Drag2 {tkdnd::_begin_drag motion 2 %W %s %X %Y %x %y} + bind TkDND_Drag3 {tkdnd::_begin_drag press 3 %W %s %X %Y %x %y} + bind TkDND_Drag3 {tkdnd::_begin_drag motion 3 %W %s %X %Y %x %y} + + # ---------------------------------------------------------------------------- + # Command tkdnd::debug_enabled: returns the requested debug level (0 = no debug). + # ---------------------------------------------------------------------------- + proc debug_enabled { {level {}} } { + variable _debug_level + if {$level != {}} { + if {[string is integer -strict $level]} { + set _debug_level $level + } elseif {[string is true $level]} { + set _debug_level 1 + } + } + return $_debug_level + };# debug_enabled + + # ---------------------------------------------------------------------------- + # Command tkdnd::source: source a Tcl fileInitialise the TkDND package. + # ---------------------------------------------------------------------------- + proc source { filename { encoding utf-8 } } { + variable _package_dir + # If in debug mode, enable debug statements... + set dbg_lvl [debug_enabled] + if {$dbg_lvl} { + puts "tkdnd::source (debug level $dbg_lvl) $filename" + set fd [open $filename r] + fconfigure $fd -encoding $encoding + set script [read $fd] + close $fd + set map {} + for {set lvl 0} {$lvl <= $dbg_lvl} {incr lvl} { + lappend map "\#\D\B\G$lvl " {} ;# Do not remove these \\ + } + lappend map "\#\D\B\G\ " {} ;# Do not remove these \\ + set script [string map $map $script] + return [eval $script] + } + ::source -encoding $encoding $filename + };# source + + # ---------------------------------------------------------------------------- + # Command tkdnd::initialise: Initialise the TkDND package. + # ---------------------------------------------------------------------------- + proc initialise { dir PKG_LIB_FILE PACKAGE_NAME} { + variable _package_dir + variable _platform_namespace + variable _drop_file_temp_dir + variable _windowingsystem + global env + + set _package_dir $dir + + switch [tk windowingsystem] { + x11 { + set _windowingsystem x11 + } + win32 - + windows { + set _windowingsystem windows + } + aqua { + set _windowingsystem aqua + } + default { + error "unknown Tk windowing system" + } + } + + ## Get User's home directory: We try to locate the proper path from a set of + ## environmental variables... + foreach var {HOME HOMEPATH USERPROFILE ALLUSERSPROFILE APPDATA} { + if {[info exists env($var)]} { + if {[file isdirectory $env($var)]} { + set UserHomeDir $env($var) + break + } + } + } + + ## Should use [tk windowingsystem] instead of tcl platform array: + ## OS X returns "unix," but that's not useful because it has its own + ## windowing system, aqua + ## Under windows we have to also combine HOMEDRIVE & HOMEPATH... + if {![info exists UserHomeDir] && + [string equal $_windowingsystem windows] && + [info exists env(HOMEDRIVE)] && [info exists env(HOMEPATH)]} { + if {[file isdirectory $env(HOMEDRIVE)$env(HOMEPATH)]} { + set UserHomeDir $env(HOMEDRIVE)$env(HOMEPATH) + } + } + ## Have we located the needed path? 
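+    ## (If none of the environment variables pointed to a usable directory,
+    ##  fall back to the current working directory.)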
+ if {![info exists UserHomeDir]} { + set UserHomeDir [pwd] + } + set UserHomeDir [file normalize $UserHomeDir] + + ## Try to locate a temporary directory... + foreach var {TKDND_TEMP_DIR TEMP TMP} { + if {[info exists env($var)]} { + if {[file isdirectory $env($var)] && [file writable $env($var)]} { + set _drop_file_temp_dir $env($var) + break + } + } + } + if {![info exists _drop_file_temp_dir]} { + foreach _dir [list "$UserHomeDir/Local Settings/Temp" \ + "$UserHomeDir/AppData/Local/Temp" \ + /tmp \ + C:/WINDOWS/Temp C:/Temp C:/tmp \ + D:/WINDOWS/Temp D:/Temp D:/tmp] { + if {[file isdirectory $_dir] && [file writable $_dir]} { + set _drop_file_temp_dir $_dir + break + } + } + } + if {![info exists _drop_file_temp_dir]} { + set _drop_file_temp_dir $UserHomeDir + } + set _drop_file_temp_dir [file native $_drop_file_temp_dir] + + source $dir/tkdnd_generic.tcl + switch $_windowingsystem { + x11 { + source $dir/tkdnd_unix.tcl + set _platform_namespace xdnd + } + win32 - + windows { + source $dir/tkdnd_windows.tcl + set _platform_namespace olednd + } + aqua { + source $dir/tkdnd_macosx.tcl + set _platform_namespace macdnd + } + default { + error "unknown Tk windowing system" + } + } + load $dir/$PKG_LIB_FILE $PACKAGE_NAME + source $dir/tkdnd_compat.tcl + ${_platform_namespace}::initialise + };# initialise + + proc GetDropFileTempDirectory { } { + variable _drop_file_temp_dir + return $_drop_file_temp_dir + } + proc SetDropFileTempDirectory { dir } { + variable _drop_file_temp_dir + set _drop_file_temp_dir $dir + } + + proc debug {msg} { + puts $msg + };# debug + +};# namespace ::tkdnd + +# ---------------------------------------------------------------------------- +# Command tkdnd::drag_source +# ---------------------------------------------------------------------------- +proc ::tkdnd::drag_source { mode path { types {} } { event 1 } + { tagprefix TkDND_Drag } } { + #DBG debug "::tkdnd::drag_source $mode $path $types $event $tagprefix" + foreach single_event $event { + set tags [bindtags $path] + set idx [lsearch $tags ${tagprefix}$single_event] + switch -- $mode { + register { + if { $idx != -1 } { + ## No need to do anything! 
+ # bindtags $path [lreplace $tags $idx $idx ${tagprefix}$single_event] + } else { + bindtags $path [linsert $tags 1 ${tagprefix}$single_event] + } + _drag_source_update_types $path $types + } + unregister { + if { $idx != -1 } { + bindtags $path [lreplace $tags $idx $idx] + } + } + } + } +};# tkdnd::drag_source + +proc ::tkdnd::_drag_source_update_types { path types } { + set types [platform_specific_types $types] + set old_types [bind $path <>] + foreach type $types { + if {[lsearch $old_types $type] < 0} {lappend old_types $type} + } + bind $path <> $old_types +};# ::tkdnd::_drag_source_update_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::drop_target +# ---------------------------------------------------------------------------- +proc ::tkdnd::drop_target { mode path { types {} } } { + variable _windowingsystem + set types [platform_specific_types $types] + switch -- $mode { + register { + switch $_windowingsystem { + x11 { + _register_types $path [winfo toplevel $path] $types + } + win32 - + windows { + _RegisterDragDrop $path + bind $path {+ tkdnd::_RevokeDragDrop %W} + } + aqua { + macdnd::registerdragwidget [winfo toplevel $path] $types + } + default { + error "unknown Tk windowing system" + } + } + set old_types [bind $path <>] + set new_types {} + foreach type $types { + if {[lsearch -exact $old_types $type] < 0} {lappend new_types $type} + } + if {[llength $new_types]} { + bind $path <> [concat $old_types $new_types] + } + } + unregister { + switch $_windowingsystem { + x11 { + } + win32 - + windows { + _RevokeDragDrop $path + } + aqua { + error todo + } + default { + error "unknown Tk windowing system" + } + } + bind $path <> {} + } + } +};# tkdnd::drop_target + +# ---------------------------------------------------------------------------- +# Command tkdnd::_begin_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_begin_drag { event button source state X Y x y } { + variable _x0 + variable _y0 + variable _state + + switch -- $event { + press { + set _x0 $X + set _y0 $Y + set _state "press" + } + motion { + if { ![info exists _state] } { + # This is just extra protection. There seem to be + # rare cases where the motion comes before the press. + return + } + if { [string equal $_state "press"] } { + variable _dx + variable _dy + if { abs($_x0-$X) > ${_dx} || abs($_y0-$Y) > ${_dy} } { + set _state "done" + _init_drag $button $source $state $X $Y $x $y + } + } + } + } +};# tkdnd::_begin_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::_init_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_init_drag { button source state rootX rootY X Y } { + #DBG debug "::tkdnd::_init_drag $button $source $state $rootX $rootY $X $Y" + # Call the <> binding. 
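+  # (The binding is expected to return {actions types data} or
+  #  {actions {type1 data1 type2 data2 ...}}; both forms are handled by
+  #  the length checks further below.)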
+ set cmd [bind $source <>] + #DBG debug "CMD: $cmd" + if {[string length $cmd]} { + set cmd [string map [list %W [list $source] \ + %X $rootX %Y $rootY %x $X %y $Y \ + %S $state %e <> %A \{\} %% % \ + %b \{$button\} \ + %t \{[bind $source <>]\}] $cmd] + set code [catch {uplevel \#0 $cmd} info options] + #DBG debug "CODE: $code ---- $info" + switch -exact -- $code { + 0 {} + 3 - 4 { + # FRINK: nocheck + return + } + default { + return -options $options $info + } + } + + set len [llength $info] + if {$len == 3} { + foreach { actions types _data } $info { break } + set types [platform_specific_types $types] + set data [list] + foreach type $types { + lappend data $_data + } + unset _data + } elseif {$len == 2} { + foreach { actions _data } $info { break } + set data [list]; set types [list] + foreach {t d} $_data { + foreach t [platform_specific_types $t] { + lappend types $t; lappend data $d + } + } + unset _data t d + } else { + foreach { actions } $info { break } + if {$len == 1 && [string equal [lindex $actions 0] "refuse_drop"]} { + return + } + error "not enough items in the result of the <>\ + event binding. Either 2 or 3 items are expected. The command + executed was: \"$cmd\"\nResult was: \"$info\"" + } + set action refuse_drop + + ## Custom Cursors... + # Call the <> binding. + set cursor_map [bind $source <>] + + variable _windowingsystem + #DBG debug "Source: \"$source\"" + #DBG debug "Types: \"[join $types {", "}]\"" + #DBG debug "Actions: \"[join $actions {", "}]\"" + #DBG debug "Button: \"$button\"" + #DBG debug "Data: \"[string range $data 0 100]\"" + #DBG debug "CursorMap: \"[string range $cursor_map 0 100]\"" + switch $_windowingsystem { + x11 { + set action [xdnd::_dodragdrop $source $actions $types $data $button $cursor_map] + } + win32 - + windows { + set action [_DoDragDrop $source $actions $types $data $button] + } + aqua { + set action [macdnd::dodragdrop $source $actions $types $data $button] + } + default { + error "unknown Tk windowing system" + } + } + ## Call _end_drag to notify the widget of the result of the drag + ## operation... + _end_drag $button $source {} $action {} $data {} $state $rootX $rootY $X $Y + } +};# tkdnd::_init_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::_end_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_end_drag { button source target action type data result + state rootX rootY X Y } { + set rootX 0 + set rootY 0 + # Call the <> binding. + set cmd [bind $source <>] + if {[string length $cmd]} { + set cmd [string map [list %W [list $source] \ + %X $rootX %Y $rootY %x $X %y $Y %% % \ + %b \{$button\} \ + %S $state %e <> %A \{$action\}] $cmd] + set info [uplevel \#0 $cmd] + # if { $info != "" } { + # variable _windowingsystem + # foreach { actions types data } $info { break } + # set types [platform_specific_types $types] + # switch $_windowingsystem { + # x11 { + # error "dragging from Tk widgets not yet supported" + # } + # win32 - + # windows { + # set action [_DoDragDrop $source $actions $types $data $button] + # } + # aqua { + # macdnd::dodragdrop $source $actions $types $data + # } + # default { + # error "unknown Tk windowing system" + # } + # } + # ## Call _end_drag to notify the widget of the result of the drag + # ## operation... 
+ # _end_drag $button $source {} $action {} $data {} $state $rootX $rootY + # } + } +};# tkdnd::_end_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_specific_types { types } { + variable _platform_namespace + ${_platform_namespace}::platform_specific_types $types +}; # tkdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + variable _platform_namespace + ${_platform_namespace}::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_specific_type { type } { + variable _platform_namespace + ${_platform_namespace}::platform_specific_type $type +}; # tkdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_type { type } { + variable _platform_namespace + ${_platform_namespace}::platform_independent_type $type +}; # tkdnd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::bytes_to_string +# ---------------------------------------------------------------------------- +proc ::tkdnd::bytes_to_string { bytes } { + set string {} + foreach byte $bytes { + append string [binary format c $byte] + } + return $string +};# tkdnd::bytes_to_string + +# ---------------------------------------------------------------------------- +# Command tkdnd::urn_unquote +# ---------------------------------------------------------------------------- +proc ::tkdnd::urn_unquote {url} { + set result "" + set start 0 + while {[regexp -start $start -indices {%[0-9a-fA-F]{2}} $url match]} { + foreach {first last} $match break + append result [string range $url $start [expr {$first - 1}]] + append result [format %c 0x[string range $url [incr first] $last]] + set start [incr last] + } + append result [string range $url $start end] + return [encoding convertfrom utf-8 $result] +};# tkdnd::urn_unquote diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_compat.tcl b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_compat.tcl new file mode 100644 index 0000000000000000000000000000000000000000..efc96f7bb2fe74a9bafd1e79681c275c8ea0f8fc --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_compat.tcl @@ -0,0 +1,160 @@ +# +# tkdnd_compat.tcl -- +# +# This file implements some utility procedures, to support older versions +# of the TkDND package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval compat { + +};# namespace compat + +# ---------------------------------------------------------------------------- +# Command ::dnd +# ---------------------------------------------------------------------------- +proc ::dnd {method window args} { + switch $method { + bindtarget { + switch [llength $args] { + 0 {return [tkdnd::compat::bindtarget0 $window]} + 1 {return [tkdnd::compat::bindtarget1 $window [lindex $args 0]]} + 2 {return [tkdnd::compat::bindtarget2 $window [lindex $args 0] \ + [lindex $args 1]]} + 3 {return [tkdnd::compat::bindtarget3 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2]]} + 4 {return [tkdnd::compat::bindtarget4 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2] [lindex $args 3]]} + } + } + cleartarget { + return [tkdnd::compat::cleartarget $window] + } + bindsource { + switch [llength $args] { + 0 {return [tkdnd::compat::bindsource0 $window]} + 1 {return [tkdnd::compat::bindsource1 $window [lindex $args 0]]} + 2 {return [tkdnd::compat::bindsource2 $window [lindex $args 0] \ + [lindex $args 1]]} + 3 {return [tkdnd::compat::bindsource3 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2]]} + } + } + clearsource { + return [tkdnd::compat::clearsource $window] + } + drag { + return [tkdnd::_init_drag 1 $window "press" 0 0 0 0] + } + } + error "invalid number of arguments!" 
+};# ::dnd + +# ---------------------------------------------------------------------------- +# Command compat::bindtarget +# ---------------------------------------------------------------------------- +proc compat::bindtarget0 {window} { + return [bind $window <>] +};# compat::bindtarget0 + +proc compat::bindtarget1 {window type} { + return [bindtarget2 $window $type ] +};# compat::bindtarget1 + +proc compat::bindtarget2 {window type event} { + switch $event { + {return [bind $window <>]} + {return [bind $window <>]} + {return [bind $window <>]} + {return [bind $window <>]} + } +};# compat::bindtarget2 + +proc compat::bindtarget3 {window type event script} { + set type [normalise_type $type] + ::tkdnd::drop_target register $window [list $type] + switch $event { + {return [bind $window <> $script]} + {return [bind $window <> $script]} + {return [bind $window <> $script]} + {return [bind $window <> $script]} + } +};# compat::bindtarget3 + +proc compat::bindtarget4 {window type event script priority} { + return [bindtarget3 $window $type $event $script] +};# compat::bindtarget4 + +proc compat::normalise_type { type } { + switch $type { + text/plain - + {text/plain;charset=UTF-8} - + Text {return DND_Text} + text/uri-list - + Files {return DND_Files} + default {return $type} + } +};# compat::normalise_type + +# ---------------------------------------------------------------------------- +# Command compat::bindsource +# ---------------------------------------------------------------------------- +proc compat::bindsource0 {window} { + return [bind $window <>] +};# compat::bindsource0 + +proc compat::bindsource1 {window type} { + return [bindsource2 $window $type ] +};# compat::bindsource1 + +proc compat::bindsource2 {window type script} { + set type [normalise_type $type] + ::tkdnd::drag_source register $window $type + bind $window <> "list {copy} {%t} \[$script\]" +};# compat::bindsource2 + +proc compat::bindsource3 {window type script priority} { + return [bindsource2 $window $type $script] +};# compat::bindsource3 + +# ---------------------------------------------------------------------------- +# Command compat::cleartarget +# ---------------------------------------------------------------------------- +proc compat::cleartarget {window} { +};# compat::cleartarget + +# ---------------------------------------------------------------------------- +# Command compat::clearsource +# ---------------------------------------------------------------------------- +proc compat::clearsource {window} { +};# compat::clearsource diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_generic.tcl b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_generic.tcl new file mode 100644 index 0000000000000000000000000000000000000000..2ffcb98c21ba1d6a2a37c6798daaf909b5155d93 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_generic.tcl @@ -0,0 +1,587 @@ +# +# tkdnd_generic.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval generic { + variable _types {} + variable _typelist {} + variable _codelist {} + variable _actionlist {} + variable _pressedkeys {} + variable _pressedmods {} + variable _action {} + variable _common_drag_source_types {} + variable _common_drop_target_types {} + variable _drag_source {} + variable _drop_target {} + + variable _last_mouse_root_x 0 + variable _last_mouse_root_y 0 + + variable _tkdnd2platform + variable _platform2tkdnd + + variable _integer_test entier + if {[catch {string is entier 1234}]} { + set _integer_test integer + } + + proc debug { msg } { + tkdnd::debug $msg + };# debug + + proc initialise { } { + };# initialise + + proc initialise_platform_to_tkdnd_types { types } { + variable _platform2tkdnd + variable _tkdnd2platform + # set _platform2tkdnd [dict create {*}$types] ;# {*} not available in 8.4 + set _platform2tkdnd [dict create] + foreach {p t} $types { + dict set _platform2tkdnd $p $t + } + set _tkdnd2platform [dict create] + foreach type [dict keys $_platform2tkdnd] { + dict lappend _tkdnd2platform [dict get $_platform2tkdnd $type] $type + } + };# initialise_platform_to_tkdnd_types + + proc initialise_tkdnd_to_platform_types { types } { + variable _tkdnd2platform + # set _tkdnd2platform [dict create {*}$types] ;# {*} not available in 8.4 + set _tkdnd2platform [dict create] + foreach {t p} $types { + dict set _tkdnd2platform $t $p + } + };# initialise_tkdnd_to_platform_types + +};# namespace generic + +# ---------------------------------------------------------------------------- +# Command generic::SetPressedKeys +# ---------------------------------------------------------------------------- +proc generic::SetPressedKeys { pressedkeys } { + variable _pressedkeys + variable _pressedmods + variable _integer_test + set keys {} + set mods {} + foreach {b} $pressedkeys { + if {[string is $_integer_test -strict $b]} { + lappend keys $b + } else { + lappend mods $b + } + } + set _pressedkeys $keys + set _pressedmods $mods +};# generic::SetPressedKeys + +# ---------------------------------------------------------------------------- +# Command generic::HandleEnter +# ---------------------------------------------------------------------------- +proc 
generic::HandleEnter { drop_target drag_source typelist codelist
+                            actionlist pressedkeys } {
+  variable _typelist;                 set _typelist    $typelist
+  variable _action;                   set _action      refuse_drop
+  variable _common_drag_source_types; set _common_drag_source_types {}
+  variable _common_drop_target_types; set _common_drop_target_types {}
+  variable _actionlist
+  variable _drag_source;              set _drag_source $drag_source
+  variable _drop_target;              set _drop_target {}
+  variable _actionlist;               set _actionlist  $actionlist
+  variable _codelist;                 set _codelist    $codelist
+
+  variable _last_mouse_root_x;        set _last_mouse_root_x 0
+  variable _last_mouse_root_y;        set _last_mouse_root_y 0
+  SetPressedKeys $pressedkeys
+  #DBG debug "\n==============================================================="
+  #DBG debug "generic::HandleEnter: drop_target=$drop_target,\
+  #DBG        drag_source=$drag_source,\
+  #DBG        typelist=$typelist"
+  #DBG debug "generic::HandleEnter: ACTION: default"
+  return default
+};# generic::HandleEnter
+
+# ----------------------------------------------------------------------------
+# Command generic::HandlePosition
+# ----------------------------------------------------------------------------
+proc generic::HandlePosition { drop_target drag_source pressedkeys
+                               rootX rootY { action {} } { time 0 } } {
+  variable _types
+  variable _typelist
+  variable _codelist
+  variable _actionlist
+  variable _pressedkeys
+  variable _pressedmods
+  variable _action
+  variable _common_drag_source_types
+  variable _common_drop_target_types
+  variable _drag_source
+  variable _drop_target
+
+  variable _last_mouse_root_x;        set _last_mouse_root_x $rootX
+  variable _last_mouse_root_y;        set _last_mouse_root_y $rootY
+
+  #DBG debug "generic::HandlePosition: drop_target=$drop_target,\
+  #DBG        _drop_target=$_drop_target, rootX=$rootX, rootY=$rootY"
+
+  if {![info exists _drag_source] || ![string length $_drag_source]} {
+    #DBG debug "generic::HandlePosition: no or empty _drag_source:\
+    #DBG        return refuse_drop"
+    return refuse_drop
+  }
+
+  if {$drag_source ne "" && $drag_source ne $_drag_source} {
+    #DBG debug "generic position event from unexpected source: $_drag_source\
+    #DBG        != $drag_source"
+    return refuse_drop
+  }
+
+  SetPressedKeys $pressedkeys
+
+  ## Does the new drop target support any of our new types?
+  # foreach {common_drag_source_types common_drop_target_types} \
+  #         [GetWindowCommonTypes $drop_target $_typelist] {break}
+  foreach {drop_target common_drag_source_types common_drop_target_types} \
+          [FindWindowWithCommonTypes $drop_target $_typelist] {break}
+  set data [GetDroppedData $time]
+
+  #DBG debug "\t($_drop_target) -> ($drop_target)"
+  if {$drop_target != $_drop_target} {
+    if {[string length $_drop_target]} {
+      ## Call the <<DropLeave>> event.
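+      ## For reference, the scripts bound to these virtual events receive
+      ## their arguments through the percent substitutions in the string
+      ## maps below; roughly: %W drop target window, %X/%Y root pointer
+      ## coordinates, %x/%y window-relative coordinates, %D dropped data,
+      ## %T first common type, %CST/%CTT common drag-source/drop-target
+      ## types, %CPT the platform-independent form of the first common type,
+      ## %ST/%t/%L the drag source type list, %TT the target's types,
+      ## %A the current action, %a the actions offered by the source,
+      ## %b/%m pressed buttons/modifiers, %c/%C the platform type codes,
+      ## and %e the name of the virtual event being delivered.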
+ #DBG debug "\t<> on $_drop_target" + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set widgetX 0; set widgetY 0 + catch {set widgetX [expr {$rootX - [winfo rootx $_drop_target]}]} + catch {set widgetY [expr {$rootY - [winfo rooty $_drop_target]}]} + set cmd [string map [list %W [list $_drop_target] \ + %X $rootX %Y $rootY %x $widgetX %y $widgetY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A \{$_action\} %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedmods\} \ + %D \{\} %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + uplevel \#0 $cmd + } + } + set _drop_target $drop_target + set _action refuse_drop + + if {[llength $common_drag_source_types]} { + switch -exact -- $action { + default - {} { set _action [lindex $_actionlist 0] } + default { set _action $action } + } + set _common_drag_source_types $common_drag_source_types + set _common_drop_target_types $common_drop_target_types + ## Drop target supports at least one type. Send a <>. + #DBG debug "<> -> $drop_target" + set cmd [bind $drop_target <>] + if {[string length $cmd]} { + set widgetX 0; set widgetY 0 + catch {set widgetX [expr {$rootX - [winfo rootx $drop_target]}]} + catch {set widgetY [expr {$rootY - [winfo rooty $drop_target]}]} + focus $drop_target + set cmd [string map [list %W [list $drop_target] \ + %X $rootX %Y $rootY %x $widgetX %y $widgetY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedmods\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + } + } + } + + set _drop_target {} + if {[llength $common_drag_source_types]} { + set _common_drag_source_types $common_drag_source_types + set _common_drop_target_types $common_drop_target_types + set _drop_target $drop_target + ## Drop target supports at least one type. Send a <>. 
+ set cmd [bind $drop_target <>] + if {[string length $cmd]} { + switch -exact -- $action { + default - {} { set _action [lindex $_actionlist 0] } + default { set _action $action } + } + set widgetX 0; set widgetY 0 + catch {set widgetX [expr {$rootX - [winfo rootx $drop_target]}]} + catch {set widgetY [expr {$rootY - [winfo rooty $drop_target]}]} + set cmd [string map [list %W [list $drop_target] \ + %X $rootX %Y $rootY %x $widgetX %y $widgetY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedmods\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + } + # Return values: copy, move, link, ask, private, refuse_drop, default + #DBG debug "generic::HandlePosition: ACTION: $_action" + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action +};# generic::HandlePosition + +# ---------------------------------------------------------------------------- +# Command generic::HandleLeave +# ---------------------------------------------------------------------------- +proc generic::HandleLeave { } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _pressedmods + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + variable _last_mouse_root_x + variable _last_mouse_root_y + if {![info exists _drop_target]} {set _drop_target {}} + #DBG debug "generic::HandleLeave: _drop_target=$_drop_target" + if {[info exists _drop_target] && [string length $_drop_target]} { + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set widgetX 0; set widgetY 0 + catch {set widgetX [expr {$_last_mouse_root_x - [winfo rootx $_drop_target]}]} + catch {set widgetY [expr {$_last_mouse_root_y - [winfo rooty $_drop_target]}]} + set cmd [string map [list %W [list $_drop_target] \ + %X $_last_mouse_root_x %Y $_last_mouse_root_y %x $widgetX %y $widgetY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A \{$_action\} %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedmods\} \ + %D \{\} %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + } + foreach var {_types _typelist _actionlist _pressedkeys _pressedmods _action + _common_drag_source_types _common_drop_target_types + _drag_source _drop_target} { + set $var {} + } +};# generic::HandleLeave + +# ---------------------------------------------------------------------------- +# Command generic::HandleDrop +# ---------------------------------------------------------------------------- +proc generic::HandleDrop {drop_target drag_source pressedkeys rootX rootY time } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _pressedmods + variable _action + variable 
_common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + variable _last_mouse_root_x + variable _last_mouse_root_y + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + + SetPressedKeys $pressedkeys + + #DBG debug "generic::HandleDrop: $time" + + if {![info exists _drag_source] && ![string length $_drag_source]} { + return refuse_drop + } + if {![info exists _drop_target] && ![string length $_drop_target]} { + return refuse_drop + } + if {![llength $_common_drag_source_types]} {return refuse_drop} + ## Get the dropped data. + set data [GetDroppedData $time] + ## Try to select the most specific <> event. + foreach type [concat $_common_drag_source_types $_common_drop_target_types] { + set type [platform_independent_type $type] + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set widgetX 0; set widgetY 0 + catch {set widgetX [expr {$rootX - [winfo rootx $_drop_target]}]} + catch {set widgetY [expr {$rootY - [winfo rooty $_drop_target]}]} + set cmd [string map [list %W [list $_drop_target] \ + %X $rootX %Y $rootY %x $widgetX %y $widgetY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedmods\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + # Return values: copy, move, link, ask, private, refuse_drop + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action + } + } + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set widgetX 0; set widgetY 0 + catch {set widgetX [expr {$rootX - [winfo rootx $_drop_target]}]} + catch {set widgetY [expr {$rootY - [winfo rooty $_drop_target]}]} + set cmd [string map [list %W [list $_drop_target] \ + %X $rootX %Y $rootY %x $widgetX %y $widgetY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedmods\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + # Return values: copy, move, link, ask, private, refuse_drop + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action +};# generic::HandleDrop + +# ---------------------------------------------------------------------------- +# Command generic::GetWindowCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetWindowCommonTypes { win typelist } { + set types [bind $win <>] + #DBG debug ">> Accepted types: $win $_types" + set common_drag_source_types {} + set common_drop_target_types {} + if {[llength $types]} { + ## Examine the drop target types, to find at least one match with the drag + ## source types... 
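+    ## The type names accepted by the target are used as glob patterns over
+    ## the drag source's supported types, so for example a target registered
+    ## with the wildcard type * will accept any droppable type.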
+ set supported_types [supported_types $typelist] + foreach type $types { + foreach matched [lsearch -glob -all -inline $supported_types $type] { + ## Drop target supports this type. + lappend common_drag_source_types $matched + lappend common_drop_target_types $type + } + } + } + list $common_drag_source_types $common_drop_target_types +};# generic::GetWindowCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::FindWindowWithCommonTypes +# ---------------------------------------------------------------------------- +proc generic::FindWindowWithCommonTypes { win typelist } { + set toplevel [winfo toplevel $win] + while {![string equal $win $toplevel]} { + foreach {common_drag_source_types common_drop_target_types} \ + [GetWindowCommonTypes $win $typelist] {break} + if {[llength $common_drag_source_types]} { + return [list $win $common_drag_source_types $common_drop_target_types] + } + set win [winfo parent $win] + } + ## We have reached the toplevel, which may be also a target (SF Bug #30) + foreach {common_drag_source_types common_drop_target_types} \ + [GetWindowCommonTypes $win $typelist] {break} + if {[llength $common_drag_source_types]} { + return [list $win $common_drag_source_types $common_drop_target_types] + } + return { {} {} {} } +};# generic::FindWindowWithCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::GetDroppedData +# ---------------------------------------------------------------------------- +proc generic::GetDroppedData { time } { + variable _dropped_data + return $_dropped_data +};# generic::GetDroppedData + +# ---------------------------------------------------------------------------- +# Command generic::SetDroppedData +# ---------------------------------------------------------------------------- +proc generic::SetDroppedData { data } { + variable _dropped_data + set _dropped_data $data +};# generic::SetDroppedData + +# ---------------------------------------------------------------------------- +# Command generic::GetDragSource +# ---------------------------------------------------------------------------- +proc generic::GetDragSource { } { + variable _drag_source + return $_drag_source +};# generic::GetDragSource + +# ---------------------------------------------------------------------------- +# Command generic::GetDropTarget +# ---------------------------------------------------------------------------- +proc generic::GetDropTarget { } { + variable _drop_target + return $_drop_target +};# generic::GetDropTarget + +# ---------------------------------------------------------------------------- +# Command generic::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetDragSourceCommonTypes { } { + variable _common_drag_source_types + return $_common_drag_source_types +};# generic::GetDragSourceCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::GetDropTargetCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetDropTargetCommonTypes { } { + variable _common_drag_source_types + return $_common_drag_source_types +};# generic::GetDropTargetCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::platform_specific_types +# ---------------------------------------------------------------------------- +proc 
generic::platform_specific_types { types } { + set new_types {} + foreach type $types { + set new_types [concat $new_types [platform_specific_type $type]] + } + return $new_types +}; # generic::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command generic::platform_specific_type +# ---------------------------------------------------------------------------- +proc generic::platform_specific_type { type } { + variable _tkdnd2platform + if {[dict exists $_tkdnd2platform $type]} { + return [dict get $_tkdnd2platform $type] + } + list $type +}; # generic::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command generic::platform_independent_types +# ---------------------------------------------------------------------------- +proc generic::platform_independent_types { types } { + set new_types {} + foreach type $types { + set new_types [concat $new_types [platform_independent_type $type]] + } + return $new_types +}; # generic::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command generic::platform_independent_type +# ---------------------------------------------------------------------------- +proc generic::platform_independent_type { type } { + variable _platform2tkdnd + if {[dict exists $_platform2tkdnd $type]} { + return [dict get $_platform2tkdnd $type] + } + return $type +}; # generic::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command generic::supported_types +# ---------------------------------------------------------------------------- +proc generic::supported_types { types } { + set new_types {} + foreach type $types { + if {[supported_type $type]} {lappend new_types $type} + } + return $new_types +}; # generic::supported_types + +# ---------------------------------------------------------------------------- +# Command generic::supported_type +# ---------------------------------------------------------------------------- +proc generic::supported_type { type } { + variable _platform2tkdnd + if {[dict exists $_platform2tkdnd $type]} { + return 1 + } + return 0 +}; # generic::supported_type diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_macosx.tcl b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_macosx.tcl new file mode 100644 index 0000000000000000000000000000000000000000..307f6da2e94286d01dc9e068fffebe46de3c43f3 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_macosx.tcl @@ -0,0 +1,144 @@ +# +# tkdnd_macosx.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. + +# This software is copyrighted by: +# Georgios Petasis, Athens, Greece. +# e-mail: petasisg@yahoo.gr, petasis@iit.demokritos.gr +# +# Mac portions (c) 2009 Kevin Walzer/WordTech Communications LLC, +# kw@codebykevin.com +# +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. 
+# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +#basic API for Mac Drag and Drop + +#two data types supported: strings and file paths + +#two commands at C level: ::tkdnd::macdnd::registerdragwidget, ::tkdnd::macdnd::unregisterdragwidget + +#data retrieval mechanism: text or file paths are copied from drag clipboard to system clipboard and retrieved via [clipboard get]; array of file paths is converted to single tab-separated string, can be split into Tcl list + +if {[tk windowingsystem] eq "aqua" && "AppKit" ni [winfo server .]} { + error {TkAqua Cocoa required} +} + +namespace eval macdnd { + + proc initialise { } { + ## Mapping from platform types to TkDND types... + ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + NSPasteboardTypeString DND_Text \ + NSFilenamesPboardType DND_Files \ + NSPasteboardTypeHTML DND_HTML \ + ] + };# initialise + +};# namespace macdnd + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleEnter +# ---------------------------------------------------------------------------- +proc macdnd::HandleEnter { path drag_source typelist { data {} } } { + variable _pressedkeys + variable _actionlist + set _pressedkeys 1 + set _actionlist { copy move link ask private } + ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleEnter $path $drag_source $typelist $typelist \ + $_actionlist $_pressedkeys +};# macdnd::HandleEnter + +# ---------------------------------------------------------------------------- +# Command macdnd::HandlePosition +# ---------------------------------------------------------------------------- +proc macdnd::HandlePosition { drop_target rootX rootY {drag_source {}} } { + variable _pressedkeys + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + ::tkdnd::generic::HandlePosition $drop_target $drag_source \ + $_pressedkeys $rootX $rootY +};# macdnd::HandlePosition + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleLeave +# ---------------------------------------------------------------------------- +proc macdnd::HandleLeave { args } { + ::tkdnd::generic::HandleLeave +};# macdnd::HandleLeave + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleDrop +# ---------------------------------------------------------------------------- +proc macdnd::HandleDrop { drop_target data args } { + variable _pressedkeys + variable _last_mouse_root_x + variable _last_mouse_root_y + ## Get the dropped data... 
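+  ## As noted at the top of this file, a list of dropped file paths may
+  ## arrive as a single tab-separated string; an illustrative way to turn
+  ## such a value back into a Tcl list is:  set paths [split $data \t]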
+ ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleDrop {} {} $_pressedkeys \ + $_last_mouse_root_x $_last_mouse_root_y 0 +};# macdnd::HandleDrop + +# ---------------------------------------------------------------------------- +# Command macdnd::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc macdnd::GetDragSourceCommonTypes { } { + ::tkdnd::generic::GetDragSourceCommonTypes +};# macdnd::GetDragSourceCommonTypes + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc macdnd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # macdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc macdnd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # macdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc macdnd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # macdnd::platform_independent_type diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_unix.tcl b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_unix.tcl new file mode 100644 index 0000000000000000000000000000000000000000..4446f21c9b55ddb174d3891998f907a109c4a2df --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_unix.tcl @@ -0,0 +1,883 @@ +# +# tkdnd_unix.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval xdnd { + variable _dragging 0 + + proc debug { msg } { + tkdnd::debug $msg + };# debug + + proc initialise { } { + ## Mapping from platform types to TkDND types... + ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + text/plain\;charset=utf-8 DND_Text \ + UTF8_STRING DND_Text \ + text/plain DND_Text \ + STRING DND_Text \ + TEXT DND_Text \ + COMPOUND_TEXT DND_Text \ + text/uri-list DND_Files \ + text/html\;charset=utf-8 DND_HTML \ + text/html DND_HTML \ + application/x-color DND_Color \ + ] + };# initialise + +};# namespace xdnd + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::GetPressedKeys { drop_target } { + #DBG debug "xdnd::GetPressedKeys: $drop_target" + if {[catch {set dict [_keyboard_get_state $drop_target]}]} { + return {} + } + set pressedkeys {} + for {set b 1} {$b <= 5} {incr b} { + if {[dict get $dict $b]} {lappend pressedkeys $b} + } + foreach {k l} {Alt alt Shift shift Control ctrl Lock caps_lock + Mod1 mod1 Mod2 mod2 Mod3 mod3 Mod4 mod4 Mod5 mod5} { + if {[dict get $dict $k]} {lappend pressedkeys $l} + } + #DBG debug "xdnd::GetPressedKeys: $drop_target -> $pressedkeys" + return $pressedkeys +};# xdnd::GetPressedKeys + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndEnter { drop_target drag_source typelist time + { data {} } } { + variable _pressedkeys + variable _actionlist + variable _typelist + set _pressedkeys [GetPressedKeys $drop_target] + set _actionlist { copy move link ask private } + set _typelist $typelist + #DBG debug "xdnd::HandleXdndEnter: $time" + ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleEnter $drop_target $drag_source $typelist $typelist \ + $_actionlist $_pressedkeys +};# xdnd::HandleXdndEnter + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndPosition +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndPosition { drop_target rootX rootY time + { drag_source {} } { action default } } { + variable _pressedkeys + variable _typelist + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + set _pressedkeys [GetPressedKeys $drop_target] + #DBG debug "xdnd::HandleXdndPosition: $time" + ## Get the dropped data... 
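+  ## Fetching the data already while the pointer moves means the %D
+  ## substitution is populated when <<DropPosition>> handlers run; failures
+  ## to read the XdndSelection at this stage are silently ignored.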
+ catch { + ::tkdnd::generic::SetDroppedData [GetPositionData $drop_target $_typelist $time] + } + ::tkdnd::generic::HandlePosition $drop_target $drag_source \ + $_pressedkeys $rootX $rootY $action +};# xdnd::HandleXdndPosition + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndLeave +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndLeave { } { + #DBG debug "xdnd::HandleXdndLeave" + ::tkdnd::generic::HandleLeave +};# xdnd::HandleXdndLeave + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndDrop +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndDrop { time } { + variable _pressedkeys + variable _last_mouse_root_x + variable _last_mouse_root_y + set _pressedkeys [GetPressedKeys [::tkdnd::generic::GetDropTarget]] + #DBG debug "xdnd::HandleXdndDrop: $time" + ## Get the dropped data... + ::tkdnd::generic::SetDroppedData [GetDroppedData \ + [::tkdnd::generic::GetDragSource] [::tkdnd::generic::GetDropTarget] \ + [::tkdnd::generic::GetDragSourceCommonTypes] $time] + ::tkdnd::generic::HandleDrop {} {} $_pressedkeys \ + $_last_mouse_root_x $_last_mouse_root_y $time +};# xdnd::HandleXdndDrop + +# ---------------------------------------------------------------------------- +# Command xdnd::GetPositionData +# ---------------------------------------------------------------------------- +proc xdnd::GetPositionData { drop_target typelist time } { + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [::tkdnd::generic::FindWindowWithCommonTypes $drop_target $typelist] {break} + GetDroppedData [::tkdnd::generic::GetDragSource] $drop_target \ + $common_drag_source_types $time +};# xdnd::GetPositionData + +# ---------------------------------------------------------------------------- +# Command xdnd::GetDroppedData +# ---------------------------------------------------------------------------- +proc xdnd::GetDroppedData { _drag_source _drop_target _common_drag_source_types time } { + if {![llength $_common_drag_source_types]} { + return -code error "no common data types between the drag source and drop target widgets" + } + ## Is drag source in this application? 
+ if {[catch {winfo pathname -displayof $_drop_target $_drag_source} p]} { + set _use_tk_selection 0 + } else { + set _use_tk_selection 1 + } + foreach type $_common_drag_source_types { + #DBG debug "TYPE: $type ($_drop_target)" + # _get_selection $_drop_target $time $type + if {$_use_tk_selection} { + if {![catch { + selection get -displayof $_drop_target -selection XdndSelection \ + -type $type + } result options]} { + return [normalise_data $type $result] + } + } else { + #DBG debug "_selection_get -displayof $_drop_target -selection XdndSelection \ + # -type $type -time $time" + #after 100 [list focus -force $_drop_target] + #after 50 [list raise [winfo toplevel $_drop_target]] + if {![catch { + _selection_get -displayof $_drop_target -selection XdndSelection \ + -type $type -time $time + } result options]} { + return [normalise_data $type $result] + } + } + } + return -options $options $result +};# xdnd::GetDroppedData + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc xdnd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # xdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc xdnd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # xdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc xdnd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # xdnd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command xdnd::_normalise_data +# ---------------------------------------------------------------------------- +proc xdnd::normalise_data { type data } { + # Tk knows how to interpret the following types: + # STRING, TEXT, COMPOUND_TEXT + # UTF8_STRING + # Else, it returns a list of 8 or 32 bit numbers... 
+ switch -glob $type { + STRING - UTF8_STRING - TEXT - COMPOUND_TEXT {return $data} + text/html { + if {[catch { + encoding convertfrom unicode $data + } string]} { + set string $data + } + return [string map {\r\n \n} $string] + } + text/html\;charset=utf-8 - + text/plain\;charset=utf-8 - + text/plain { + if {[catch { + encoding convertfrom utf-8 [tkdnd::bytes_to_string $data] + } string]} { + set string $data + } + return [string map {\r\n \n} $string] + } + text/uri-list* { + if {[catch { + encoding convertfrom utf-8 [tkdnd::bytes_to_string $data] + } string]} { + set string $data + } + ## Get rid of \r\n + set string [string trim [string map {\r\n \n} $string]] + set files {} + foreach quoted_file [split $string] { + set file [tkdnd::urn_unquote $quoted_file] + switch -glob $file { + \#* {} + file://* {lappend files [string range $file 7 end]} + ftp://* - + https://* - + http://* {lappend files $quoted_file} + default {lappend files $file} + } + } + return $files + } + application/x-color { + return $data + } + text/x-moz-url - + application/q-iconlist - + default {return $data} + } +}; # xdnd::normalise_data + +############################################################################# +## +## XDND drag implementation +## +############################################################################# + +# ---------------------------------------------------------------------------- +# Command xdnd::_selection_ownership_lost +# ---------------------------------------------------------------------------- +proc xdnd::_selection_ownership_lost {} { + variable _dragging + set _dragging 0 +};# _selection_ownership_lost + +# ---------------------------------------------------------------------------- +# Command xdnd::_dodragdrop +# ---------------------------------------------------------------------------- +proc xdnd::_dodragdrop { source actions types data button { cursor_map {} } } { + variable _dragging + + #DBG debug "xdnd::_dodragdrop: source: $source, actions: $actions, types: $types,\ + #DBG data: \"$data\", button: $button" + if {$_dragging} { + ## We are in the middle of another drag operation... + error "another drag operation in progress" + } + + variable _dodragdrop_drag_source $source + variable _dodragdrop_drop_target 0 + variable _dodragdrop_drop_target_proxy 0 + variable _dodragdrop_actions $actions + variable _dodragdrop_action_descriptions $actions + variable _dodragdrop_actions_len [llength $actions] + variable _dodragdrop_types $types + variable _dodragdrop_types_len [llength $types] + variable _dodragdrop_data $data + variable _dodragdrop_transfer_data {} + variable _dodragdrop_button $button + variable _dodragdrop_time 0 + variable _dodragdrop_default_action refuse_drop + variable _dodragdrop_waiting_status 0 + variable _dodragdrop_drop_target_accepts_drop 0 + variable _dodragdrop_drop_target_accepts_action refuse_drop + variable _dodragdrop_current_cursor $_dodragdrop_default_action + variable _dodragdrop_drop_occured 0 + variable _dodragdrop_selection_requestor 0 + variable _dodragdrop_cursor_map $cursor_map + + ## + ## If we have more than 3 types, the property XdndTypeList must be set on + ## the drag source widget... + ## + if {$_dodragdrop_types_len > 3} { + _announce_type_list $_dodragdrop_drag_source $_dodragdrop_types + } + + ## + ## Announce the actions & their descriptions on the XdndActionList & + ## XdndActionDescription properties... 
+ ## + _announce_action_list $_dodragdrop_drag_source $_dodragdrop_actions \ + $_dodragdrop_action_descriptions + + ## + ## Arrange selection handlers for our drag source, and all the supported types + ## + #DBG debug "xdnd::_dodragdrop: registerSelectionHandler $source $types" + registerSelectionHandler $source $types + + ## + ## Step 1: When a drag begins, the source takes ownership of XdndSelection. + ## + #DBG debug "xdnd::_dodragdrop: selection own $source" + selection own -command ::tkdnd::xdnd::_selection_ownership_lost \ + -selection XdndSelection $source + set _dragging 1 + + ## Grab the mouse pointer... + #DBG debug "xdnd::_dodragdrop: _grab_pointer $source [_get_mapped_cursor $_dodragdrop_default_action]" + _grab_pointer $source [_get_mapped_cursor $_dodragdrop_default_action] + + ## Register our generic event handler... + # The generic event callback will report events by modifying variable + # ::xdnd::_dodragdrop_event: a dict with event information will be set as + # the value of the variable... + #DBG debug "xdnd::_dodragdrop: _register_generic_event_handler" + _register_generic_event_handler + + ## Set a timeout for debugging purposes... + # after 60000 {set ::tkdnd::xdnd::_dragging 0} + + #DBG debug "xdnd::_dodragdrop: waiting drag action to finish..." + tkwait variable ::tkdnd::xdnd::_dragging + #DBG debug "xdnd::_dodragdrop: drag action finished!" + _SendXdndLeave + + set _dragging 0 + #DBG debug "xdnd::_dodragdrop: _ungrab_pointer $source" + _ungrab_pointer $source + #DBG debug "xdnd::_dodragdrop: _unregister_generic_event_handler" + _unregister_generic_event_handler + catch {selection clear -selection XdndSelection} + #DBG debug "xdnd::_dodragdrop: unregisterSelectionHandler $source $types" + unregisterSelectionHandler $source $types + return $_dodragdrop_drop_target_accepts_action +};# xdnd::_dodragdrop + +# ---------------------------------------------------------------------------- +# Command xdnd::_process_drag_events +# ---------------------------------------------------------------------------- +proc xdnd::_process_drag_events {event} { + # The return value from proc is normally 0. A non-zero return value indicates + # that the event is not to be handled further; that is, proc has done all + # processing that is to be allowed for the event + variable _dragging + if {!$_dragging} {return 0} + #DBG debug "xdnd::_process_drag_events: $event" + + variable _dodragdrop_time + set time [dict get $event time] + set type [dict get $event type] + if {$time < $_dodragdrop_time && ![string equal $type SelectionRequest]} { + #DBG debug "xdnd::_process_drag_events: return 0 (1)" + return 0 + } + set _dodragdrop_time $time + + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_default_action + switch $type { + MotionNotify { + set rootx [dict get $event x_root] + set rooty [dict get $event y_root] + set window [_find_drop_target_window $_dodragdrop_drag_source \ + $rootx $rooty] + if {[string length $window]} { + ## Examine the modifiers to suggest an action... + set _dodragdrop_default_action [_default_action $event] + ## Is it a Tk widget? + #DBG set path [winfo containing $rootx $rooty] + #DBG debug "Window under mouse: $window ($path)" + if {$_dodragdrop_drop_target != $window} { + ## Send XdndLeave to $_dodragdrop_drop_target + _SendXdndLeave + ## Is there a proxy? If not, _find_drop_target_proxy returns the + ## target window, so we always get a valid "proxy". 
+ set proxy [_find_drop_target_proxy $_dodragdrop_drag_source $window] + ## Send XdndEnter to $window + _SendXdndEnter $window $proxy + ## Send XdndPosition to $_dodragdrop_drop_target + _SendXdndPosition $rootx $rooty $_dodragdrop_default_action + } else { + ## Send XdndPosition to $_dodragdrop_drop_target + _SendXdndPosition $rootx $rooty $_dodragdrop_default_action + } + } else { + ## No window under the mouse. Send XdndLeave to $_dodragdrop_drop_target + _SendXdndLeave + } + } + ButtonPress { + } + ButtonRelease { + variable _dodragdrop_button + set button [dict get $event button] + if {$button == $_dodragdrop_button} { + ## The button that initiated the drag was released. Trigger drop... + #DBG debug "xdnd::_process_drag_events: _SendXdndDrop" + _SendXdndDrop + } + #DBG debug "xdnd::_process_drag_events: return 1 (2)" + # return 1 ;# Returning non-zero is not a good idea... + return 0 + } + KeyPress { + } + KeyRelease { + set keysym [dict get $event keysym] + switch $keysym { + Escape { + ## The user has pressed escape. Abort... + if {$_dragging} {set _dragging 0} + } + } + } + SelectionRequest { + variable _dodragdrop_selection_requestor + variable _dodragdrop_selection_property + variable _dodragdrop_selection_selection + variable _dodragdrop_selection_target + variable _dodragdrop_selection_time + set _dodragdrop_selection_requestor [dict get $event requestor] + set _dodragdrop_selection_property [dict get $event property] + set _dodragdrop_selection_selection [dict get $event selection] + set _dodragdrop_selection_target [dict get $event target] + set _dodragdrop_selection_time $time + #DBG debug "xdnd::_process_drag_events: return 0 (3)" + return 0 + } + default { + #DBG debug "xdnd::_process_drag_events: return 0 (4)" + return 0 + } + } + #DBG debug "xdnd::_process_drag_events: return 0 (5)" + return 0 +};# _process_drag_events + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndEnter {window proxy} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_types + variable _dodragdrop_waiting_status + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_target > 0} _SendXdndLeave + if {$_dodragdrop_drop_occured} return + set _dodragdrop_drop_target $window + set _dodragdrop_drop_target_proxy $proxy + set _dodragdrop_waiting_status 0 + if {$_dodragdrop_drop_target < 1} return + #DBG debug "XdndEnter: $_dodragdrop_drop_target $_dodragdrop_drop_target_proxy" + _send_XdndEnter $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy $_dodragdrop_types +};# xdnd::_SendXdndEnter + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndPosition +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndPosition {rootx rooty action} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_waiting_status + ## Arrange a new XdndPosition, to be send periodically... 
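+  ## A 200 ms [after] heartbeat re-sends the last known position, so the
+  ## drop target keeps receiving XdndPosition messages while the pointer is
+  ## stationary; every new call cancels and reschedules the timer.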
+ variable _dodragdrop_xdnd_position_heartbeat + catch {after cancel $_dodragdrop_xdnd_position_heartbeat} + set _dodragdrop_xdnd_position_heartbeat [after 200 \ + [list ::tkdnd::xdnd::_SendXdndPosition $rootx $rooty $action]] + if {$_dodragdrop_waiting_status} {return} + #DBG debug "XdndPosition: $_dodragdrop_drop_target $rootx $rooty $action" + _send_XdndPosition $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy $rootx $rooty $action + set _dodragdrop_waiting_status 1 +};# xdnd::_SendXdndPosition + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndStatus +# ---------------------------------------------------------------------------- +proc xdnd::_HandleXdndStatus {event} { + variable _dodragdrop_drop_target + variable _dodragdrop_waiting_status + + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + set _dodragdrop_waiting_status 0 + foreach key {target accept want_position action x y w h} { + set $key [dict get $event $key] + } + set _dodragdrop_drop_target_accepts_drop $accept + set _dodragdrop_drop_target_accepts_action $action + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + _update_cursor + #DBG debug "XdndStatus: $event" +};# xdnd::_HandleXdndStatus + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndFinished +# ---------------------------------------------------------------------------- +proc xdnd::_HandleXdndFinished {event} { + variable _dodragdrop_xdnd_finished_event_after_id + catch {after cancel $_dodragdrop_xdnd_finished_event_after_id} + set _dodragdrop_xdnd_finished_event_after_id {} + variable _dodragdrop_drop_target + set _dodragdrop_drop_target 0 + variable _dragging + if {$_dragging} {set _dragging 0} + + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + if {[dict size $event]} { + foreach key {target accept action} { + set $key [dict get $event $key] + } + set _dodragdrop_drop_target_accepts_drop $accept + set _dodragdrop_drop_target_accepts_action $action + } else { + set _dodragdrop_drop_target_accepts_drop 0 + } + if {!$_dodragdrop_drop_target_accepts_drop} { + set _dodragdrop_drop_target_accepts_action refuse_drop + } + #DBG debug "XdndFinished: $event" +};# xdnd::_HandleXdndFinished + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndLeave +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndLeave {} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_target_proxy + #DBG debug "XdndLeave: $_dodragdrop_drop_target" + _send_XdndLeave $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy + set _dodragdrop_drop_target 0 + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + set _dodragdrop_drop_target_accepts_drop 0 + set _dodragdrop_drop_target_accepts_action refuse_drop + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + _update_cursor +};# xdnd::_SendXdndLeave + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndDrop +# ---------------------------------------------------------------------------- +proc 
xdnd::_SendXdndDrop {} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} { + ## The mouse has been released over a widget that does not accept drops. + _HandleXdndFinished {} + return + } + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} {return} + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + + set _dodragdrop_drop_occured 1 + _update_cursor clock + + if {!$_dodragdrop_drop_target_accepts_drop} { + _SendXdndLeave + _HandleXdndFinished {} + return + } + #DBG debug "XdndDrop: $_dodragdrop_drop_target" + variable _dodragdrop_drop_timestamp + set _dodragdrop_drop_timestamp [_send_XdndDrop \ + $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy] + set _dodragdrop_drop_target 0 + #DBG debug "XdndDrop: $_dodragdrop_drop_target" + ## Arrange a timeout for receiving XdndFinished... + variable _dodragdrop_xdnd_finished_event_after_id + set _dodragdrop_xdnd_finished_event_after_id \ + [after 10000 [list ::tkdnd::xdnd::_HandleXdndFinished {}]] +};# xdnd::_SendXdndDrop + +# ---------------------------------------------------------------------------- +# Command xdnd::_update_cursor +# ---------------------------------------------------------------------------- +proc xdnd::_update_cursor { {cursor {}}} { + #DBG debug "_update_cursor $cursor" + variable _dodragdrop_current_cursor + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + + if {![string length $cursor]} { + set cursor refuse_drop + if {$_dodragdrop_drop_target_accepts_drop} { + set cursor $_dodragdrop_drop_target_accepts_action + } + } + if {![string equal $cursor $_dodragdrop_current_cursor]} { + _set_pointer_cursor $_dodragdrop_drag_source [_get_mapped_cursor $cursor] + set _dodragdrop_current_cursor $cursor + } +};# xdnd::_update_cursor + +# ---------------------------------------------------------------------------- +# Command xdnd::_get_mapped_cursor +# ---------------------------------------------------------------------------- +proc xdnd::_get_mapped_cursor { cursor } { + variable _dodragdrop_cursor_map + variable _dodragdrop_drag_source + ## Is there a custom cursor map? + if {[catch {dict get $_dodragdrop_cursor_map $cursor} mapped]} { + ## Do not report the error, ignore the mapping. + set mapped $cursor + } + ## Is there a cursor feedback command? 
+ set cmd [bind $_dodragdrop_drag_source <>] + if {$cmd ne ""} { + set code [catch {uplevel \#0 $cmd \{$_dodragdrop_drag_source\} \{$cursor\} \{$mapped\}} info options] + #DBG debug "CODE: $code ---- $info" + switch -exact -- $code { + 0 {if {$info ne ""} {set mapped $info}} + default { + return -options $options $info + } + } + } + return $mapped +};# xdnd::_get_mapped_cursor + +# ---------------------------------------------------------------------------- +# Command xdnd::_default_action +# ---------------------------------------------------------------------------- +proc xdnd::_default_action {event} { + variable _dodragdrop_actions + variable _dodragdrop_actions_len + if {$_dodragdrop_actions_len == 1} {return [lindex $_dodragdrop_actions 0]} + + set alt [dict get $event Alt] + set shift [dict get $event Shift] + set control [dict get $event Control] + + if {$shift && $control && [lsearch $_dodragdrop_actions link] != -1} { + return link + } elseif {$control && [lsearch $_dodragdrop_actions copy] != -1} { + return copy + } elseif {$shift && [lsearch $_dodragdrop_actions move] != -1} { + return move + } elseif {$alt && [lsearch $_dodragdrop_actions link] != -1} { + return link + } + return default +};# xdnd::_default_action + +# ---------------------------------------------------------------------------- +# Command xdnd::getFormatForType +# ---------------------------------------------------------------------------- +proc xdnd::getFormatForType {type} { + switch -glob [string tolower $type] { + text/plain\;charset=utf-8 - + text/html\;charset=utf-8 - + utf8_string {set format UTF8_STRING} + text/html - + text/plain - + string - + text - + compound_text {set format STRING} + text/uri-list* {set format UTF8_STRING} + application/x-color {set format $type} + default {set format $type} + } + return $format +};# xdnd::getFormatForType + +# ---------------------------------------------------------------------------- +# Command xdnd::registerSelectionHandler +# ---------------------------------------------------------------------------- +proc xdnd::registerSelectionHandler {source types} { + foreach type $types { + selection handle -selection XdndSelection \ + -type $type \ + -format [getFormatForType $type] \ + $source [list ::tkdnd::xdnd::_SendData $type] + } +};# xdnd::registerSelectionHandler + +# ---------------------------------------------------------------------------- +# Command xdnd::unregisterSelectionHandler +# ---------------------------------------------------------------------------- +proc xdnd::unregisterSelectionHandler {source types} { + foreach type $types { + catch { + selection handle -selection XdndSelection \ + -type $type \ + -format [getFormatForType $type] \ + $source {} + } + } +};# xdnd::unregisterSelectionHandler + +# ---------------------------------------------------------------------------- +# Command xdnd::_convert_to_unsigned +# ---------------------------------------------------------------------------- +proc xdnd::_convert_to_unsigned {data format} { + switch $format { + 8 { set mask 0xff } + 16 { set mask 0xffff } + 32 { set mask 0xffffff } + default {error "unsupported format $format"} + } + ## Convert signed integer into unsigned... 
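+  ## For example, with format 8 a signed byte of -1 is masked to 255 (0xff).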
+ set d [list] + foreach num $data { + lappend d [expr { $num & $mask }] + } + return $d +};# xdnd::_convert_to_unsigned + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendData +# ---------------------------------------------------------------------------- +proc xdnd::_SendData {type offset bytes args} { + variable _dodragdrop_drag_source + variable _dodragdrop_types + variable _dodragdrop_data + variable _dodragdrop_transfer_data + + ## The variable _dodragdrop_data contains a list of data, one for each + ## type in the _dodragdrop_types variable. We have to search types, and find + ## the corresponding entry in the _dodragdrop_data list. + set index [lsearch $_dodragdrop_types $type] + if {$index < 0} { + error "unable to locate data suitable for type \"$type\"" + } + set typed_data [lindex $_dodragdrop_data $index] + set format 8 + if {$offset == 0} { + ## Prepare the data to be transferred... + switch -glob $type { + text/plain* - UTF8_STRING - STRING - TEXT - COMPOUND_TEXT { + binary scan [encoding convertto utf-8 $typed_data] \ + c* _dodragdrop_transfer_data + set _dodragdrop_transfer_data \ + [_convert_to_unsigned $_dodragdrop_transfer_data $format] + } + text/uri-list* { + set files [list] + foreach file $typed_data { + switch -glob $file { + *://* {lappend files $file} + default {lappend files file://$file} + } + } + binary scan [encoding convertto utf-8 "[join $files \r\n]\r\n"] \ + c* _dodragdrop_transfer_data + set _dodragdrop_transfer_data \ + [_convert_to_unsigned $_dodragdrop_transfer_data $format] + } + application/x-color { + set format 16 + ## Try to understand the provided data: we accept a standard Tk colour, + ## or a list of 3 values (red green blue) or a list of 4 values + ## (red green blue opacity). + switch [llength $typed_data] { + 1 { set color [winfo rgb $_dodragdrop_drag_source $typed_data] + lappend color 65535 } + 3 { set color $typed_data; lappend color 65535 } + 4 { set color $typed_data } + default {error "unknown color data: \"$typed_data\""} + } + ## Convert the 4 elements into 16 bit values... + set _dodragdrop_transfer_data [list] + foreach c $color { + lappend _dodragdrop_transfer_data [format 0x%04X $c] + } + } + default { + set format 32 + binary scan $typed_data c* _dodragdrop_transfer_data + } + } + } + + ## + ## Data has been split into bytes. Count the bytes requested, and return them + ## + set data [lrange $_dodragdrop_transfer_data $offset [expr {$offset+$bytes-1}]] + switch $format { + 8 { + set data [encoding convertfrom utf-8 [binary format c* $data]] + } + 16 { + variable _dodragdrop_selection_requestor + if {$_dodragdrop_selection_requestor} { + ## Tk selection cannot process this format (only 8 & 32 supported). + ## Call our XChangeProperty... 
+ set numItems [llength $data] + variable _dodragdrop_selection_property + variable _dodragdrop_selection_selection + variable _dodragdrop_selection_target + variable _dodragdrop_selection_time + XChangeProperty $_dodragdrop_drag_source \ + $_dodragdrop_selection_requestor \ + $_dodragdrop_selection_property \ + $_dodragdrop_selection_target \ + $format \ + $_dodragdrop_selection_time \ + $data $numItems + return -code break + } + } + 32 { + } + default { + error "unsupported format $format" + } + } + #DBG debug "SendData: $type $offset $bytes $args ($typed_data)" + #DBG debug " $data" + return $data +};# xdnd::_SendData diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_utils.tcl b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_utils.tcl new file mode 100644 index 0000000000000000000000000000000000000000..ef1e50266805dae1fe37fa22f1572963387938c8 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_utils.tcl @@ -0,0 +1,256 @@ +# +# tkdnd_utils.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. 
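+#
+# Illustrative usage sketch (the widget name .t is hypothetical, and it is
+# assumed the package index provides tkdnd::utils, as the win64 pkgIndex.tcl
+# in this patch does): the helpers defined in this file let a text widget's
+# selection act as a drag source and insert dropped text at the pointer:
+#
+#   package require tkdnd::utils
+#   ::tkdnd::text::drag_source register .t
+#   ::tkdnd::text::drop_target register .t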
+# + +package require tkdnd +namespace eval ::tkdnd { + namespace eval utils { + };# namespace ::tkdnd::utils + namespace eval text { + variable _drag_tag tkdnd::drag::selection::tag + variable _state {} + variable _drag_source_widget {} + variable _drop_target_widget {} + variable _now_dragging 0 + };# namespace ::tkdnd::text +};# namespace ::tkdnd + +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag clear 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag motion 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_TextAutoScan %W %x %y} +bind TkDND_Drag_Text1 {tkdnd::text::_begin_drag reset 1 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag clear 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag motion 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text2 {tkdnd::text::_begin_drag reset 2 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag clear 3 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag motion 3 %W %s %X %Y %x %y} +bind TkDND_Drag_Text3 {tkdnd::text::_begin_drag reset 3 %W %s %X %Y %x %y} + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::drag_source +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::drag_source { mode path { types DND_Text } { event 1 } { tagprefix TkDND_Drag_Text } { tag sel } } { + switch -exact -- $mode { + register { + $path tag bind $tag \ + [list tkdnd::text::_begin_drag press ${event} %W %s %X %Y %x %y] + ## Set a binding to the widget, to put selection as data... + bind $path <> \ + [list ::tkdnd::text::DragInitCmd $path %t $tag] + ## Set a binding to the widget, to remove selection if action is move... + bind $path <> \ + [list ::tkdnd::text::DragEndCmd $path %A $tag] + } + unregister { + $path tag bind $tag {} + bind $path <> {} + bind $path <> {} + } + } + ::tkdnd::drag_source $mode $path $types $event $tagprefix +};# ::tkdnd::text::drag_source + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::drop_target +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::drop_target { mode path { types DND_Text } } { + switch -exact -- $mode { + register { + bind $path <> \ + [list ::tkdnd::text::DropPosition $path %X %Y %A %a %m] + bind $path <> \ + [list ::tkdnd::text::Drop $path %D %X %Y %A %a %m] + } + unregister { + bind $path <> {} + bind $path <> {} + bind $path <> {} + bind $path <> {} + } + } + ::tkdnd::drop_target $mode $path $types +};# ::tkdnd::text::drop_target + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DragInitCmd +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DragInitCmd { path { types DND_Text } { tag sel } { actions { copy move } } } { + ## Save the selection indices... 
+ variable _drag_source_widget + variable _drop_target_widget + set _drag_source_widget $path + set _drop_target_widget {} + _save_selection $path $tag + list $actions $types [$path get $tag.first $tag.last] +};# ::tkdnd::text::DragInitCmd + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DragEndCmd +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DragEndCmd { path action { tag sel } } { + variable _drag_source_widget + variable _drop_target_widget + set _drag_source_widget {} + set _drop_target_widget {} + _restore_selection $path $tag + switch -exact -- $action { + move { + ## Delete the original selected text... + variable _selection_first + variable _selection_last + $path delete $_selection_first $_selection_last + } + } +};# ::tkdnd::text::DragEndCmd + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DropPosition +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DropPosition { path X Y action actions keys} { + variable _drag_source_widget + variable _drop_target_widget + set _drop_target_widget $path + ## This check is primitive, a more accurate one is needed! + if {$path eq $_drag_source_widget} { + ## This is a drag within the same widget! Set action to move... + if {"move" in $actions} {set action move} + } + incr X -[winfo rootx $path] + incr Y -[winfo rooty $path] + $path mark set insert @$X,$Y; update + return $action +};# ::tkdnd::text::DropPosition + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::Drop +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::Drop { path data X Y action actions keys } { + incr X -[winfo rootx $path] + incr Y -[winfo rooty $path] + $path mark set insert @$X,$Y + $path insert [$path index insert] $data + return $action +};# ::tkdnd::text::Drop + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_save_selection +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_save_selection { path tag} { + variable _drag_tag + variable _selection_first + variable _selection_last + variable _selection_tag $tag + set _selection_first [$path index $tag.first] + set _selection_last [$path index $tag.last] + $path tag add $_drag_tag $_selection_first $_selection_last + $path tag configure $_drag_tag \ + -background [$path tag cget $tag -background] \ + -foreground [$path tag cget $tag -foreground] +};# tkdnd::text::_save_selection + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_restore_selection +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_restore_selection { path tag} { + variable _drag_tag + variable _selection_first + variable _selection_last + $path tag delete $_drag_tag + $path tag remove $tag 0.0 end + #$path tag add $tag $_selection_first $_selection_last +};# tkdnd::text::_restore_selection + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_begin_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_begin_drag { event button source state X Y x y } { + variable _drop_target_widget + variable _state + # puts 
"::tkdnd::text::_begin_drag $event $button $source $state $X $Y $x $y" + + switch -exact -- $event { + clear { + switch -exact -- $_state { + press { + ## Do not execute other bindings, as they will erase selection... + return -code break + } + } + set _state clear + } + motion { + variable _now_dragging + if {$_now_dragging} {return -code break} + if { [string equal $_state "press"] } { + variable _x0; variable _y0 + if { abs($_x0-$X) > ${::tkdnd::_dx} || abs($_y0-$Y) > ${::tkdnd::_dy} } { + set _state "done" + set _drop_target_widget {} + set _now_dragging 1 + set code [catch { + ::tkdnd::_init_drag $button $source $state $X $Y $x $y + } info options] + set _drop_target_widget {} + set _now_dragging 0 + if {$code != 0} { + ## Something strange occurred... + return -options $options $info + } + } + return -code break + } + set _state clear + } + press { + variable _x0; variable _y0 + set _x0 $X + set _y0 $Y + set _state "press" + } + reset { + set _state {} + } + } + if {$source eq $_drop_target_widget} {return -code break} + return -code continue +};# tkdnd::text::_begin_drag + +proc ::tkdnd::text::_TextAutoScan {w x y} { + variable _now_dragging + if {$_now_dragging} {return -code break} + return -code continue +};# tkdnd::text::_TextAutoScan diff --git a/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_windows.tcl b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_windows.tcl new file mode 100644 index 0000000000000000000000000000000000000000..a1d01f3a2c438eaf3f676437d4d4ba89b3ba64f0 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/osx_arm/tkdnd_windows.tcl @@ -0,0 +1,167 @@ +# +# tkdnd_windows.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval olednd { + + proc initialise { } { + ## Mapping from platform types to TkDND types... 
+ ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + CF_UNICODETEXT DND_Text \ + CF_TEXT DND_Text \ + CF_HDROP DND_Files \ + UniformResourceLocator DND_URL \ + CF_HTML DND_HTML \ + {HTML Format} DND_HTML \ + CF_RTF DND_RTF \ + CF_RTFTEXT DND_RTF \ + {Rich Text Format} DND_RTF \ + ] + # FileGroupDescriptorW DND_Files \ + # FileGroupDescriptor DND_Files \ + + ## Mapping from TkDND types to platform types... + ::tkdnd::generic::initialise_tkdnd_to_platform_types [list \ + DND_Text {CF_UNICODETEXT CF_TEXT} \ + DND_Files {CF_HDROP} \ + DND_URL {UniformResourceLocator UniformResourceLocatorW} \ + DND_HTML {CF_HTML {HTML Format}} \ + DND_RTF {CF_RTF CF_RTFTEXT {Rich Text Format}} \ + ] + };# initialise + +};# namespace olednd + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragEnter +# ---------------------------------------------------------------------------- +proc olednd::HandleDragEnter { drop_target typelist actionlist pressedkeys + rootX rootY codelist { data {} } } { + ::tkdnd::generic::SetDroppedData $data + focus $drop_target + ::tkdnd::generic::HandleEnter $drop_target 0 $typelist \ + $codelist $actionlist $pressedkeys + set action [::tkdnd::generic::HandlePosition $drop_target {} \ + $pressedkeys $rootX $rootY] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDragEnter + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragOver +# ---------------------------------------------------------------------------- +proc olednd::HandleDragOver { drop_target pressedkeys rootX rootY } { + set action [::tkdnd::generic::HandlePosition $drop_target {} \ + $pressedkeys $rootX $rootY] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDragOver + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragLeave +# ---------------------------------------------------------------------------- +proc olednd::HandleDragLeave { drop_target } { + ::tkdnd::generic::HandleLeave + if {$::tkdnd::_auto_update} {update idletasks} +};# olednd::HandleDragLeave + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDrop +# ---------------------------------------------------------------------------- +proc olednd::HandleDrop { drop_target pressedkeys rootX rootY type data } { + ::tkdnd::generic::SetDroppedData [normalise_data $type $data] + set action [::tkdnd::generic::HandleDrop $drop_target {} \ + $pressedkeys $rootX $rootY 0] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDrop + +# ---------------------------------------------------------------------------- +# Command olednd::GetDataType +# ---------------------------------------------------------------------------- +proc olednd::GetDataType { drop_target typelist } { + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [::tkdnd::generic::FindWindowWithCommonTypes $drop_target $typelist] {break} + lindex $common_drag_source_types 0 +};# olednd::GetDataType + +# ---------------------------------------------------------------------------- +# Command olednd::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc olednd::GetDragSourceCommonTypes { drop_target } { + ::tkdnd::generic::GetDragSourceCommonTypes +};# olednd::GetDragSourceCommonTypes + +# 
---------------------------------------------------------------------------- +# Command olednd::platform_specific_types +# ---------------------------------------------------------------------------- +proc olednd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # olednd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command olednd::platform_specific_type +# ---------------------------------------------------------------------------- +proc olednd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # olednd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command olednd::platform_independent_type +# ---------------------------------------------------------------------------- +proc olednd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # olednd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command olednd::normalise_data +# ---------------------------------------------------------------------------- +proc olednd::normalise_data { type data } { + switch [lindex [::tkdnd::generic::platform_independent_type $type] 0] { + DND_Text {return $data} + DND_Files {return $data} + DND_HTML {return [encoding convertfrom utf-8 $data]} + default {return $data} + } +}; # olednd::normalise_data diff --git a/gui_data/tkinterdnd2/tkdnd/win64/libtkdnd2.9.2.dll b/gui_data/tkinterdnd2/tkdnd/win64/libtkdnd2.9.2.dll new file mode 100644 index 0000000000000000000000000000000000000000..c9cc5abbed923f273a5d22ca7590e45e771b11b5 Binary files /dev/null and b/gui_data/tkinterdnd2/tkdnd/win64/libtkdnd2.9.2.dll differ diff --git a/gui_data/tkinterdnd2/tkdnd/win64/pkgIndex.tcl b/gui_data/tkinterdnd2/tkdnd/win64/pkgIndex.tcl new file mode 100644 index 0000000000000000000000000000000000000000..733ae7d328e59ecd3e70d7a421e4277a29884723 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/win64/pkgIndex.tcl @@ -0,0 +1,7 @@ +package ifneeded tkdnd 2.9.2 \ + "source \{$dir/tkdnd.tcl\} ; \ + tkdnd::initialise \{$dir\} libtkdnd2.9.2[info sharedlibextension] tkdnd" + +package ifneeded tkdnd::utils 2.9.2 \ + "source \{$dir/tkdnd_utils.tcl\} ; \ + package provide tkdnd::utils 2.9.2" \ No newline at end of file diff --git a/gui_data/tkinterdnd2/tkdnd/win64/tkdnd.tcl b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd.tcl new file mode 100644 index 0000000000000000000000000000000000000000..12d1dd289de6b78e83922a1b1653ef6165dc70db --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd.tcl @@ -0,0 +1,469 @@ +# +# tkdnd.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +package require Tk + +namespace eval ::tkdnd { + variable _topw ".drag" + variable _tabops + variable _state + variable _x0 + variable _y0 + variable _platform_namespace + variable _drop_file_temp_dir + variable _auto_update 1 + variable _dx 3 ;# The difference in pixels before a drag is initiated. + variable _dy 3 ;# The difference in pixels before a drag is initiated. + + variable _windowingsystem + + bind TkDND_Drag1 {tkdnd::_begin_drag press 1 %W %s %X %Y %x %y} + bind TkDND_Drag1 {tkdnd::_begin_drag motion 1 %W %s %X %Y %x %y} + bind TkDND_Drag2 {tkdnd::_begin_drag press 2 %W %s %X %Y %x %y} + bind TkDND_Drag2 {tkdnd::_begin_drag motion 2 %W %s %X %Y %x %y} + bind TkDND_Drag3 {tkdnd::_begin_drag press 3 %W %s %X %Y %x %y} + bind TkDND_Drag3 {tkdnd::_begin_drag motion 3 %W %s %X %Y %x %y} + + # ---------------------------------------------------------------------------- + # Command tkdnd::initialise: Initialise the TkDND package. + # ---------------------------------------------------------------------------- + proc initialise { dir PKG_LIB_FILE PACKAGE_NAME} { + variable _platform_namespace + variable _drop_file_temp_dir + variable _windowingsystem + global env + + switch [tk windowingsystem] { + x11 { + set _windowingsystem x11 + } + win32 - + windows { + set _windowingsystem windows + } + aqua { + set _windowingsystem aqua + } + default { + error "unknown Tk windowing system" + } + } + + ## Get User's home directory: We try to locate the proper path from a set of + ## environmental variables... + foreach var {HOME HOMEPATH USERPROFILE ALLUSERSPROFILE APPDATA} { + if {[info exists env($var)]} { + if {[file isdirectory $env($var)]} { + set UserHomeDir $env($var) + break + } + } + } + + ## Should use [tk windowingsystem] instead of tcl platform array: + ## OS X returns "unix," but that's not useful because it has its own + ## windowing system, aqua + ## Under windows we have to also combine HOMEDRIVE & HOMEPATH... 
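+    ## e.g. HOMEDRIVE=C: and HOMEPATH=\Users\me combine to C:\Users\me.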
+ if {![info exists UserHomeDir] && + [string equal $_windowingsystem windows] && + [info exists env(HOMEDRIVE)] && [info exists env(HOMEPATH)]} { + if {[file isdirectory $env(HOMEDRIVE)$env(HOMEPATH)]} { + set UserHomeDir $env(HOMEDRIVE)$env(HOMEPATH) + } + } + ## Have we located the needed path? + if {![info exists UserHomeDir]} { + set UserHomeDir [pwd] + } + set UserHomeDir [file normalize $UserHomeDir] + + ## Try to locate a temporary directory... + foreach var {TKDND_TEMP_DIR TEMP TMP} { + if {[info exists env($var)]} { + if {[file isdirectory $env($var)] && [file writable $env($var)]} { + set _drop_file_temp_dir $env($var) + break + } + } + } + if {![info exists _drop_file_temp_dir]} { + foreach _dir [list "$UserHomeDir/Local Settings/Temp" \ + "$UserHomeDir/AppData/Local/Temp" \ + /tmp \ + C:/WINDOWS/Temp C:/Temp C:/tmp \ + D:/WINDOWS/Temp D:/Temp D:/tmp] { + if {[file isdirectory $_dir] && [file writable $_dir]} { + set _drop_file_temp_dir $_dir + break + } + } + } + if {![info exists _drop_file_temp_dir]} { + set _drop_file_temp_dir $UserHomeDir + } + set _drop_file_temp_dir [file native $_drop_file_temp_dir] + + source $dir/tkdnd_generic.tcl + switch $_windowingsystem { + x11 { + source $dir/tkdnd_unix.tcl + set _platform_namespace xdnd + } + win32 - + windows { + source $dir/tkdnd_windows.tcl + set _platform_namespace olednd + } + aqua { + source $dir/tkdnd_macosx.tcl + set _platform_namespace macdnd + } + default { + error "unknown Tk windowing system" + } + } + load $dir/$PKG_LIB_FILE $PACKAGE_NAME + source $dir/tkdnd_compat.tcl + ${_platform_namespace}::initialise + };# initialise + + proc GetDropFileTempDirectory { } { + variable _drop_file_temp_dir + return $_drop_file_temp_dir + } + proc SetDropFileTempDirectory { dir } { + variable _drop_file_temp_dir + set _drop_file_temp_dir $dir + } + +};# namespace ::tkdnd + +# ---------------------------------------------------------------------------- +# Command tkdnd::drag_source +# ---------------------------------------------------------------------------- +proc ::tkdnd::drag_source { mode path { types {} } { event 1 } + { tagprefix TkDND_Drag } } { + set tags [bindtags $path] + set idx [lsearch $tags ${tagprefix}$event] + switch -- $mode { + register { + if { $idx != -1 } { + ## No need to do anything! 
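+        ## The widget's bindtags already include ${tagprefix}$event (with the
+        ## defaults, TkDND_Drag1), so only the advertised types are refreshed
+        ## by _drag_source_update_types below.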
+ # bindtags $path [lreplace $tags $idx $idx ${tagprefix}$event] + } else { + bindtags $path [linsert $tags 1 ${tagprefix}$event] + } + _drag_source_update_types $path $types + } + unregister { + if { $idx != -1 } { + bindtags $path [lreplace $tags $idx $idx] + } + } + } +};# tkdnd::drag_source + +proc ::tkdnd::_drag_source_update_types { path types } { + set types [platform_specific_types $types] + set old_types [bind $path <>] + foreach type $types { + if {[lsearch $old_types $type] < 0} {lappend old_types $type} + } + bind $path <> $old_types +};# ::tkdnd::_drag_source_update_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::drop_target +# ---------------------------------------------------------------------------- +proc ::tkdnd::drop_target { mode path { types {} } } { + variable _windowingsystem + set types [platform_specific_types $types] + switch -- $mode { + register { + switch $_windowingsystem { + x11 { + _register_types $path [winfo toplevel $path] $types + } + win32 - + windows { + _RegisterDragDrop $path + bind $path {+ tkdnd::_RevokeDragDrop %W} + } + aqua { + macdnd::registerdragwidget [winfo toplevel $path] $types + } + default { + error "unknown Tk windowing system" + } + } + set old_types [bind $path <>] + set new_types {} + foreach type $types { + if {[lsearch -exact $old_types $type] < 0} {lappend new_types $type} + } + if {[llength $new_types]} { + bind $path <> [concat $old_types $new_types] + } + } + unregister { + switch $_windowingsystem { + x11 { + } + win32 - + windows { + _RevokeDragDrop $path + } + aqua { + error todo + } + default { + error "unknown Tk windowing system" + } + } + bind $path <> {} + } + } +};# tkdnd::drop_target + +# ---------------------------------------------------------------------------- +# Command tkdnd::_begin_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_begin_drag { event button source state X Y x y } { + variable _x0 + variable _y0 + variable _state + + switch -- $event { + press { + set _x0 $X + set _y0 $Y + set _state "press" + } + motion { + if { ![info exists _state] } { + # This is just extra protection. There seem to be + # rare cases where the motion comes before the press. + return + } + if { [string equal $_state "press"] } { + variable _dx + variable _dy + if { abs($_x0-$X) > ${_dx} || abs($_y0-$Y) > ${_dy} } { + set _state "done" + _init_drag $button $source $state $X $Y $x $y + } + } + } + } +};# tkdnd::_begin_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::_init_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_init_drag { button source state rootX rootY X Y } { + # Call the <> binding. 
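+  ## The bound script is expected to return either a three-element list
+  ## {actions types data}, e.g. {copy move} DND_Text {some text}, or a
+  ## two-element list {actions {type data type data ...}}; both forms are
+  ## handled below, and a single-element refuse_drop return aborts the drag.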
+ set cmd [bind $source <>] + # puts "CMD: $cmd" + if {[string length $cmd]} { + set cmd [string map [list %W $source %X $rootX %Y $rootY %x $X %y $Y \ + %S $state %e <> %A \{\} %% % \ + %t [bind $source <>]] $cmd] + set code [catch {uplevel \#0 $cmd} info options] + # puts "CODE: $code ---- $info" + switch -exact -- $code { + 0 {} + 3 - 4 { + # FRINK: nocheck + return + } + default { + return -options $options $info + } + } + + set len [llength $info] + if {$len == 3} { + foreach { actions types _data } $info { break } + set types [platform_specific_types $types] + set data [list] + foreach type $types { + lappend data $_data + } + unset _data + } elseif {$len == 2} { + foreach { actions _data } $info { break } + set data [list]; set types [list] + foreach {t d} $_data { + foreach t [platform_specific_types $t] { + lappend types $t; lappend data $d + } + } + unset _data t d + } else { + if {$len == 1 && [string equal [lindex $actions 0] "refuse_drop"]} { + return + } + error "not enough items in the result of the <>\ + event binding. Either 2 or 3 items are expected. The command + executed was: \"$cmd\"\nResult was: \"$info\"" + } + set action refuse_drop + variable _windowingsystem + # puts "Source: \"$source\"" + # puts "Types: \"[join $types {", "}]\"" + # puts "Actions: \"[join $actions {", "}]\"" + # puts "Button: \"$button\"" + # puts "Data: \"[string range $data 0 100]\"" + switch $_windowingsystem { + x11 { + set action [xdnd::_dodragdrop $source $actions $types $data $button] + } + win32 - + windows { + set action [_DoDragDrop $source $actions $types $data $button] + } + aqua { + set action [macdnd::dodragdrop $source $actions $types $data $button] + } + default { + error "unknown Tk windowing system" + } + } + ## Call _end_drag to notify the widget of the result of the drag + ## operation... + _end_drag $button $source {} $action {} $data {} $state $rootX $rootY $X $Y + } +};# tkdnd::_init_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::_end_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::_end_drag { button source target action type data result + state rootX rootY X Y } { + set rootX 0 + set rootY 0 + # Call the <> binding. + set cmd [bind $source <>] + if {[string length $cmd]} { + set cmd [string map [list %W $source %X $rootX %Y $rootY %x $X %y $Y %% % \ + %S $state %e <> %A \{$action\}] $cmd] + set info [uplevel \#0 $cmd] + # if { $info != "" } { + # variable _windowingsystem + # foreach { actions types data } $info { break } + # set types [platform_specific_types $types] + # switch $_windowingsystem { + # x11 { + # error "dragging from Tk widgets not yet supported" + # } + # win32 - + # windows { + # set action [_DoDragDrop $source $actions $types $data $button] + # } + # aqua { + # macdnd::dodragdrop $source $actions $types $data + # } + # default { + # error "unknown Tk windowing system" + # } + # } + # ## Call _end_drag to notify the widget of the result of the drag + # ## operation... 
+ # _end_drag $button $source {} $action {} $data {} $state $rootX $rootY + # } + } +};# tkdnd::_end_drag + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_specific_types { types } { + variable _platform_namespace + ${_platform_namespace}::platform_specific_types $types +}; # tkdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + variable _platform_namespace + ${_platform_namespace}::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_specific_type { type } { + variable _platform_namespace + ${_platform_namespace}::platform_specific_type $type +}; # tkdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_type { type } { + variable _platform_namespace + ${_platform_namespace}::platform_independent_type $type +}; # tkdnd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::bytes_to_string +# ---------------------------------------------------------------------------- +proc ::tkdnd::bytes_to_string { bytes } { + set string {} + foreach byte $bytes { + append string [binary format c $byte] + } + return $string +};# tkdnd::bytes_to_string + +# ---------------------------------------------------------------------------- +# Command tkdnd::urn_unquote +# ---------------------------------------------------------------------------- +proc ::tkdnd::urn_unquote {url} { + set result "" + set start 0 + while {[regexp -start $start -indices {%[0-9a-fA-F]{2}} $url match]} { + foreach {first last} $match break + append result [string range $url $start [expr {$first - 1}]] + append result [format %c 0x[string range $url [incr first] $last]] + set start [incr last] + } + append result [string range $url $start end] + return [encoding convertfrom utf-8 $result] +};# tkdnd::urn_unquote diff --git a/gui_data/tkinterdnd2/tkdnd/win64/tkdnd2.9.2.lib b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd2.9.2.lib new file mode 100644 index 0000000000000000000000000000000000000000..c5a956bfdf01319ce0574a1ae86e13454ce22a5f Binary files /dev/null and b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd2.9.2.lib differ diff --git a/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_compat.tcl b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_compat.tcl new file mode 100644 index 0000000000000000000000000000000000000000..efc96f7bb2fe74a9bafd1e79681c275c8ea0f8fc --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_compat.tcl @@ -0,0 +1,160 @@ +# +# tkdnd_compat.tcl -- +# +# This file implements some utility procedures, to support older versions +# of the TkDND package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. 
+# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval compat { + +};# namespace compat + +# ---------------------------------------------------------------------------- +# Command ::dnd +# ---------------------------------------------------------------------------- +proc ::dnd {method window args} { + switch $method { + bindtarget { + switch [llength $args] { + 0 {return [tkdnd::compat::bindtarget0 $window]} + 1 {return [tkdnd::compat::bindtarget1 $window [lindex $args 0]]} + 2 {return [tkdnd::compat::bindtarget2 $window [lindex $args 0] \ + [lindex $args 1]]} + 3 {return [tkdnd::compat::bindtarget3 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2]]} + 4 {return [tkdnd::compat::bindtarget4 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2] [lindex $args 3]]} + } + } + cleartarget { + return [tkdnd::compat::cleartarget $window] + } + bindsource { + switch [llength $args] { + 0 {return [tkdnd::compat::bindsource0 $window]} + 1 {return [tkdnd::compat::bindsource1 $window [lindex $args 0]]} + 2 {return [tkdnd::compat::bindsource2 $window [lindex $args 0] \ + [lindex $args 1]]} + 3 {return [tkdnd::compat::bindsource3 $window [lindex $args 0] \ + [lindex $args 1] [lindex $args 2]]} + } + } + clearsource { + return [tkdnd::compat::clearsource $window] + } + drag { + return [tkdnd::_init_drag 1 $window "press" 0 0 0 0] + } + } + error "invalid number of arguments!" 
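+  ## Illustrative legacy-API sketch (the widget name .ent is hypothetical):
+  ##   dnd bindsource .ent text/plain {.ent get}
+  ## registers .ent as a drag source offering plain text through the
+  ## compatibility procedures defined below.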
+};# ::dnd + +# ---------------------------------------------------------------------------- +# Command compat::bindtarget +# ---------------------------------------------------------------------------- +proc compat::bindtarget0 {window} { + return [bind $window <>] +};# compat::bindtarget0 + +proc compat::bindtarget1 {window type} { + return [bindtarget2 $window $type ] +};# compat::bindtarget1 + +proc compat::bindtarget2 {window type event} { + switch $event { + {return [bind $window <>]} + {return [bind $window <>]} + {return [bind $window <>]} + {return [bind $window <>]} + } +};# compat::bindtarget2 + +proc compat::bindtarget3 {window type event script} { + set type [normalise_type $type] + ::tkdnd::drop_target register $window [list $type] + switch $event { + {return [bind $window <> $script]} + {return [bind $window <> $script]} + {return [bind $window <> $script]} + {return [bind $window <> $script]} + } +};# compat::bindtarget3 + +proc compat::bindtarget4 {window type event script priority} { + return [bindtarget3 $window $type $event $script] +};# compat::bindtarget4 + +proc compat::normalise_type { type } { + switch $type { + text/plain - + {text/plain;charset=UTF-8} - + Text {return DND_Text} + text/uri-list - + Files {return DND_Files} + default {return $type} + } +};# compat::normalise_type + +# ---------------------------------------------------------------------------- +# Command compat::bindsource +# ---------------------------------------------------------------------------- +proc compat::bindsource0 {window} { + return [bind $window <>] +};# compat::bindsource0 + +proc compat::bindsource1 {window type} { + return [bindsource2 $window $type ] +};# compat::bindsource1 + +proc compat::bindsource2 {window type script} { + set type [normalise_type $type] + ::tkdnd::drag_source register $window $type + bind $window <> "list {copy} {%t} \[$script\]" +};# compat::bindsource2 + +proc compat::bindsource3 {window type script priority} { + return [bindsource2 $window $type $script] +};# compat::bindsource3 + +# ---------------------------------------------------------------------------- +# Command compat::cleartarget +# ---------------------------------------------------------------------------- +proc compat::cleartarget {window} { +};# compat::cleartarget + +# ---------------------------------------------------------------------------- +# Command compat::clearsource +# ---------------------------------------------------------------------------- +proc compat::clearsource {window} { +};# compat::clearsource diff --git a/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_generic.tcl b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_generic.tcl new file mode 100644 index 0000000000000000000000000000000000000000..698b464fc68e8a2e0e681f5bac32c4c63338f2c3 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_generic.tcl @@ -0,0 +1,520 @@ +# +# tkdnd_generic.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. 
+# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval generic { + variable _types {} + variable _typelist {} + variable _codelist {} + variable _actionlist {} + variable _pressedkeys {} + variable _action {} + variable _common_drag_source_types {} + variable _common_drop_target_types {} + variable _drag_source {} + variable _drop_target {} + + variable _last_mouse_root_x 0 + variable _last_mouse_root_y 0 + + variable _tkdnd2platform + variable _platform2tkdnd + + proc debug {msg} { + puts $msg + };# debug + + proc initialise { } { + };# initialise + + proc initialise_platform_to_tkdnd_types { types } { + variable _platform2tkdnd + variable _tkdnd2platform + set _platform2tkdnd [dict create {*}$types] + set _tkdnd2platform [dict create] + foreach type [dict keys $_platform2tkdnd] { + dict lappend _tkdnd2platform [dict get $_platform2tkdnd $type] $type + } + };# initialise_platform_to_tkdnd_types + + proc initialise_tkdnd_to_platform_types { types } { + variable _tkdnd2platform + set _tkdnd2platform [dict create {*}$types] + };# initialise_tkdnd_to_platform_types + +};# namespace generic + +# ---------------------------------------------------------------------------- +# Command generic::HandleEnter +# ---------------------------------------------------------------------------- +proc generic::HandleEnter { drop_target drag_source typelist codelist + actionlist pressedkeys } { + variable _typelist; set _typelist $typelist + variable _pressedkeys; set _pressedkeys $pressedkeys + variable _action; set _action refuse_drop + variable _common_drag_source_types; set _common_drag_source_types {} + variable _common_drop_target_types; set _common_drop_target_types {} + variable _actionlist + variable _drag_source; set _drag_source $drag_source + variable _drop_target; set _drop_target {} + variable _actionlist; set _actionlist $actionlist + variable _codelist set _codelist $codelist + + variable _last_mouse_root_x; set _last_mouse_root_x 0 + variable _last_mouse_root_y; set _last_mouse_root_y 0 + # debug "\n===============================================================" + # debug "generic::HandleEnter: drop_target=$drop_target,\ + # drag_source=$drag_source,\ + # typelist=$typelist" + # debug "generic::HandleEnter: ACTION: default" + return 
default +};# generic::HandleEnter + +# ---------------------------------------------------------------------------- +# Command generic::HandlePosition +# ---------------------------------------------------------------------------- +proc generic::HandlePosition { drop_target drag_source pressedkeys + rootX rootY { time 0 } } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + + # debug "generic::HandlePosition: drop_target=$drop_target,\ + # _drop_target=$_drop_target, rootX=$rootX, rootY=$rootY" + + if {![info exists _drag_source] && ![string length $_drag_source]} { + # debug "generic::HandlePosition: no or empty _drag_source:\ + # return refuse_drop" + return refuse_drop + } + + if {$drag_source ne "" && $drag_source ne $_drag_source} { + debug "generic position event from unexpected source: $_drag_source\ + != $drag_source" + return refuse_drop + } + + set _pressedkeys $pressedkeys + + ## Does the new drop target support any of our new types? + # foreach {common_drag_source_types common_drop_target_types} \ + # [GetWindowCommonTypes $drop_target $_typelist] {break} + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [FindWindowWithCommonTypes $drop_target $_typelist] {break} + set data [GetDroppedData $time] + + # debug "\t($_drop_target) -> ($drop_target)" + if {$drop_target != $_drop_target} { + if {[string length $_drop_target]} { + ## Call the <> event. + # debug "\t<> on $_drop_target" + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A \{$_action\} %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D \{\} %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + uplevel \#0 $cmd + } + } + set _drop_target $drop_target + set _action refuse_drop + + if {[llength $common_drag_source_types]} { + set _action [lindex $_actionlist 0] + set _common_drag_source_types $common_drag_source_types + set _common_drop_target_types $common_drop_target_types + ## Drop target supports at least one type. Send a <>. 
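+      ## The script bound on the drop target receives %-substitutions such as
+      ## %W (window), %X/%Y (root coordinates), %D (data) and %A (current
+      ## action), and should return one of copy, move, link, ask, private,
+      ## refuse_drop or default; any other value is coerced to copy below.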
+ # puts "<> -> $drop_target" + set cmd [bind $drop_target <>] + if {[string length $cmd]} { + focus $drop_target + set cmd [string map [list %W $drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + } + } + } + + set _drop_target {} + if {[llength $common_drag_source_types]} { + set _common_drag_source_types $common_drag_source_types + set _common_drop_target_types $common_drop_target_types + set _drop_target $drop_target + ## Drop target supports at least one type. Send a <>. + set cmd [bind $drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + } + # Return values: copy, move, link, ask, private, refuse_drop, default + # debug "generic::HandlePosition: ACTION: $_action" + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action +};# generic::HandlePosition + +# ---------------------------------------------------------------------------- +# Command generic::HandleLeave +# ---------------------------------------------------------------------------- +proc generic::HandleLeave { } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + variable _last_mouse_root_x + variable _last_mouse_root_y + if {![info exists _drop_target]} {set _drop_target {}} + # debug "generic::HandleLeave: _drop_target=$_drop_target" + if {[info exists _drop_target] && [string length $_drop_target]} { + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target \ + %X $_last_mouse_root_x %Y $_last_mouse_root_y \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A \{$_action\} %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D \{\} %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + } + foreach var {_types _typelist _actionlist _pressedkeys _action + _common_drag_source_types _common_drop_target_types + 
_drag_source _drop_target} { + set $var {} + } +};# generic::HandleLeave + +# ---------------------------------------------------------------------------- +# Command generic::HandleDrop +# ---------------------------------------------------------------------------- +proc generic::HandleDrop {drop_target drag_source pressedkeys rootX rootY time } { + variable _types + variable _typelist + variable _codelist + variable _actionlist + variable _pressedkeys + variable _action + variable _common_drag_source_types + variable _common_drop_target_types + variable _drag_source + variable _drop_target + variable _last_mouse_root_x + variable _last_mouse_root_y + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + + set _pressedkeys $pressedkeys + + # puts "generic::HandleDrop: $time" + + if {![info exists _drag_source] && ![string length $_drag_source]} { + return refuse_drop + } + if {![info exists _drop_target] && ![string length $_drop_target]} { + return refuse_drop + } + if {![llength $_common_drag_source_types]} {return refuse_drop} + ## Get the dropped data. + set data [GetDroppedData $time] + ## Try to select the most specific <> event. + foreach type [concat $_common_drag_source_types $_common_drop_target_types] { + set type [platform_independent_type $type] + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + # Return values: copy, move, link, ask, private, refuse_drop + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action + } + } + set cmd [bind $_drop_target <>] + if {[string length $cmd]} { + set cmd [string map [list %W $_drop_target %X $rootX %Y $rootY \ + %CST \{$_common_drag_source_types\} \ + %CTT \{$_common_drop_target_types\} \ + %CPT \{[lindex [platform_independent_type [lindex $_common_drag_source_types 0]] 0]\} \ + %ST \{$_typelist\} %TT \{$_types\} \ + %A $_action %a \{$_actionlist\} \ + %b \{$_pressedkeys\} %m \{$_pressedkeys\} \ + %D [list $data] %e <> \ + %L \{$_typelist\} %% % \ + %t \{$_typelist\} %T \{[lindex $_common_drag_source_types 0]\} \ + %c \{$_codelist\} %C \{[lindex $_codelist 0]\} \ + ] $cmd] + set _action [uplevel \#0 $cmd] + } + # Return values: copy, move, link, ask, private, refuse_drop + switch -exact -- $_action { + copy - move - link - ask - private - refuse_drop - default {} + default {set _action copy} + } + return $_action +};# generic::HandleDrop + +# ---------------------------------------------------------------------------- +# Command generic::GetWindowCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetWindowCommonTypes { win typelist } { + set types [bind $win <>] + # debug ">> Accepted types: $win $_types" + set common_drag_source_types {} + set common_drop_target_types {} + if {[llength $types]} { + ## Examine the drop target types, to find at least one 
match with the drag + ## source types... + set supported_types [supported_types $typelist] + foreach type $types { + foreach matched [lsearch -glob -all -inline $supported_types $type] { + ## Drop target supports this type. + lappend common_drag_source_types $matched + lappend common_drop_target_types $type + } + } + } + list $common_drag_source_types $common_drop_target_types +};# generic::GetWindowCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::FindWindowWithCommonTypes +# ---------------------------------------------------------------------------- +proc generic::FindWindowWithCommonTypes { win typelist } { + set toplevel [winfo toplevel $win] + while {![string equal $win $toplevel]} { + foreach {common_drag_source_types common_drop_target_types} \ + [GetWindowCommonTypes $win $typelist] {break} + if {[llength $common_drag_source_types]} { + return [list $win $common_drag_source_types $common_drop_target_types] + } + set win [winfo parent $win] + } + ## We have reached the toplevel, which may be also a target (SF Bug #30) + foreach {common_drag_source_types common_drop_target_types} \ + [GetWindowCommonTypes $win $typelist] {break} + if {[llength $common_drag_source_types]} { + return [list $win $common_drag_source_types $common_drop_target_types] + } + return { {} {} {} } +};# generic::FindWindowWithCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::GetDroppedData +# ---------------------------------------------------------------------------- +proc generic::GetDroppedData { time } { + variable _dropped_data + return $_dropped_data +};# generic::GetDroppedData + +# ---------------------------------------------------------------------------- +# Command generic::SetDroppedData +# ---------------------------------------------------------------------------- +proc generic::SetDroppedData { data } { + variable _dropped_data + set _dropped_data $data +};# generic::SetDroppedData + +# ---------------------------------------------------------------------------- +# Command generic::GetDragSource +# ---------------------------------------------------------------------------- +proc generic::GetDragSource { } { + variable _drag_source + return $_drag_source +};# generic::GetDragSource + +# ---------------------------------------------------------------------------- +# Command generic::GetDropTarget +# ---------------------------------------------------------------------------- +proc generic::GetDropTarget { } { + variable _drop_target + return $_drop_target +};# generic::GetDropTarget + +# ---------------------------------------------------------------------------- +# Command generic::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetDragSourceCommonTypes { } { + variable _common_drag_source_types + return $_common_drag_source_types +};# generic::GetDragSourceCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::GetDropTargetCommonTypes +# ---------------------------------------------------------------------------- +proc generic::GetDropTargetCommonTypes { } { + variable _common_drag_source_types + return $_common_drag_source_types +};# generic::GetDropTargetCommonTypes + +# ---------------------------------------------------------------------------- +# Command generic::platform_specific_types +# 
---------------------------------------------------------------------------- +proc generic::platform_specific_types { types } { + set new_types {} + foreach type $types { + set new_types [concat $new_types [platform_specific_type $type]] + } + return $new_types +}; # generic::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command generic::platform_specific_type +# ---------------------------------------------------------------------------- +proc generic::platform_specific_type { type } { + variable _tkdnd2platform + if {[dict exists $_tkdnd2platform $type]} { + return [dict get $_tkdnd2platform $type] + } + list $type +}; # generic::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + set new_types {} + foreach type $types { + set new_types [concat $new_types [platform_independent_type $type]] + } + return $new_types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command generic::platform_independent_type +# ---------------------------------------------------------------------------- +proc generic::platform_independent_type { type } { + variable _platform2tkdnd + if {[dict exists $_platform2tkdnd $type]} { + return [dict get $_platform2tkdnd $type] + } + return $type +}; # generic::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command generic::supported_types +# ---------------------------------------------------------------------------- +proc generic::supported_types { types } { + set new_types {} + foreach type $types { + if {[supported_type $type]} {lappend new_types $type} + } + return $new_types +}; # generic::supported_types + +# ---------------------------------------------------------------------------- +# Command generic::supported_type +# ---------------------------------------------------------------------------- +proc generic::supported_type { type } { + variable _platform2tkdnd + if {[dict exists $_platform2tkdnd $type]} { + return 1 + } + return 0 +}; # generic::supported_type diff --git a/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_macosx.tcl b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_macosx.tcl new file mode 100644 index 0000000000000000000000000000000000000000..307f6da2e94286d01dc9e068fffebe46de3c43f3 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_macosx.tcl @@ -0,0 +1,144 @@ +# +# tkdnd_macosx.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. + +# This software is copyrighted by: +# Georgios Petasis, Athens, Greece. +# e-mail: petasisg@yahoo.gr, petasis@iit.demokritos.gr +# +# Mac portions (c) 2009 Kevin Walzer/WordTech Communications LLC, +# kw@codebykevin.com +# +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. 
+# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +#basic API for Mac Drag and Drop + +#two data types supported: strings and file paths + +#two commands at C level: ::tkdnd::macdnd::registerdragwidget, ::tkdnd::macdnd::unregisterdragwidget + +#data retrieval mechanism: text or file paths are copied from drag clipboard to system clipboard and retrieved via [clipboard get]; array of file paths is converted to single tab-separated string, can be split into Tcl list + +if {[tk windowingsystem] eq "aqua" && "AppKit" ni [winfo server .]} { + error {TkAqua Cocoa required} +} + +namespace eval macdnd { + + proc initialise { } { + ## Mapping from platform types to TkDND types... + ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + NSPasteboardTypeString DND_Text \ + NSFilenamesPboardType DND_Files \ + NSPasteboardTypeHTML DND_HTML \ + ] + };# initialise + +};# namespace macdnd + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleEnter +# ---------------------------------------------------------------------------- +proc macdnd::HandleEnter { path drag_source typelist { data {} } } { + variable _pressedkeys + variable _actionlist + set _pressedkeys 1 + set _actionlist { copy move link ask private } + ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleEnter $path $drag_source $typelist $typelist \ + $_actionlist $_pressedkeys +};# macdnd::HandleEnter + +# ---------------------------------------------------------------------------- +# Command macdnd::HandlePosition +# ---------------------------------------------------------------------------- +proc macdnd::HandlePosition { drop_target rootX rootY {drag_source {}} } { + variable _pressedkeys + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + ::tkdnd::generic::HandlePosition $drop_target $drag_source \ + $_pressedkeys $rootX $rootY +};# macdnd::HandlePosition + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleLeave +# ---------------------------------------------------------------------------- +proc macdnd::HandleLeave { args } { + ::tkdnd::generic::HandleLeave +};# macdnd::HandleLeave + +# ---------------------------------------------------------------------------- +# Command macdnd::HandleDrop +# ---------------------------------------------------------------------------- +proc macdnd::HandleDrop { drop_target data args } { + variable _pressedkeys + variable _last_mouse_root_x + variable _last_mouse_root_y + ## Get the dropped data... 
+ ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleDrop {} {} $_pressedkeys \ + $_last_mouse_root_x $_last_mouse_root_y 0 +};# macdnd::HandleDrop + +# ---------------------------------------------------------------------------- +# Command macdnd::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc macdnd::GetDragSourceCommonTypes { } { + ::tkdnd::generic::GetDragSourceCommonTypes +};# macdnd::GetDragSourceCommonTypes + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc macdnd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # macdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc macdnd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # macdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command macdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc macdnd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # macdnd::platform_independent_type diff --git a/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_unix.tcl b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_unix.tcl new file mode 100644 index 0000000000000000000000000000000000000000..56d17c4db718274df4b3b7a14f0d8e055a1002b6 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_unix.tcl @@ -0,0 +1,810 @@ +# +# tkdnd_unix.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. 
+# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval xdnd { + variable _dragging 0 + + proc initialise { } { + ## Mapping from platform types to TkDND types... + ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + text/plain\;charset=utf-8 DND_Text \ + UTF8_STRING DND_Text \ + text/plain DND_Text \ + STRING DND_Text \ + TEXT DND_Text \ + COMPOUND_TEXT DND_Text \ + text/uri-list DND_Files \ + text/html\;charset=utf-8 DND_HTML \ + text/html DND_HTML \ + application/x-color DND_Color \ + ] + };# initialise + +};# namespace xdnd + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndEnter { path drag_source typelist time { data {} } } { + variable _pressedkeys + variable _actionlist + variable _typelist + set _pressedkeys 1 + set _actionlist { copy move link ask private } + set _typelist $typelist + # puts "xdnd::HandleXdndEnter: $time" + ::tkdnd::generic::SetDroppedData $data + ::tkdnd::generic::HandleEnter $path $drag_source $typelist $typelist \ + $_actionlist $_pressedkeys +};# xdnd::HandleXdndEnter + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndPosition +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndPosition { drop_target rootX rootY time {drag_source {}} } { + variable _pressedkeys + variable _typelist + variable _last_mouse_root_x; set _last_mouse_root_x $rootX + variable _last_mouse_root_y; set _last_mouse_root_y $rootY + # puts "xdnd::HandleXdndPosition: $time" + ## Get the dropped data... + catch { + ::tkdnd::generic::SetDroppedData [GetPositionData $drop_target $_typelist $time] + } + ::tkdnd::generic::HandlePosition $drop_target $drag_source \ + $_pressedkeys $rootX $rootY +};# xdnd::HandleXdndPosition + +# ---------------------------------------------------------------------------- +# Command xdnd::HandleXdndLeave +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndLeave { } { + ::tkdnd::generic::HandleLeave +};# xdnd::HandleXdndLeave + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndDrop +# ---------------------------------------------------------------------------- +proc xdnd::HandleXdndDrop { time } { + variable _pressedkeys + variable _last_mouse_root_x + variable _last_mouse_root_y + ## Get the dropped data... 
+ ::tkdnd::generic::SetDroppedData [GetDroppedData \ + [::tkdnd::generic::GetDragSource] [::tkdnd::generic::GetDropTarget] \ + [::tkdnd::generic::GetDragSourceCommonTypes] $time] + ::tkdnd::generic::HandleDrop {} {} $_pressedkeys \ + $_last_mouse_root_x $_last_mouse_root_y $time +};# xdnd::HandleXdndDrop + +# ---------------------------------------------------------------------------- +# Command xdnd::GetPositionData +# ---------------------------------------------------------------------------- +proc xdnd::GetPositionData { drop_target typelist time } { + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [::tkdnd::generic::FindWindowWithCommonTypes $drop_target $typelist] {break} + GetDroppedData [::tkdnd::generic::GetDragSource] $drop_target \ + $common_drag_source_types $time +};# xdnd::GetPositionData + +# ---------------------------------------------------------------------------- +# Command xdnd::GetDroppedData +# ---------------------------------------------------------------------------- +proc xdnd::GetDroppedData { _drag_source _drop_target _common_drag_source_types time } { + if {![llength $_common_drag_source_types]} { + error "no common data types between the drag source and drop target widgets" + } + ## Is drag source in this application? + if {[catch {winfo pathname -displayof $_drop_target $_drag_source} p]} { + set _use_tk_selection 0 + } else { + set _use_tk_selection 1 + } + foreach type $_common_drag_source_types { + # puts "TYPE: $type ($_drop_target)" + # _get_selection $_drop_target $time $type + if {$_use_tk_selection} { + if {![catch { + selection get -displayof $_drop_target -selection XdndSelection \ + -type $type + } result options]} { + return [normalise_data $type $result] + } + } else { + # puts "_selection_get -displayof $_drop_target -selection XdndSelection \ + # -type $type -time $time" + #after 100 [list focus -force $_drop_target] + #after 50 [list raise [winfo toplevel $_drop_target]] + if {![catch { + _selection_get -displayof $_drop_target -selection XdndSelection \ + -type $type -time $time + } result options]} { + return [normalise_data $type $result] + } + } + } + return -options $options $result +};# xdnd::GetDroppedData + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_specific_types +# ---------------------------------------------------------------------------- +proc xdnd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # xdnd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_specific_type +# ---------------------------------------------------------------------------- +proc xdnd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # xdnd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command xdnd::platform_independent_type +# ---------------------------------------------------------------------------- +proc xdnd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type 
$type +}; # xdnd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command xdnd::_normalise_data +# ---------------------------------------------------------------------------- +proc xdnd::normalise_data { type data } { + # Tk knows how to interpret the following types: + # STRING, TEXT, COMPOUND_TEXT + # UTF8_STRING + # Else, it returns a list of 8 or 32 bit numbers... + switch -glob $type { + STRING - UTF8_STRING - TEXT - COMPOUND_TEXT {return $data} + text/html { + if {[catch { + encoding convertfrom unicode $data + } string]} { + set string $data + } + return [string map {\r\n \n} $string] + } + text/html\;charset=utf-8 - + text/plain\;charset=utf-8 - + text/plain { + if {[catch { + encoding convertfrom utf-8 [tkdnd::bytes_to_string $data] + } string]} { + set string $data + } + return [string map {\r\n \n} $string] + } + text/uri-list* { + if {[catch { + encoding convertfrom utf-8 [tkdnd::bytes_to_string $data] + } string]} { + set string $data + } + ## Get rid of \r\n + set string [string trim [string map {\r\n \n} $string]] + set files {} + foreach quoted_file [split $string] { + set file [tkdnd::urn_unquote $quoted_file] + switch -glob $file { + \#* {} + file://* {lappend files [string range $file 7 end]} + ftp://* - + https://* - + http://* {lappend files $quoted_file} + default {lappend files $file} + } + } + return $files + } + application/x-color { + return $data + } + text/x-moz-url - + application/q-iconlist - + default {return $data} + } +}; # xdnd::normalise_data + +############################################################################# +## +## XDND drag implementation +## +############################################################################# + +# ---------------------------------------------------------------------------- +# Command xdnd::_selection_ownership_lost +# ---------------------------------------------------------------------------- +proc xdnd::_selection_ownership_lost {} { + variable _dragging + set _dragging 0 +};# _selection_ownership_lost + +# ---------------------------------------------------------------------------- +# Command xdnd::_dodragdrop +# ---------------------------------------------------------------------------- +proc xdnd::_dodragdrop { source actions types data button } { + variable _dragging + + # puts "xdnd::_dodragdrop: source: $source, actions: $actions, types: $types,\ + # data: \"$data\", button: $button" + if {$_dragging} { + ## We are in the middle of another drag operation... 
+ error "another drag operation in progress" + } + + variable _dodragdrop_drag_source $source + variable _dodragdrop_drop_target 0 + variable _dodragdrop_drop_target_proxy 0 + variable _dodragdrop_actions $actions + variable _dodragdrop_action_descriptions $actions + variable _dodragdrop_actions_len [llength $actions] + variable _dodragdrop_types $types + variable _dodragdrop_types_len [llength $types] + variable _dodragdrop_data $data + variable _dodragdrop_transfer_data {} + variable _dodragdrop_button $button + variable _dodragdrop_time 0 + variable _dodragdrop_default_action refuse_drop + variable _dodragdrop_waiting_status 0 + variable _dodragdrop_drop_target_accepts_drop 0 + variable _dodragdrop_drop_target_accepts_action refuse_drop + variable _dodragdrop_current_cursor $_dodragdrop_default_action + variable _dodragdrop_drop_occured 0 + variable _dodragdrop_selection_requestor 0 + + ## + ## If we have more than 3 types, the property XdndTypeList must be set on + ## the drag source widget... + ## + if {$_dodragdrop_types_len > 3} { + _announce_type_list $_dodragdrop_drag_source $_dodragdrop_types + } + + ## + ## Announce the actions & their descriptions on the XdndActionList & + ## XdndActionDescription properties... + ## + _announce_action_list $_dodragdrop_drag_source $_dodragdrop_actions \ + $_dodragdrop_action_descriptions + + ## + ## Arrange selection handlers for our drag source, and all the supported types + ## + registerSelectionHandler $source $types + + ## + ## Step 1: When a drag begins, the source takes ownership of XdndSelection. + ## + selection own -command ::tkdnd::xdnd::_selection_ownership_lost \ + -selection XdndSelection $source + set _dragging 1 + + ## Grab the mouse pointer... + _grab_pointer $source $_dodragdrop_default_action + + ## Register our generic event handler... + # The generic event callback will report events by modifying variable + # ::xdnd::_dodragdrop_event: a dict with event information will be set as + # the value of the variable... + _register_generic_event_handler + + ## Set a timeout for debugging purposes... + # after 60000 {set ::tkdnd::xdnd::_dragging 0} + + tkwait variable ::tkdnd::xdnd::_dragging + _SendXdndLeave + + set _dragging 0 + _ungrab_pointer $source + _unregister_generic_event_handler + catch {selection clear -selection XdndSelection} + unregisterSelectionHandler $source $types + return $_dodragdrop_drop_target_accepts_action +};# xdnd::_dodragdrop + +# ---------------------------------------------------------------------------- +# Command xdnd::_process_drag_events +# ---------------------------------------------------------------------------- +proc xdnd::_process_drag_events {event} { + # The return value from proc is normally 0. 
A non-zero return value indicates + # that the event is not to be handled further; that is, proc has done all + # processing that is to be allowed for the event + variable _dragging + if {!$_dragging} {return 0} + # puts $event + + variable _dodragdrop_time + set time [dict get $event time] + set type [dict get $event type] + if {$time < $_dodragdrop_time && ![string equal $type SelectionRequest]} { + return 0 + } + set _dodragdrop_time $time + + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_default_action + switch $type { + MotionNotify { + set rootx [dict get $event x_root] + set rooty [dict get $event y_root] + set window [_find_drop_target_window $_dodragdrop_drag_source \ + $rootx $rooty] + if {[string length $window]} { + ## Examine the modifiers to suggest an action... + set _dodragdrop_default_action [_default_action $event] + ## Is it a Tk widget? + # set path [winfo containing $rootx $rooty] + # puts "Window under mouse: $window ($path)" + if {$_dodragdrop_drop_target != $window} { + ## Send XdndLeave to $_dodragdrop_drop_target + _SendXdndLeave + ## Is there a proxy? If not, _find_drop_target_proxy returns the + ## target window, so we always get a valid "proxy". + set proxy [_find_drop_target_proxy $_dodragdrop_drag_source $window] + ## Send XdndEnter to $window + _SendXdndEnter $window $proxy + ## Send XdndPosition to $_dodragdrop_drop_target + _SendXdndPosition $rootx $rooty $_dodragdrop_default_action + } else { + ## Send XdndPosition to $_dodragdrop_drop_target + _SendXdndPosition $rootx $rooty $_dodragdrop_default_action + } + } else { + ## No window under the mouse. Send XdndLeave to $_dodragdrop_drop_target + _SendXdndLeave + } + } + ButtonPress { + } + ButtonRelease { + variable _dodragdrop_button + set button [dict get $event button] + if {$button == $_dodragdrop_button} { + ## The button that initiated the drag was released. Trigger drop... + _SendXdndDrop + } + return 1 + } + KeyPress { + } + KeyRelease { + set keysym [dict get $event keysym] + switch $keysym { + Escape { + ## The user has pressed escape. Abort... 
+ if {$_dragging} {set _dragging 0} + } + } + } + SelectionRequest { + variable _dodragdrop_selection_requestor + variable _dodragdrop_selection_property + variable _dodragdrop_selection_selection + variable _dodragdrop_selection_target + variable _dodragdrop_selection_time + set _dodragdrop_selection_requestor [dict get $event requestor] + set _dodragdrop_selection_property [dict get $event property] + set _dodragdrop_selection_selection [dict get $event selection] + set _dodragdrop_selection_target [dict get $event target] + set _dodragdrop_selection_time $time + return 0 + } + default { + return 0 + } + } + return 0 +};# _process_drag_events + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndEnter +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndEnter {window proxy} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_types + variable _dodragdrop_waiting_status + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_target > 0} _SendXdndLeave + if {$_dodragdrop_drop_occured} return + set _dodragdrop_drop_target $window + set _dodragdrop_drop_target_proxy $proxy + set _dodragdrop_waiting_status 0 + if {$_dodragdrop_drop_target < 1} return + # puts "XdndEnter: $_dodragdrop_drop_target $_dodragdrop_drop_target_proxy" + _send_XdndEnter $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy $_dodragdrop_types +};# xdnd::_SendXdndEnter + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndPosition +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndPosition {rootx rooty action} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_waiting_status + ## Arrange a new XdndPosition, to be send periodically... 
+ variable _dodragdrop_xdnd_position_heartbeat + catch {after cancel $_dodragdrop_xdnd_position_heartbeat} + set _dodragdrop_xdnd_position_heartbeat [after 200 \ + [list ::tkdnd::xdnd::_SendXdndPosition $rootx $rooty $action]] + if {$_dodragdrop_waiting_status} {return} + # puts "XdndPosition: $_dodragdrop_drop_target $rootx $rooty $action" + _send_XdndPosition $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy $rootx $rooty $action + set _dodragdrop_waiting_status 1 +};# xdnd::_SendXdndPosition + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndStatus +# ---------------------------------------------------------------------------- +proc xdnd::_HandleXdndStatus {event} { + variable _dodragdrop_drop_target + variable _dodragdrop_waiting_status + + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + set _dodragdrop_waiting_status 0 + foreach key {target accept want_position action x y w h} { + set $key [dict get $event $key] + } + set _dodragdrop_drop_target_accepts_drop $accept + set _dodragdrop_drop_target_accepts_action $action + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + _update_cursor + # puts "XdndStatus: $event" +};# xdnd::_HandleXdndStatus + +# ---------------------------------------------------------------------------- +# Command xdnd::_HandleXdndFinished +# ---------------------------------------------------------------------------- +proc xdnd::_HandleXdndFinished {event} { + variable _dodragdrop_xdnd_finished_event_after_id + catch {after cancel $_dodragdrop_xdnd_finished_event_after_id} + set _dodragdrop_xdnd_finished_event_after_id {} + variable _dodragdrop_drop_target + set _dodragdrop_drop_target 0 + variable _dragging + if {$_dragging} {set _dragging 0} + + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + if {[dict size $event]} { + foreach key {target accept action} { + set $key [dict get $event $key] + } + set _dodragdrop_drop_target_accepts_drop $accept + set _dodragdrop_drop_target_accepts_action $action + } else { + set _dodragdrop_drop_target_accepts_drop 0 + } + if {!$_dodragdrop_drop_target_accepts_drop} { + set _dodragdrop_drop_target_accepts_action refuse_drop + } + # puts "XdndFinished: $event" +};# xdnd::_HandleXdndFinished + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndLeave +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndLeave {} { + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} return + variable _dodragdrop_drop_target_proxy + # puts "XdndLeave: $_dodragdrop_drop_target" + _send_XdndLeave $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy + set _dodragdrop_drop_target 0 + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + set _dodragdrop_drop_target_accepts_drop 0 + set _dodragdrop_drop_target_accepts_action refuse_drop + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} return + _update_cursor +};# xdnd::_SendXdndLeave + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendXdndDrop +# ---------------------------------------------------------------------------- +proc xdnd::_SendXdndDrop {} 
{ + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target + if {$_dodragdrop_drop_target < 1} { + ## The mouse has been released over a widget that does not accept drops. + _HandleXdndFinished {} + return + } + variable _dodragdrop_drop_occured + if {$_dodragdrop_drop_occured} {return} + variable _dodragdrop_drop_target_proxy + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + + set _dodragdrop_drop_occured 1 + _update_cursor clock + + if {!$_dodragdrop_drop_target_accepts_drop} { + _SendXdndLeave + _HandleXdndFinished {} + return + } + # puts "XdndDrop: $_dodragdrop_drop_target" + variable _dodragdrop_drop_timestamp + set _dodragdrop_drop_timestamp [_send_XdndDrop \ + $_dodragdrop_drag_source $_dodragdrop_drop_target \ + $_dodragdrop_drop_target_proxy] + set _dodragdrop_drop_target 0 + # puts "XdndDrop: $_dodragdrop_drop_target" + ## Arrange a timeout for receiving XdndFinished... + variable _dodragdrop_xdnd_finished_event_after_id + set _dodragdrop_xdnd_finished_event_after_id \ + [after 10000 [list ::tkdnd::xdnd::_HandleXdndFinished {}]] +};# xdnd::_SendXdndDrop + +# ---------------------------------------------------------------------------- +# Command xdnd::_update_cursor +# ---------------------------------------------------------------------------- +proc xdnd::_update_cursor { {cursor {}}} { + # puts "_update_cursor $cursor" + variable _dodragdrop_current_cursor + variable _dodragdrop_drag_source + variable _dodragdrop_drop_target_accepts_drop + variable _dodragdrop_drop_target_accepts_action + + if {![string length $cursor]} { + set cursor refuse_drop + if {$_dodragdrop_drop_target_accepts_drop} { + set cursor $_dodragdrop_drop_target_accepts_action + } + } + if {![string equal $cursor $_dodragdrop_current_cursor]} { + _set_pointer_cursor $_dodragdrop_drag_source $cursor + set _dodragdrop_current_cursor $cursor + } +};# xdnd::_update_cursor + +# ---------------------------------------------------------------------------- +# Command xdnd::_default_action +# ---------------------------------------------------------------------------- +proc xdnd::_default_action {event} { + variable _dodragdrop_actions + variable _dodragdrop_actions_len + if {$_dodragdrop_actions_len == 1} {return [lindex $_dodragdrop_actions 0]} + + set alt [dict get $event Alt] + set shift [dict get $event Shift] + set control [dict get $event Control] + + if {$shift && $control && [lsearch $_dodragdrop_actions link] != -1} { + return link + } elseif {$control && [lsearch $_dodragdrop_actions copy] != -1} { + return copy + } elseif {$shift && [lsearch $_dodragdrop_actions move] != -1} { + return move + } elseif {$alt && [lsearch $_dodragdrop_actions link] != -1} { + return link + } + return default +};# xdnd::_default_action + +# ---------------------------------------------------------------------------- +# Command xdnd::getFormatForType +# ---------------------------------------------------------------------------- +proc xdnd::getFormatForType {type} { + switch -glob [string tolower $type] { + text/plain\;charset=utf-8 - + text/html\;charset=utf-8 - + utf8_string {set format UTF8_STRING} + text/html - + text/plain - + string - + text - + compound_text {set format STRING} + text/uri-list* {set format UTF8_STRING} + application/x-color {set format $type} + default {set format $type} + } + return $format +};# xdnd::getFormatForType + +# ---------------------------------------------------------------------------- +# Command xdnd::registerSelectionHandler +# 
---------------------------------------------------------------------------- +proc xdnd::registerSelectionHandler {source types} { + foreach type $types { + selection handle -selection XdndSelection \ + -type $type \ + -format [getFormatForType $type] \ + $source [list ::tkdnd::xdnd::_SendData $type] + } +};# xdnd::registerSelectionHandler + +# ---------------------------------------------------------------------------- +# Command xdnd::unregisterSelectionHandler +# ---------------------------------------------------------------------------- +proc xdnd::unregisterSelectionHandler {source types} { + foreach type $types { + catch { + selection handle -selection XdndSelection \ + -type $type \ + -format [getFormatForType $type] \ + $source {} + } + } +};# xdnd::unregisterSelectionHandler + +# ---------------------------------------------------------------------------- +# Command xdnd::_convert_to_unsigned +# ---------------------------------------------------------------------------- +proc xdnd::_convert_to_unsigned {data format} { + switch $format { + 8 { set mask 0xff } + 16 { set mask 0xffff } + 32 { set mask 0xffffff } + default {error "unsupported format $format"} + } + ## Convert signed integer into unsigned... + set d [list] + foreach num $data { + lappend d [expr { $num & $mask }] + } + return $d +};# xdnd::_convert_to_unsigned + +# ---------------------------------------------------------------------------- +# Command xdnd::_SendData +# ---------------------------------------------------------------------------- +proc xdnd::_SendData {type offset bytes args} { + variable _dodragdrop_drag_source + variable _dodragdrop_types + variable _dodragdrop_data + variable _dodragdrop_transfer_data + + ## The variable _dodragdrop_data contains a list of data, one for each + ## type in the _dodragdrop_types variable. We have to search types, and find + ## the corresponding entry in the _dodragdrop_data list. + set index [lsearch $_dodragdrop_types $type] + if {$index < 0} { + error "unable to locate data suitable for type \"$type\"" + } + set typed_data [lindex $_dodragdrop_data $index] + set format 8 + if {$offset == 0} { + ## Prepare the data to be transferred... + switch -glob $type { + text/plain* - UTF8_STRING - STRING - TEXT - COMPOUND_TEXT { + binary scan [encoding convertto utf-8 $typed_data] \ + c* _dodragdrop_transfer_data + set _dodragdrop_transfer_data \ + [_convert_to_unsigned $_dodragdrop_transfer_data $format] + } + text/uri-list* { + set files [list] + foreach file $typed_data { + switch -glob $file { + *://* {lappend files $file} + default {lappend files file://$file} + } + } + binary scan [encoding convertto utf-8 "[join $files \r\n]\r\n"] \ + c* _dodragdrop_transfer_data + set _dodragdrop_transfer_data \ + [_convert_to_unsigned $_dodragdrop_transfer_data $format] + } + application/x-color { + set format 16 + ## Try to understand the provided data: we accept a standard Tk colour, + ## or a list of 3 values (red green blue) or a list of 4 values + ## (red green blue opacity). + switch [llength $typed_data] { + 1 { set color [winfo rgb $_dodragdrop_drag_source $typed_data] + lappend color 65535 } + 3 { set color $typed_data; lappend color 65535 } + 4 { set color $typed_data } + default {error "unknown color data: \"$typed_data\""} + } + ## Convert the 4 elements into 16 bit values... 
+ set _dodragdrop_transfer_data [list] + foreach c $color { + lappend _dodragdrop_transfer_data [format 0x%04X $c] + } + } + default { + set format 32 + binary scan $typed_data c* _dodragdrop_transfer_data + } + } + } + + ## + ## Data has been split into bytes. Count the bytes requested, and return them + ## + set data [lrange $_dodragdrop_transfer_data $offset [expr {$offset+$bytes-1}]] + switch $format { + 8 { + set data [encoding convertfrom utf-8 [binary format c* $data]] + } + 16 { + variable _dodragdrop_selection_requestor + if {$_dodragdrop_selection_requestor} { + ## Tk selection cannot process this format (only 8 & 32 supported). + ## Call our XChangeProperty... + set numItems [llength $data] + variable _dodragdrop_selection_property + variable _dodragdrop_selection_selection + variable _dodragdrop_selection_target + variable _dodragdrop_selection_time + XChangeProperty $_dodragdrop_drag_source \ + $_dodragdrop_selection_requestor \ + $_dodragdrop_selection_property \ + $_dodragdrop_selection_target \ + $format \ + $_dodragdrop_selection_time \ + $data $numItems + return -code break + } + } + 32 { + } + default { + error "unsupported format $format" + } + } + # puts "SendData: $type $offset $bytes $args ($typed_data)" + # puts " $data" + return $data +};# xdnd::_SendData diff --git a/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_utils.tcl b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_utils.tcl new file mode 100644 index 0000000000000000000000000000000000000000..ee961ddb1ca29b383496111eadc2ccdce7776b08 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_utils.tcl @@ -0,0 +1,252 @@ +# +# tkdnd_utils.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. 
+#
+
+package require tkdnd
+namespace eval ::tkdnd {
+  namespace eval utils {
+  };# namespace ::tkdnd::utils
+  namespace eval text {
+    variable _drag_tag tkdnd::drag::selection::tag
+    variable _state {}
+    variable _drag_source_widget {}
+    variable _drop_target_widget {}
+    variable _now_dragging 0
+  };# namespace ::tkdnd::text
+};# namespace ::tkdnd
+
+bind TkDND_Drag_Text1 <ButtonPress-1> {tkdnd::text::_begin_drag clear 1 %W %s %X %Y %x %y}
+bind TkDND_Drag_Text1 <B1-Motion> {tkdnd::text::_begin_drag motion 1 %W %s %X %Y %x %y}
+bind TkDND_Drag_Text1 <B1-Leave> {tkdnd::text::_TextAutoScan %W %x %y}
+bind TkDND_Drag_Text1 <ButtonRelease-1> {tkdnd::text::_begin_drag reset 1 %W %s %X %Y %x %y}
+bind TkDND_Drag_Text2 <ButtonPress-2> {tkdnd::text::_begin_drag clear 2 %W %s %X %Y %x %y}
+bind TkDND_Drag_Text2 <B2-Motion> {tkdnd::text::_begin_drag motion 2 %W %s %X %Y %x %y}
+bind TkDND_Drag_Text2 <ButtonRelease-2> {tkdnd::text::_begin_drag reset 2 %W %s %X %Y %x %y}
+bind TkDND_Drag_Text3 <ButtonPress-3> {tkdnd::text::_begin_drag clear 3 %W %s %X %Y %x %y}
+bind TkDND_Drag_Text3 <B3-Motion> {tkdnd::text::_begin_drag motion 3 %W %s %X %Y %x %y}
+bind TkDND_Drag_Text3 <ButtonRelease-3> {tkdnd::text::_begin_drag reset 3 %W %s %X %Y %x %y}
+
+# ----------------------------------------------------------------------------
+# Command tkdnd::text::drag_source
+# ----------------------------------------------------------------------------
+proc ::tkdnd::text::drag_source { mode path { types DND_Text } { event 1 } { tagprefix TkDND_Drag_Text } { tag sel } } {
+  switch -exact -- $mode {
+    register {
+      $path tag bind $tag <ButtonPress-${event}> \
+        "tkdnd::text::_begin_drag press ${event} %W %s %X %Y %x %y"
+      ## Set a binding to the widget, to put selection as data...
+      bind $path <<DragInitCmd>> "::tkdnd::text::DragInitCmd $path {%t} $tag"
+      ## Set a binding to the widget, to remove selection if action is move...
+      bind $path <<DragEndCmd>> "::tkdnd::text::DragEndCmd $path %A $tag"
+    }
+    unregister {
+      $path tag bind $tag <ButtonPress-${event}> {}
+      bind $path <<DragInitCmd>> {}
+      bind $path <<DragEndCmd>> {}
+    }
+  }
+  ::tkdnd::drag_source $mode $path $types $event $tagprefix
+};# ::tkdnd::text::drag_source
+
+# ----------------------------------------------------------------------------
+# Command tkdnd::text::drop_target
+# ----------------------------------------------------------------------------
+proc ::tkdnd::text::drop_target { mode path { types DND_Text } } {
+  switch -exact -- $mode {
+    register {
+      bind $path <<DropPosition>> "::tkdnd::text::DropPosition $path %X %Y %A %a %m"
+      bind $path <<Drop>> "::tkdnd::text::Drop $path %D %X %Y %A %a %m"
+    }
+    unregister {
+      bind $path <<DropEnter>> {}
+      bind $path <<DropPosition>> {}
+      bind $path <<DropLeave>> {}
+      bind $path <<Drop>> {}
+    }
+  }
+  ::tkdnd::drop_target $mode $path $types
+};# ::tkdnd::text::drop_target
+
+# ----------------------------------------------------------------------------
+# Command tkdnd::text::DragInitCmd
+# ----------------------------------------------------------------------------
+proc ::tkdnd::text::DragInitCmd { path { types DND_Text } { tag sel } { actions { copy move } } } {
+  ## Save the selection indices...
+ variable _drag_source_widget + variable _drop_target_widget + set _drag_source_widget $path + set _drop_target_widget {} + _save_selection $path $tag + list $actions $types [$path get $tag.first $tag.last] +};# ::tkdnd::text::DragInitCmd + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DragEndCmd +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DragEndCmd { path action { tag sel } } { + variable _drag_source_widget + variable _drop_target_widget + set _drag_source_widget {} + set _drop_target_widget {} + _restore_selection $path $tag + switch -exact -- $action { + move { + ## Delete the original selected text... + variable _selection_first + variable _selection_last + $path delete $_selection_first $_selection_last + } + } +};# ::tkdnd::text::DragEndCmd + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::DropPosition +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::DropPosition { path X Y action actions keys} { + variable _drag_source_widget + variable _drop_target_widget + set _drop_target_widget $path + ## This check is primitive, a more accurate one is needed! + if {$path eq $_drag_source_widget} { + ## This is a drag within the same widget! Set action to move... + if {"move" in $actions} {set action move} + } + incr X -[winfo rootx $path] + incr Y -[winfo rooty $path] + $path mark set insert @$X,$Y; update + return $action +};# ::tkdnd::text::DropPosition + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::Drop +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::Drop { path data X Y action actions keys } { + incr X -[winfo rootx $path] + incr Y -[winfo rooty $path] + $path mark set insert @$X,$Y + $path insert [$path index insert] $data + return $action +};# ::tkdnd::text::Drop + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_save_selection +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_save_selection { path tag} { + variable _drag_tag + variable _selection_first + variable _selection_last + variable _selection_tag $tag + set _selection_first [$path index $tag.first] + set _selection_last [$path index $tag.last] + $path tag add $_drag_tag $_selection_first $_selection_last + $path tag configure $_drag_tag \ + -background [$path tag cget $tag -background] \ + -foreground [$path tag cget $tag -foreground] +};# tkdnd::text::_save_selection + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_restore_selection +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_restore_selection { path tag} { + variable _drag_tag + variable _selection_first + variable _selection_last + $path tag delete $_drag_tag + $path tag remove $tag 0.0 end + #$path tag add $tag $_selection_first $_selection_last +};# tkdnd::text::_restore_selection + +# ---------------------------------------------------------------------------- +# Command tkdnd::text::_begin_drag +# ---------------------------------------------------------------------------- +proc ::tkdnd::text::_begin_drag { event button source state X Y x y } { + variable _drop_target_widget + variable _state + # puts 
"::tkdnd::text::_begin_drag $event $button $source $state $X $Y $x $y" + + switch -exact -- $event { + clear { + switch -exact -- $_state { + press { + ## Do not execute other bindings, as they will erase selection... + return -code break + } + } + set _state clear + } + motion { + variable _now_dragging + if {$_now_dragging} {return -code break} + if { [string equal $_state "press"] } { + variable _x0; variable _y0 + if { abs($_x0-$X) > ${::tkdnd::_dx} || abs($_y0-$Y) > ${::tkdnd::_dy} } { + set _state "done" + set _drop_target_widget {} + set _now_dragging 1 + set code [catch { + ::tkdnd::_init_drag $button $source $state $X $Y $x $y + } info options] + set _drop_target_widget {} + set _now_dragging 0 + if {$code != 0} { + ## Something strange occurred... + return -options $options $info + } + } + return -code break + } + set _state clear + } + press { + variable _x0; variable _y0 + set _x0 $X + set _y0 $Y + set _state "press" + } + reset { + set _state {} + } + } + if {$source eq $_drop_target_widget} {return -code break} + return -code continue +};# tkdnd::text::_begin_drag + +proc tkdnd::text::_TextAutoScan {w x y} { + variable _now_dragging + if {$_now_dragging} {return -code break} + return -code continue +};# tkdnd::text::_TextAutoScan diff --git a/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_windows.tcl b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_windows.tcl new file mode 100644 index 0000000000000000000000000000000000000000..a1d01f3a2c438eaf3f676437d4d4ba89b3ba64f0 --- /dev/null +++ b/gui_data/tkinterdnd2/tkdnd/win64/tkdnd_windows.tcl @@ -0,0 +1,167 @@ +# +# tkdnd_windows.tcl -- +# +# This file implements some utility procedures that are used by the TkDND +# package. +# +# This software is copyrighted by: +# George Petasis, National Centre for Scientific Research "Demokritos", +# Aghia Paraskevi, Athens, Greece. +# e-mail: petasis@iit.demokritos.gr +# +# The following terms apply to all files associated +# with the software unless explicitly disclaimed in individual files. +# +# The authors hereby grant permission to use, copy, modify, distribute, +# and license this software and its documentation for any purpose, provided +# that existing copyright notices are retained in all copies and that this +# notice is included verbatim in any distributions. No written agreement, +# license, or royalty fee is required for any of the authorized uses. +# Modifications to this software may be copyrighted by their authors +# and need not follow the licensing terms described here, provided that +# the new terms are clearly indicated on the first page of each file where +# they apply. +# +# IN NO EVENT SHALL THE AUTHORS OR DISTRIBUTORS BE LIABLE TO ANY PARTY +# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES +# ARISING OUT OF THE USE OF THIS SOFTWARE, ITS DOCUMENTATION, OR ANY +# DERIVATIVES THEREOF, EVEN IF THE AUTHORS HAVE BEEN ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. +# +# THE AUTHORS AND DISTRIBUTORS SPECIFICALLY DISCLAIM ANY WARRANTIES, +# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. THIS SOFTWARE +# IS PROVIDED ON AN "AS IS" BASIS, AND THE AUTHORS AND DISTRIBUTORS HAVE +# NO OBLIGATION TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR +# MODIFICATIONS. +# + +namespace eval olednd { + + proc initialise { } { + ## Mapping from platform types to TkDND types... 
+ ::tkdnd::generic::initialise_platform_to_tkdnd_types [list \ + CF_UNICODETEXT DND_Text \ + CF_TEXT DND_Text \ + CF_HDROP DND_Files \ + UniformResourceLocator DND_URL \ + CF_HTML DND_HTML \ + {HTML Format} DND_HTML \ + CF_RTF DND_RTF \ + CF_RTFTEXT DND_RTF \ + {Rich Text Format} DND_RTF \ + ] + # FileGroupDescriptorW DND_Files \ + # FileGroupDescriptor DND_Files \ + + ## Mapping from TkDND types to platform types... + ::tkdnd::generic::initialise_tkdnd_to_platform_types [list \ + DND_Text {CF_UNICODETEXT CF_TEXT} \ + DND_Files {CF_HDROP} \ + DND_URL {UniformResourceLocator UniformResourceLocatorW} \ + DND_HTML {CF_HTML {HTML Format}} \ + DND_RTF {CF_RTF CF_RTFTEXT {Rich Text Format}} \ + ] + };# initialise + +};# namespace olednd + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragEnter +# ---------------------------------------------------------------------------- +proc olednd::HandleDragEnter { drop_target typelist actionlist pressedkeys + rootX rootY codelist { data {} } } { + ::tkdnd::generic::SetDroppedData $data + focus $drop_target + ::tkdnd::generic::HandleEnter $drop_target 0 $typelist \ + $codelist $actionlist $pressedkeys + set action [::tkdnd::generic::HandlePosition $drop_target {} \ + $pressedkeys $rootX $rootY] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDragEnter + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragOver +# ---------------------------------------------------------------------------- +proc olednd::HandleDragOver { drop_target pressedkeys rootX rootY } { + set action [::tkdnd::generic::HandlePosition $drop_target {} \ + $pressedkeys $rootX $rootY] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDragOver + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDragLeave +# ---------------------------------------------------------------------------- +proc olednd::HandleDragLeave { drop_target } { + ::tkdnd::generic::HandleLeave + if {$::tkdnd::_auto_update} {update idletasks} +};# olednd::HandleDragLeave + +# ---------------------------------------------------------------------------- +# Command olednd::HandleDrop +# ---------------------------------------------------------------------------- +proc olednd::HandleDrop { drop_target pressedkeys rootX rootY type data } { + ::tkdnd::generic::SetDroppedData [normalise_data $type $data] + set action [::tkdnd::generic::HandleDrop $drop_target {} \ + $pressedkeys $rootX $rootY 0] + if {$::tkdnd::_auto_update} {update idletasks} + return $action +};# olednd::HandleDrop + +# ---------------------------------------------------------------------------- +# Command olednd::GetDataType +# ---------------------------------------------------------------------------- +proc olednd::GetDataType { drop_target typelist } { + foreach {drop_target common_drag_source_types common_drop_target_types} \ + [::tkdnd::generic::FindWindowWithCommonTypes $drop_target $typelist] {break} + lindex $common_drag_source_types 0 +};# olednd::GetDataType + +# ---------------------------------------------------------------------------- +# Command olednd::GetDragSourceCommonTypes +# ---------------------------------------------------------------------------- +proc olednd::GetDragSourceCommonTypes { drop_target } { + ::tkdnd::generic::GetDragSourceCommonTypes +};# olednd::GetDragSourceCommonTypes + +# 
---------------------------------------------------------------------------- +# Command olednd::platform_specific_types +# ---------------------------------------------------------------------------- +proc olednd::platform_specific_types { types } { + ::tkdnd::generic::platform_specific_types $types +}; # olednd::platform_specific_types + +# ---------------------------------------------------------------------------- +# Command olednd::platform_specific_type +# ---------------------------------------------------------------------------- +proc olednd::platform_specific_type { type } { + ::tkdnd::generic::platform_specific_type $type +}; # olednd::platform_specific_type + +# ---------------------------------------------------------------------------- +# Command tkdnd::platform_independent_types +# ---------------------------------------------------------------------------- +proc ::tkdnd::platform_independent_types { types } { + ::tkdnd::generic::platform_independent_types $types +}; # tkdnd::platform_independent_types + +# ---------------------------------------------------------------------------- +# Command olednd::platform_independent_type +# ---------------------------------------------------------------------------- +proc olednd::platform_independent_type { type } { + ::tkdnd::generic::platform_independent_type $type +}; # olednd::platform_independent_type + +# ---------------------------------------------------------------------------- +# Command olednd::normalise_data +# ---------------------------------------------------------------------------- +proc olednd::normalise_data { type data } { + switch [lindex [::tkdnd::generic::platform_independent_type $type] 0] { + DND_Text {return $data} + DND_Files {return $data} + DND_HTML {return [encoding convertfrom utf-8 $data]} + default {return $data} + } +}; # olednd::normalise_data diff --git a/install_packages.sh b/install_packages.sh new file mode 100755 index 0000000000000000000000000000000000000000..64193999a4b4782a465f8bc0e1505e4729ecd884 --- /dev/null +++ b/install_packages.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +while read package; do + pip install "$package" +done < requirements.txt diff --git a/lib_v5/__pycache__/mdxnet.cpython-310.pyc b/lib_v5/__pycache__/mdxnet.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3d6917a6098baf0f593230ec085a375bf889fd18 Binary files /dev/null and b/lib_v5/__pycache__/mdxnet.cpython-310.pyc differ diff --git a/lib_v5/__pycache__/modules.cpython-310.pyc b/lib_v5/__pycache__/modules.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..07e1942f6f62ea294d788560f9be1db52a196977 Binary files /dev/null and b/lib_v5/__pycache__/modules.cpython-310.pyc differ diff --git a/lib_v5/__pycache__/pyrb.cpython-310.pyc b/lib_v5/__pycache__/pyrb.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f27bbdb9d87bbd7d503f4da816dfab89a7b485f8 Binary files /dev/null and b/lib_v5/__pycache__/pyrb.cpython-310.pyc differ diff --git a/lib_v5/__pycache__/spec_utils.cpython-310.pyc b/lib_v5/__pycache__/spec_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..509fb7a3107a8c2a667fd6d30242ef0e0ac4a684 Binary files /dev/null and b/lib_v5/__pycache__/spec_utils.cpython-310.pyc differ diff --git a/lib_v5/__pycache__/tfc_tdf_v3.cpython-310.pyc b/lib_v5/__pycache__/tfc_tdf_v3.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b98f060932336f54db8fb73f0da2d1e6dced59fb Binary 
files /dev/null and b/lib_v5/__pycache__/tfc_tdf_v3.cpython-310.pyc differ diff --git a/lib_v5/mdxnet.py b/lib_v5/mdxnet.py new file mode 100644 index 0000000000000000000000000000000000000000..3293c89f08067e06cb02656f147987a32214e40d --- /dev/null +++ b/lib_v5/mdxnet.py @@ -0,0 +1,136 @@ +import torch +import torch.nn as nn +from .modules import TFC_TDF +from pytorch_lightning import LightningModule + +dim_s = 4 + +class AbstractMDXNet(LightningModule): + def __init__(self, target_name, lr, optimizer, dim_c, dim_f, dim_t, n_fft, hop_length, overlap): + super().__init__() + self.target_name = target_name + self.lr = lr + self.optimizer = optimizer + self.dim_c = dim_c + self.dim_f = dim_f + self.dim_t = dim_t + self.n_fft = n_fft + self.n_bins = n_fft // 2 + 1 + self.hop_length = hop_length + self.window = nn.Parameter(torch.hann_window(window_length=self.n_fft, periodic=True), requires_grad=False) + self.freq_pad = nn.Parameter(torch.zeros([1, dim_c, self.n_bins - self.dim_f, self.dim_t]), requires_grad=False) + + def get_optimizer(self): + if self.optimizer == 'rmsprop': + return torch.optim.RMSprop(self.parameters(), self.lr) + + if self.optimizer == 'adamw': + return torch.optim.AdamW(self.parameters(), self.lr) + +class ConvTDFNet(AbstractMDXNet): + def __init__(self, target_name, lr, optimizer, dim_c, dim_f, dim_t, n_fft, hop_length, + num_blocks, l, g, k, bn, bias, overlap): + + super(ConvTDFNet, self).__init__( + target_name, lr, optimizer, dim_c, dim_f, dim_t, n_fft, hop_length, overlap) + #self.save_hyperparameters() + + self.num_blocks = num_blocks + self.l = l + self.g = g + self.k = k + self.bn = bn + self.bias = bias + + if optimizer == 'rmsprop': + norm = nn.BatchNorm2d + + if optimizer == 'adamw': + norm = lambda input:nn.GroupNorm(2, input) + + self.n = num_blocks // 2 + scale = (2, 2) + + self.first_conv = nn.Sequential( + nn.Conv2d(in_channels=self.dim_c, out_channels=g, kernel_size=(1, 1)), + norm(g), + nn.ReLU(), + ) + + f = self.dim_f + c = g + self.encoding_blocks = nn.ModuleList() + self.ds = nn.ModuleList() + for i in range(self.n): + self.encoding_blocks.append(TFC_TDF(c, l, f, k, bn, bias=bias, norm=norm)) + self.ds.append( + nn.Sequential( + nn.Conv2d(in_channels=c, out_channels=c + g, kernel_size=scale, stride=scale), + norm(c + g), + nn.ReLU() + ) + ) + f = f // 2 + c += g + + self.bottleneck_block = TFC_TDF(c, l, f, k, bn, bias=bias, norm=norm) + + self.decoding_blocks = nn.ModuleList() + self.us = nn.ModuleList() + for i in range(self.n): + self.us.append( + nn.Sequential( + nn.ConvTranspose2d(in_channels=c, out_channels=c - g, kernel_size=scale, stride=scale), + norm(c - g), + nn.ReLU() + ) + ) + f = f * 2 + c -= g + + self.decoding_blocks.append(TFC_TDF(c, l, f, k, bn, bias=bias, norm=norm)) + + self.final_conv = nn.Sequential( + nn.Conv2d(in_channels=c, out_channels=self.dim_c, kernel_size=(1, 1)), + ) + + def forward(self, x): + + x = self.first_conv(x) + + x = x.transpose(-1, -2) + + ds_outputs = [] + for i in range(self.n): + x = self.encoding_blocks[i](x) + ds_outputs.append(x) + x = self.ds[i](x) + + x = self.bottleneck_block(x) + + for i in range(self.n): + x = self.us[i](x) + x *= ds_outputs[-i - 1] + x = self.decoding_blocks[i](x) + + x = x.transpose(-1, -2) + + x = self.final_conv(x) + + return x + +class Mixer(nn.Module): + def __init__(self, device, mixer_path): + + super(Mixer, self).__init__() + + self.linear = nn.Linear((dim_s+1)*2, dim_s*2, bias=False) + + self.load_state_dict( + torch.load(mixer_path, map_location=device) + ) + + def 
forward(self, x): + x = x.reshape(1,(dim_s+1)*2,-1).transpose(-1,-2) + x = self.linear(x) + return x.transpose(-1,-2).reshape(dim_s,2,-1) \ No newline at end of file diff --git a/lib_v5/mixer.ckpt b/lib_v5/mixer.ckpt new file mode 100644 index 0000000000000000000000000000000000000000..29b4612c8173cc88b9f9def198933e7a49d4cf92 --- /dev/null +++ b/lib_v5/mixer.ckpt @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ea781bd52c6a523b825fa6cdbb6189f52e318edd8b17e6fe404f76f7af8caa9c +size 1208 diff --git a/lib_v5/modules.py b/lib_v5/modules.py new file mode 100644 index 0000000000000000000000000000000000000000..4e77d2fb5b97c4ca0e6f6011e012f43e03a70b14 --- /dev/null +++ b/lib_v5/modules.py @@ -0,0 +1,74 @@ +import torch +import torch.nn as nn + + +class TFC(nn.Module): + def __init__(self, c, l, k, norm): + super(TFC, self).__init__() + + self.H = nn.ModuleList() + for i in range(l): + self.H.append( + nn.Sequential( + nn.Conv2d(in_channels=c, out_channels=c, kernel_size=k, stride=1, padding=k // 2), + norm(c), + nn.ReLU(), + ) + ) + + def forward(self, x): + for h in self.H: + x = h(x) + return x + + +class DenseTFC(nn.Module): + def __init__(self, c, l, k, norm): + super(DenseTFC, self).__init__() + + self.conv = nn.ModuleList() + for i in range(l): + self.conv.append( + nn.Sequential( + nn.Conv2d(in_channels=c, out_channels=c, kernel_size=k, stride=1, padding=k // 2), + norm(c), + nn.ReLU(), + ) + ) + + def forward(self, x): + for layer in self.conv[:-1]: + x = torch.cat([layer(x), x], 1) + return self.conv[-1](x) + + +class TFC_TDF(nn.Module): + def __init__(self, c, l, f, k, bn, dense=False, bias=True, norm=nn.BatchNorm2d): + + super(TFC_TDF, self).__init__() + + self.use_tdf = bn is not None + + self.tfc = DenseTFC(c, l, k, norm) if dense else TFC(c, l, k, norm) + + if self.use_tdf: + if bn == 0: + self.tdf = nn.Sequential( + nn.Linear(f, f, bias=bias), + norm(c), + nn.ReLU() + ) + else: + self.tdf = nn.Sequential( + nn.Linear(f, f // bn, bias=bias), + norm(c), + nn.ReLU(), + nn.Linear(f // bn, f, bias=bias), + norm(c), + nn.ReLU() + ) + + def forward(self, x): + x = self.tfc(x) + return x + self.tdf(x) if self.use_tdf else x + diff --git a/lib_v5/pyrb.py b/lib_v5/pyrb.py new file mode 100644 index 0000000000000000000000000000000000000000..883a525ee28351b2d99f674dcc721af3f852f87f --- /dev/null +++ b/lib_v5/pyrb.py @@ -0,0 +1,92 @@ +import os +import subprocess +import tempfile +import six +import numpy as np +import soundfile as sf +import sys + +if getattr(sys, 'frozen', False): + BASE_PATH_RUB = sys._MEIPASS +else: + BASE_PATH_RUB = os.path.dirname(os.path.abspath(__file__)) + +__all__ = ['time_stretch', 'pitch_shift'] + +__RUBBERBAND_UTIL = os.path.join(BASE_PATH_RUB, 'rubberband') + +if six.PY2: + DEVNULL = open(os.devnull, 'w') +else: + DEVNULL = subprocess.DEVNULL + +def __rubberband(y, sr, **kwargs): + + assert sr > 0 + + # Get the input and output tempfile + fd, infile = tempfile.mkstemp(suffix='.wav') + os.close(fd) + fd, outfile = tempfile.mkstemp(suffix='.wav') + os.close(fd) + + # dump the audio + sf.write(infile, y, sr) + + try: + # Execute rubberband + arguments = [__RUBBERBAND_UTIL, '-q'] + + for key, value in six.iteritems(kwargs): + arguments.append(str(key)) + arguments.append(str(value)) + + arguments.extend([infile, outfile]) + + subprocess.check_call(arguments, stdout=DEVNULL, stderr=DEVNULL) + + # Load the processed audio. 
+ y_out, _ = sf.read(outfile, always_2d=True) + + # make sure that output dimensions matches input + if y.ndim == 1: + y_out = np.squeeze(y_out) + + except OSError as exc: + six.raise_from(RuntimeError('Failed to execute rubberband. ' + 'Please verify that rubberband-cli ' + 'is installed.'), + exc) + + finally: + # Remove temp files + os.unlink(infile) + os.unlink(outfile) + + return y_out + +def time_stretch(y, sr, rate, rbargs=None): + if rate <= 0: + raise ValueError('rate must be strictly positive') + + if rate == 1.0: + return y + + if rbargs is None: + rbargs = dict() + + rbargs.setdefault('--tempo', rate) + + return __rubberband(y, sr, **rbargs) + +def pitch_shift(y, sr, n_steps, rbargs=None): + + if n_steps == 0: + return y + + if rbargs is None: + rbargs = dict() + + rbargs.setdefault('--pitch', n_steps) + + return __rubberband(y, sr, **rbargs) diff --git a/lib_v5/results.py b/lib_v5/results.py new file mode 100644 index 0000000000000000000000000000000000000000..476f2d1ef420e97b7a8a9d0f1416eb8d99a1fdb2 --- /dev/null +++ b/lib_v5/results.py @@ -0,0 +1,48 @@ +# -*- coding: utf-8 -*- + +""" +Matchering - Audio Matching and Mastering Python Library +Copyright (C) 2016-2022 Sergree + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU General Public License as published by +the Free Software Foundation, either version 3 of the License, or +(at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU General Public License for more details. + +You should have received a copy of the GNU General Public License +along with this program. If not, see . +""" + +import os +import soundfile as sf + + +class Result: + def __init__( + self, file: str, subtype: str, use_limiter: bool = True, normalize: bool = True + ): + _, file_ext = os.path.splitext(file) + file_ext = file_ext[1:].upper() + if not sf.check_format(file_ext): + raise TypeError(f"{file_ext} format is not supported") + if not sf.check_format(file_ext, subtype): + raise TypeError(f"{file_ext} format does not have {subtype} subtype") + self.file = file + self.subtype = subtype + self.use_limiter = use_limiter + self.normalize = normalize + + +def pcm16(file: str) -> Result: + return Result(file, "PCM_16") + +def pcm24(file: str) -> Result: + return Result(file, "FLOAT") + +def save_audiofile(file: str, wav_set="PCM_16") -> Result: + return Result(file, wav_set) diff --git a/lib_v5/spec_utils.py b/lib_v5/spec_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..8ec520d9848053e04f1bfd597a4d62812c205cd3 --- /dev/null +++ b/lib_v5/spec_utils.py @@ -0,0 +1,1241 @@ +import audioread +import librosa +import numpy as np +import soundfile as sf +import math +import platform +import traceback +from . import pyrb +from scipy.signal import correlate, hilbert +import io + +OPERATING_SYSTEM = platform.system() +SYSTEM_ARCH = platform.platform() +SYSTEM_PROC = platform.processor() +ARM = 'arm' + +AUTO_PHASE = "Automatic" +POSITIVE_PHASE = "Positive Phase" +NEGATIVE_PHASE = "Negative Phase" +NONE_P = "None", +LOW_P = "Shifts: Low", +MED_P = "Shifts: Medium", +HIGH_P = "Shifts: High", +VHIGH_P = "Shifts: Very High" +MAXIMUM_P = "Shifts: Maximum" + +progress_value = 0 +last_update_time = 0 +is_macos = False + +if OPERATING_SYSTEM == 'Windows': + from pyrubberband import pyrb +else: + from . 
import pyrb + +if OPERATING_SYSTEM == 'Darwin': + wav_resolution = "polyphase" if SYSTEM_PROC == ARM or ARM in SYSTEM_ARCH else "sinc_fastest" + wav_resolution_float_resampling = "kaiser_best" if SYSTEM_PROC == ARM or ARM in SYSTEM_ARCH else wav_resolution + is_macos = True +else: + wav_resolution = "sinc_fastest" + wav_resolution_float_resampling = wav_resolution + +MAX_SPEC = 'Max Spec' +MIN_SPEC = 'Min Spec' +LIN_ENSE = 'Linear Ensemble' + +MAX_WAV = MAX_SPEC +MIN_WAV = MIN_SPEC + +AVERAGE = 'Average' + +def crop_center(h1, h2): + h1_shape = h1.size() + h2_shape = h2.size() + + if h1_shape[3] == h2_shape[3]: + return h1 + elif h1_shape[3] < h2_shape[3]: + raise ValueError('h1_shape[3] must be greater than h2_shape[3]') + + s_time = (h1_shape[3] - h2_shape[3]) // 2 + e_time = s_time + h2_shape[3] + h1 = h1[:, :, :, s_time:e_time] + + return h1 + +def preprocess(X_spec): + X_mag = np.abs(X_spec) + X_phase = np.angle(X_spec) + + return X_mag, X_phase + +def make_padding(width, cropsize, offset): + left = offset + roi_size = cropsize - offset * 2 + if roi_size == 0: + roi_size = cropsize + right = roi_size - (width % roi_size) + left + + return left, right, roi_size + +def normalize(wave, is_normalize=False): + """Normalize audio""" + + maxv = np.abs(wave).max() + if maxv > 1.0: + if is_normalize: + print("Above clipping threshold.") + wave /= maxv + + return wave + +def auto_transpose(audio_array:np.ndarray): + """ + Ensure that the audio array is in the (channels, samples) format. + + Parameters: + audio_array (ndarray): Input audio array. + + Returns: + ndarray: Transposed audio array if necessary. + """ + + # If the second dimension is 2 (indicating stereo channels), transpose the array + if audio_array.shape[1] == 2: + return audio_array.T + return audio_array + +def write_array_to_mem(audio_data, subtype): + if isinstance(audio_data, np.ndarray): + audio_buffer = io.BytesIO() + sf.write(audio_buffer, audio_data, 44100, subtype=subtype, format='WAV') + audio_buffer.seek(0) + return audio_buffer + else: + return audio_data + +def spectrogram_to_image(spec, mode='magnitude'): + if mode == 'magnitude': + if np.iscomplexobj(spec): + y = np.abs(spec) + else: + y = spec + y = np.log10(y ** 2 + 1e-8) + elif mode == 'phase': + if np.iscomplexobj(spec): + y = np.angle(spec) + else: + y = spec + + y -= y.min() + y *= 255 / y.max() + img = np.uint8(y) + + if y.ndim == 3: + img = img.transpose(1, 2, 0) + img = np.concatenate([ + np.max(img, axis=2, keepdims=True), img + ], axis=2) + + return img + +def reduce_vocal_aggressively(X, y, softmask): + v = X - y + y_mag_tmp = np.abs(y) + v_mag_tmp = np.abs(v) + + v_mask = v_mag_tmp > y_mag_tmp + y_mag = np.clip(y_mag_tmp - v_mag_tmp * v_mask * softmask, 0, np.inf) + + return y_mag * np.exp(1.j * np.angle(y)) + +def merge_artifacts(y_mask, thres=0.01, min_range=64, fade_size=32): + mask = y_mask + + try: + if min_range < fade_size * 2: + raise ValueError('min_range must be >= fade_size * 2') + + idx = np.where(y_mask.min(axis=(0, 1)) > thres)[0] + start_idx = np.insert(idx[np.where(np.diff(idx) != 1)[0] + 1], 0, idx[0]) + end_idx = np.append(idx[np.where(np.diff(idx) != 1)[0]], idx[-1]) + artifact_idx = np.where(end_idx - start_idx > min_range)[0] + weight = np.zeros_like(y_mask) + if len(artifact_idx) > 0: + start_idx = start_idx[artifact_idx] + end_idx = end_idx[artifact_idx] + old_e = None + for s, e in zip(start_idx, end_idx): + if old_e is not None and s - old_e < fade_size: + s = old_e - fade_size * 2 + + if s != 0: + weight[:, :, s:s + fade_size] 
= np.linspace(0, 1, fade_size) + else: + s -= fade_size + + if e != y_mask.shape[2]: + weight[:, :, e - fade_size:e] = np.linspace(1, 0, fade_size) + else: + e += fade_size + + weight[:, :, s + fade_size:e - fade_size] = 1 + old_e = e + + v_mask = 1 - y_mask + y_mask += weight * v_mask + + mask = y_mask + except Exception as e: + error_name = f'{type(e).__name__}' + traceback_text = ''.join(traceback.format_tb(e.__traceback__)) + message = f'{error_name}: "{e}"\n{traceback_text}"' + print('Post Process Failed: ', message) + + return mask + +def align_wave_head_and_tail(a, b): + l = min([a[0].size, b[0].size]) + + return a[:l,:l], b[:l,:l] + +def convert_channels(spec, mp, band): + cc = mp.param['band'][band].get('convert_channels') + + if 'mid_side_c' == cc: + spec_left = np.add(spec[0], spec[1] * .25) + spec_right = np.subtract(spec[1], spec[0] * .25) + elif 'mid_side' == cc: + spec_left = np.add(spec[0], spec[1]) / 2 + spec_right = np.subtract(spec[0], spec[1]) + elif 'stereo_n' == cc: + spec_left = np.add(spec[0], spec[1] * .25) / 0.9375 + spec_right = np.add(spec[1], spec[0] * .25) / 0.9375 + else: + return spec + + return np.asfortranarray([spec_left, spec_right]) + +def combine_spectrograms(specs, mp, is_v51_model=False): + l = min([specs[i].shape[2] for i in specs]) + spec_c = np.zeros(shape=(2, mp.param['bins'] + 1, l), dtype=np.complex64) + offset = 0 + bands_n = len(mp.param['band']) + + for d in range(1, bands_n + 1): + h = mp.param['band'][d]['crop_stop'] - mp.param['band'][d]['crop_start'] + spec_c[:, offset:offset+h, :l] = specs[d][:, mp.param['band'][d]['crop_start']:mp.param['band'][d]['crop_stop'], :l] + offset += h + + if offset > mp.param['bins']: + raise ValueError('Too much bins') + + # lowpass fiter + + if mp.param['pre_filter_start'] > 0: + if is_v51_model: + spec_c *= get_lp_filter_mask(spec_c.shape[1], mp.param['pre_filter_start'], mp.param['pre_filter_stop']) + else: + if bands_n == 1: + spec_c = fft_lp_filter(spec_c, mp.param['pre_filter_start'], mp.param['pre_filter_stop']) + else: + gp = 1 + for b in range(mp.param['pre_filter_start'] + 1, mp.param['pre_filter_stop']): + g = math.pow(10, -(b - mp.param['pre_filter_start']) * (3.5 - gp) / 20.0) + gp = g + spec_c[:, b, :] *= g + + return np.asfortranarray(spec_c) + +def wave_to_spectrogram(wave, hop_length, n_fft, mp, band, is_v51_model=False): + + if wave.ndim == 1: + wave = np.asfortranarray([wave,wave]) + + if not is_v51_model: + if mp.param['reverse']: + wave_left = np.flip(np.asfortranarray(wave[0])) + wave_right = np.flip(np.asfortranarray(wave[1])) + elif mp.param['mid_side']: + wave_left = np.asfortranarray(np.add(wave[0], wave[1]) / 2) + wave_right = np.asfortranarray(np.subtract(wave[0], wave[1])) + elif mp.param['mid_side_b2']: + wave_left = np.asfortranarray(np.add(wave[1], wave[0] * .5)) + wave_right = np.asfortranarray(np.subtract(wave[0], wave[1] * .5)) + else: + wave_left = np.asfortranarray(wave[0]) + wave_right = np.asfortranarray(wave[1]) + else: + wave_left = np.asfortranarray(wave[0]) + wave_right = np.asfortranarray(wave[1]) + + spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) + spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) + + spec = np.asfortranarray([spec_left, spec_right]) + + if is_v51_model: + spec = convert_channels(spec, mp, band) + + return spec + +def spectrogram_to_wave(spec, hop_length=1024, mp={}, band=0, is_v51_model=True): + spec_left = np.asfortranarray(spec[0]) + spec_right = np.asfortranarray(spec[1]) + + wave_left = 
librosa.istft(spec_left, hop_length=hop_length) + wave_right = librosa.istft(spec_right, hop_length=hop_length) + + if is_v51_model: + cc = mp.param['band'][band].get('convert_channels') + if 'mid_side_c' == cc: + return np.asfortranarray([np.subtract(wave_left / 1.0625, wave_right / 4.25), np.add(wave_right / 1.0625, wave_left / 4.25)]) + elif 'mid_side' == cc: + return np.asfortranarray([np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]) + elif 'stereo_n' == cc: + return np.asfortranarray([np.subtract(wave_left, wave_right * .25), np.subtract(wave_right, wave_left * .25)]) + else: + if mp.param['reverse']: + return np.asfortranarray([np.flip(wave_left), np.flip(wave_right)]) + elif mp.param['mid_side']: + return np.asfortranarray([np.add(wave_left, wave_right / 2), np.subtract(wave_left, wave_right / 2)]) + elif mp.param['mid_side_b2']: + return np.asfortranarray([np.add(wave_right / 1.25, .4 * wave_left), np.subtract(wave_left / 1.25, .4 * wave_right)]) + + return np.asfortranarray([wave_left, wave_right]) + +def cmb_spectrogram_to_wave(spec_m, mp, extra_bins_h=None, extra_bins=None, is_v51_model=False): + bands_n = len(mp.param['band']) + offset = 0 + + for d in range(1, bands_n + 1): + bp = mp.param['band'][d] + spec_s = np.ndarray(shape=(2, bp['n_fft'] // 2 + 1, spec_m.shape[2]), dtype=complex) + h = bp['crop_stop'] - bp['crop_start'] + spec_s[:, bp['crop_start']:bp['crop_stop'], :] = spec_m[:, offset:offset+h, :] + + offset += h + if d == bands_n: # higher + if extra_bins_h: # if --high_end_process bypass + max_bin = bp['n_fft'] // 2 + spec_s[:, max_bin-extra_bins_h:max_bin, :] = extra_bins[:, :extra_bins_h, :] + if bp['hpf_start'] > 0: + if is_v51_model: + spec_s *= get_hp_filter_mask(spec_s.shape[1], bp['hpf_start'], bp['hpf_stop'] - 1) + else: + spec_s = fft_hp_filter(spec_s, bp['hpf_start'], bp['hpf_stop'] - 1) + if bands_n == 1: + wave = spectrogram_to_wave(spec_s, bp['hl'], mp, d, is_v51_model) + else: + wave = np.add(wave, spectrogram_to_wave(spec_s, bp['hl'], mp, d, is_v51_model)) + else: + sr = mp.param['band'][d+1]['sr'] + if d == 1: # lower + if is_v51_model: + spec_s *= get_lp_filter_mask(spec_s.shape[1], bp['lpf_start'], bp['lpf_stop']) + else: + spec_s = fft_lp_filter(spec_s, bp['lpf_start'], bp['lpf_stop']) + wave = librosa.resample(spectrogram_to_wave(spec_s, bp['hl'], mp, d, is_v51_model), bp['sr'], sr, res_type=wav_resolution) + else: # mid + if is_v51_model: + spec_s *= get_hp_filter_mask(spec_s.shape[1], bp['hpf_start'], bp['hpf_stop'] - 1) + spec_s *= get_lp_filter_mask(spec_s.shape[1], bp['lpf_start'], bp['lpf_stop']) + else: + spec_s = fft_hp_filter(spec_s, bp['hpf_start'], bp['hpf_stop'] - 1) + spec_s = fft_lp_filter(spec_s, bp['lpf_start'], bp['lpf_stop']) + + wave2 = np.add(wave, spectrogram_to_wave(spec_s, bp['hl'], mp, d, is_v51_model)) + wave = librosa.resample(wave2, bp['sr'], sr, res_type=wav_resolution) + + return wave + +def get_lp_filter_mask(n_bins, bin_start, bin_stop): + mask = np.concatenate([ + np.ones((bin_start - 1, 1)), + np.linspace(1, 0, bin_stop - bin_start + 1)[:, None], + np.zeros((n_bins - bin_stop, 1)) + ], axis=0) + + return mask + +def get_hp_filter_mask(n_bins, bin_start, bin_stop): + mask = np.concatenate([ + np.zeros((bin_stop + 1, 1)), + np.linspace(0, 1, 1 + bin_start - bin_stop)[:, None], + np.ones((n_bins - bin_start - 2, 1)) + ], axis=0) + + return mask + +def fft_lp_filter(spec, bin_start, bin_stop): + g = 1.0 + for b in range(bin_start, bin_stop): + g -= 1 / (bin_stop - bin_start) + spec[:, b, :] = 
g * spec[:, b, :] + + spec[:, bin_stop:, :] *= 0 + + return spec + +def fft_hp_filter(spec, bin_start, bin_stop): + g = 1.0 + for b in range(bin_start, bin_stop, -1): + g -= 1 / (bin_start - bin_stop) + spec[:, b, :] = g * spec[:, b, :] + + spec[:, 0:bin_stop+1, :] *= 0 + + return spec + +def spectrogram_to_wave_old(spec, hop_length=1024): + if spec.ndim == 2: + wave = librosa.istft(spec, hop_length=hop_length) + elif spec.ndim == 3: + spec_left = np.asfortranarray(spec[0]) + spec_right = np.asfortranarray(spec[1]) + + wave_left = librosa.istft(spec_left, hop_length=hop_length) + wave_right = librosa.istft(spec_right, hop_length=hop_length) + wave = np.asfortranarray([wave_left, wave_right]) + + return wave + +def wave_to_spectrogram_old(wave, hop_length, n_fft): + wave_left = np.asfortranarray(wave[0]) + wave_right = np.asfortranarray(wave[1]) + + spec_left = librosa.stft(wave_left, n_fft, hop_length=hop_length) + spec_right = librosa.stft(wave_right, n_fft, hop_length=hop_length) + + spec = np.asfortranarray([spec_left, spec_right]) + + return spec + +def mirroring(a, spec_m, input_high_end, mp): + if 'mirroring' == a: + mirror = np.flip(np.abs(spec_m[:, mp.param['pre_filter_start']-10-input_high_end.shape[1]:mp.param['pre_filter_start']-10, :]), 1) + mirror = mirror * np.exp(1.j * np.angle(input_high_end)) + + return np.where(np.abs(input_high_end) <= np.abs(mirror), input_high_end, mirror) + + if 'mirroring2' == a: + mirror = np.flip(np.abs(spec_m[:, mp.param['pre_filter_start']-10-input_high_end.shape[1]:mp.param['pre_filter_start']-10, :]), 1) + mi = np.multiply(mirror, input_high_end * 1.7) + + return np.where(np.abs(input_high_end) <= np.abs(mi), input_high_end, mi) + +def adjust_aggr(mask, is_non_accom_stem, aggressiveness): + aggr = aggressiveness['value'] * 2 + + if aggr != 0: + if is_non_accom_stem: + aggr = 1 - aggr + + aggr = [aggr, aggr] + + if aggressiveness['aggr_correction'] is not None: + aggr[0] += aggressiveness['aggr_correction']['left'] + aggr[1] += aggressiveness['aggr_correction']['right'] + + for ch in range(2): + mask[ch, :aggressiveness['split_bin']] = np.power(mask[ch, :aggressiveness['split_bin']], 1 + aggr[ch] / 3) + mask[ch, aggressiveness['split_bin']:] = np.power(mask[ch, aggressiveness['split_bin']:], 1 + aggr[ch]) + + return mask + +def stft(wave, nfft, hl): + wave_left = np.asfortranarray(wave[0]) + wave_right = np.asfortranarray(wave[1]) + spec_left = librosa.stft(wave_left, nfft, hop_length=hl) + spec_right = librosa.stft(wave_right, nfft, hop_length=hl) + spec = np.asfortranarray([spec_left, spec_right]) + + return spec + +def istft(spec, hl): + spec_left = np.asfortranarray(spec[0]) + spec_right = np.asfortranarray(spec[1]) + wave_left = librosa.istft(spec_left, hop_length=hl) + wave_right = librosa.istft(spec_right, hop_length=hl) + wave = np.asfortranarray([wave_left, wave_right]) + + return wave + +def spec_effects(wave, algorithm='Default', value=None): + spec = [stft(wave[0],2048,1024), stft(wave[1],2048,1024)] + if algorithm == 'Min_Mag': + v_spec_m = np.where(np.abs(spec[1]) <= np.abs(spec[0]), spec[1], spec[0]) + wave = istft(v_spec_m,1024) + elif algorithm == 'Max_Mag': + v_spec_m = np.where(np.abs(spec[1]) >= np.abs(spec[0]), spec[1], spec[0]) + wave = istft(v_spec_m,1024) + elif algorithm == 'Default': + wave = (wave[1] * value) + (wave[0] * (1-value)) + elif algorithm == 'Invert_p': + X_mag = np.abs(spec[0]) + y_mag = np.abs(spec[1]) + max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) + v_spec = spec[1] - max_mag * np.exp(1.j * 
np.angle(spec[0])) + wave = istft(v_spec,1024) + + return wave + +def spectrogram_to_wave_no_mp(spec, n_fft=2048, hop_length=1024): + wave = librosa.istft(spec, n_fft=n_fft, hop_length=hop_length) + + if wave.ndim == 1: + wave = np.asfortranarray([wave,wave]) + + return wave + +def wave_to_spectrogram_no_mp(wave): + + spec = librosa.stft(wave, n_fft=2048, hop_length=1024) + + if spec.ndim == 1: + spec = np.asfortranarray([spec,spec]) + + return spec + +def invert_audio(specs, invert_p=True): + + ln = min([specs[0].shape[2], specs[1].shape[2]]) + specs[0] = specs[0][:,:,:ln] + specs[1] = specs[1][:,:,:ln] + + if invert_p: + X_mag = np.abs(specs[0]) + y_mag = np.abs(specs[1]) + max_mag = np.where(X_mag >= y_mag, X_mag, y_mag) + v_spec = specs[1] - max_mag * np.exp(1.j * np.angle(specs[0])) + else: + specs[1] = reduce_vocal_aggressively(specs[0], specs[1], 0.2) + v_spec = specs[0] - specs[1] + + return v_spec + +def invert_stem(mixture, stem): + mixture = wave_to_spectrogram_no_mp(mixture) + stem = wave_to_spectrogram_no_mp(stem) + output = spectrogram_to_wave_no_mp(invert_audio([mixture, stem])) + + return -output.T + +def ensembling(a, inputs, is_wavs=False): + + for i in range(1, len(inputs)): + if i == 1: + input = inputs[0] + + if is_wavs: + ln = min([input.shape[1], inputs[i].shape[1]]) + input = input[:,:ln] + inputs[i] = inputs[i][:,:ln] + else: + ln = min([input.shape[2], inputs[i].shape[2]]) + input = input[:,:,:ln] + inputs[i] = inputs[i][:,:,:ln] + + if MIN_SPEC == a: + input = np.where(np.abs(inputs[i]) <= np.abs(input), inputs[i], input) + if MAX_SPEC == a: + input = np.where(np.abs(inputs[i]) >= np.abs(input), inputs[i], input) + + #linear_ensemble + #input = ensemble_wav(inputs, split_size=1) + + return input + +def ensemble_for_align(waves): + + specs = [] + + for wav in waves: + spec = wave_to_spectrogram_no_mp(wav.T) + specs.append(spec) + + wav_aligned = spectrogram_to_wave_no_mp(ensembling(MIN_SPEC, specs)).T + wav_aligned = match_array_shapes(wav_aligned, waves[1], is_swap=True) + + return wav_aligned + +def ensemble_inputs(audio_input, algorithm, is_normalization, wav_type_set, save_path, is_wave=False, is_array=False): + + wavs_ = [] + + if algorithm == AVERAGE: + output = average_audio(audio_input) + samplerate = 44100 + else: + specs = [] + + for i in range(len(audio_input)): + wave, samplerate = librosa.load(audio_input[i], mono=False, sr=44100) + wavs_.append(wave) + spec = wave if is_wave else wave_to_spectrogram_no_mp(wave) + specs.append(spec) + + wave_shapes = [w.shape[1] for w in wavs_] + target_shape = wavs_[wave_shapes.index(max(wave_shapes))] + + if is_wave: + output = ensembling(algorithm, specs, is_wavs=True) + else: + output = spectrogram_to_wave_no_mp(ensembling(algorithm, specs)) + + output = to_shape(output, target_shape.shape) + + sf.write(save_path, normalize(output.T, is_normalization), samplerate, subtype=wav_type_set) + +def to_shape(x, target_shape): + padding_list = [] + for x_dim, target_dim in zip(x.shape, target_shape): + pad_value = (target_dim - x_dim) + pad_tuple = ((0, pad_value)) + padding_list.append(pad_tuple) + + return np.pad(x, tuple(padding_list), mode='constant') + +def to_shape_minimize(x: np.ndarray, target_shape): + + padding_list = [] + for x_dim, target_dim in zip(x.shape, target_shape): + pad_value = (target_dim - x_dim) + pad_tuple = ((0, pad_value)) + padding_list.append(pad_tuple) + + return np.pad(x, tuple(padding_list), mode='constant') + +def detect_leading_silence(audio, sr, silence_threshold=0.007, 
frame_length=1024): + """ + Detect silence at the beginning of an audio signal. + + :param audio: np.array, audio signal + :param sr: int, sample rate + :param silence_threshold: float, magnitude threshold below which is considered silence + :param frame_length: int, the number of samples to consider for each check + + :return: float, duration of the leading silence in milliseconds + """ + + if len(audio.shape) == 2: + # If stereo, pick the channel with more energy to determine the silence + channel = np.argmax(np.sum(np.abs(audio), axis=1)) + audio = audio[channel] + + for i in range(0, len(audio), frame_length): + if np.max(np.abs(audio[i:i+frame_length])) > silence_threshold: + return (i / sr) * 1000 + + return (len(audio) / sr) * 1000 + +def adjust_leading_silence(target_audio, reference_audio, silence_threshold=0.01, frame_length=1024): + """ + Adjust the leading silence of the target_audio to match the leading silence of the reference_audio. + + :param target_audio: np.array, audio signal that will have its silence adjusted + :param reference_audio: np.array, audio signal used as a reference + :param sr: int, sample rate + :param silence_threshold: float, magnitude threshold below which is considered silence + :param frame_length: int, the number of samples to consider for each check + + :return: np.array, target_audio adjusted to have the same leading silence as reference_audio + """ + + def find_silence_end(audio): + if len(audio.shape) == 2: + # If stereo, pick the channel with more energy to determine the silence + channel = np.argmax(np.sum(np.abs(audio), axis=1)) + audio_mono = audio[channel] + else: + audio_mono = audio + + for i in range(0, len(audio_mono), frame_length): + if np.max(np.abs(audio_mono[i:i+frame_length])) > silence_threshold: + return i + return len(audio_mono) + + ref_silence_end = find_silence_end(reference_audio) + target_silence_end = find_silence_end(target_audio) + silence_difference = ref_silence_end - target_silence_end + + try: + ref_silence_end_p = (ref_silence_end / 44100) * 1000 + target_silence_end_p = (target_silence_end / 44100) * 1000 + silence_difference_p = ref_silence_end_p - target_silence_end_p + print("silence_difference: ", silence_difference_p) + except Exception as e: + pass + + if silence_difference > 0: # Add silence to target_audio + if len(target_audio.shape) == 2: # stereo + silence_to_add = np.zeros((target_audio.shape[0], silence_difference)) + else: # mono + silence_to_add = np.zeros(silence_difference) + return np.hstack((silence_to_add, target_audio)) + elif silence_difference < 0: # Remove silence from target_audio + if len(target_audio.shape) == 2: # stereo + return target_audio[:, -silence_difference:] + else: # mono + return target_audio[-silence_difference:] + else: # No adjustment needed + return target_audio + +def match_array_shapes(array_1:np.ndarray, array_2:np.ndarray, is_swap=False): + + if is_swap: + array_1, array_2 = array_1.T, array_2.T + + #print("before", array_1.shape, array_2.shape) + if array_1.shape[1] > array_2.shape[1]: + array_1 = array_1[:,:array_2.shape[1]] + elif array_1.shape[1] < array_2.shape[1]: + padding = array_2.shape[1] - array_1.shape[1] + array_1 = np.pad(array_1, ((0,0), (0,padding)), 'constant', constant_values=0) + + #print("after", array_1.shape, array_2.shape) + + if is_swap: + array_1, array_2 = array_1.T, array_2.T + + return array_1 + +def match_mono_array_shapes(array_1: np.ndarray, array_2: np.ndarray): + + if len(array_1) > len(array_2): + array_1 = array_1[:len(array_2)] + 
elif len(array_1) < len(array_2): + padding = len(array_2) - len(array_1) + array_1 = np.pad(array_1, (0, padding), 'constant', constant_values=0) + + return array_1 + +def change_pitch_semitones(y, sr, semitone_shift): + factor = 2 ** (semitone_shift / 12) # Convert semitone shift to factor for resampling + y_pitch_tuned = [] + for y_channel in y: + y_pitch_tuned.append(librosa.resample(y_channel, sr, sr*factor, res_type=wav_resolution_float_resampling)) + y_pitch_tuned = np.array(y_pitch_tuned) + new_sr = sr * factor + return y_pitch_tuned, new_sr + +def augment_audio(export_path, audio_file, rate, is_normalization, wav_type_set, save_format=None, is_pitch=False, is_time_correction=True): + + wav, sr = librosa.load(audio_file, sr=44100, mono=False) + + if wav.ndim == 1: + wav = np.asfortranarray([wav,wav]) + + if not is_time_correction: + wav_mix = change_pitch_semitones(wav, 44100, semitone_shift=-rate)[0] + else: + if is_pitch: + wav_1 = pyrb.pitch_shift(wav[0], sr, rate, rbargs=None) + wav_2 = pyrb.pitch_shift(wav[1], sr, rate, rbargs=None) + else: + wav_1 = pyrb.time_stretch(wav[0], sr, rate, rbargs=None) + wav_2 = pyrb.time_stretch(wav[1], sr, rate, rbargs=None) + + if wav_1.shape > wav_2.shape: + wav_2 = to_shape(wav_2, wav_1.shape) + if wav_1.shape < wav_2.shape: + wav_1 = to_shape(wav_1, wav_2.shape) + + wav_mix = np.asfortranarray([wav_1, wav_2]) + + sf.write(export_path, normalize(wav_mix.T, is_normalization), sr, subtype=wav_type_set) + save_format(export_path) + +def average_audio(audio): + + waves = [] + wave_shapes = [] + final_waves = [] + + for i in range(len(audio)): + wave = librosa.load(audio[i], sr=44100, mono=False) + waves.append(wave[0]) + wave_shapes.append(wave[0].shape[1]) + + wave_shapes_index = wave_shapes.index(max(wave_shapes)) + target_shape = waves[wave_shapes_index] + waves.pop(wave_shapes_index) + final_waves.append(target_shape) + + for n_array in waves: + wav_target = to_shape(n_array, target_shape.shape) + final_waves.append(wav_target) + + waves = sum(final_waves) + waves = waves/len(audio) + + return waves + +def average_dual_sources(wav_1, wav_2, value): + + if wav_1.shape > wav_2.shape: + wav_2 = to_shape(wav_2, wav_1.shape) + if wav_1.shape < wav_2.shape: + wav_1 = to_shape(wav_1, wav_2.shape) + + wave = (wav_1 * value) + (wav_2 * (1-value)) + + return wave + +def reshape_sources(wav_1: np.ndarray, wav_2: np.ndarray): + + if wav_1.shape > wav_2.shape: + wav_2 = to_shape(wav_2, wav_1.shape) + if wav_1.shape < wav_2.shape: + ln = min([wav_1.shape[1], wav_2.shape[1]]) + wav_2 = wav_2[:,:ln] + + ln = min([wav_1.shape[1], wav_2.shape[1]]) + wav_1 = wav_1[:,:ln] + wav_2 = wav_2[:,:ln] + + return wav_2 + +def reshape_sources_ref(wav_1_shape, wav_2: np.ndarray): + + if wav_1_shape > wav_2.shape: + wav_2 = to_shape(wav_2, wav_1_shape) + + return wav_2 + +def combine_arrarys(audio_sources, is_swap=False): + source = np.zeros_like(max(audio_sources, key=np.size)) + + for v in audio_sources: + v = match_array_shapes(v, source, is_swap=is_swap) + source += v + + return source + +def combine_audio(paths: list, audio_file_base=None, wav_type_set='FLOAT', save_format=None): + + source = combine_arrarys([load_audio(i) for i in paths]) + save_path = f"{audio_file_base}_combined.wav" + sf.write(save_path, source.T, 44100, subtype=wav_type_set) + save_format(save_path) + +def reduce_mix_bv(inst_source, voc_source, reduction_rate=0.9): + # Reduce the volume + inst_source = inst_source * (1 - reduction_rate) + + mix_reduced = combine_arrarys([inst_source, voc_source], 
is_swap=True) + + return mix_reduced + +def organize_inputs(inputs): + input_list = { + "target":None, + "reference":None, + "reverb":None, + "inst":None + } + + for i in inputs: + if i.endswith("_(Vocals).wav"): + input_list["reference"] = i + elif "_RVC_" in i: + input_list["target"] = i + elif i.endswith("reverbed_stem.wav"): + input_list["reverb"] = i + elif i.endswith("_(Instrumental).wav"): + input_list["inst"] = i + + return input_list + +def check_if_phase_inverted(wav1, wav2, is_mono=False): + # Load the audio files + if not is_mono: + wav1 = np.mean(wav1, axis=0) + wav2 = np.mean(wav2, axis=0) + + # Compute the correlation + correlation = np.corrcoef(wav1[:1000], wav2[:1000]) + + return correlation[0,1] < 0 + +def align_audio(file1, + file2, + file2_aligned, + file_subtracted, + wav_type_set, + is_save_aligned, + command_Text, + save_format, + align_window:list, + align_intro_val:list, + db_analysis:tuple, + set_progress_bar, + phase_option, + phase_shifts, + is_match_silence, + is_spec_match): + + global progress_value + progress_value = 0 + is_mono = False + + def get_diff(a, b): + corr = np.correlate(a, b, "full") + diff = corr.argmax() - (b.shape[0] - 1) + + return diff + + def progress_bar(length): + global progress_value + progress_value += 1 + + if (0.90/length*progress_value) >= 0.9: + length = progress_value + 1 + + set_progress_bar(0.1, (0.9/length*progress_value)) + + # read tracks + + if file1.endswith(".mp3") and is_macos: + length1 = rerun_mp3(file1) + wav1, sr1 = librosa.load(file1, duration=length1, sr=44100, mono=False) + else: + wav1, sr1 = librosa.load(file1, sr=44100, mono=False) + + if file2.endswith(".mp3") and is_macos: + length2 = rerun_mp3(file2) + wav2, sr2 = librosa.load(file2, duration=length2, sr=44100, mono=False) + else: + wav2, sr2 = librosa.load(file2, sr=44100, mono=False) + + if wav1.ndim == 1 and wav2.ndim == 1: + is_mono = True + elif wav1.ndim == 1: + wav1 = np.asfortranarray([wav1,wav1]) + elif wav2.ndim == 1: + wav2 = np.asfortranarray([wav2,wav2]) + + # Check if phase is inverted + if phase_option == AUTO_PHASE: + if check_if_phase_inverted(wav1, wav2, is_mono=is_mono): + wav2 = -wav2 + elif phase_option == POSITIVE_PHASE: + wav2 = +wav2 + elif phase_option == NEGATIVE_PHASE: + wav2 = -wav2 + + if is_match_silence: + wav2 = adjust_leading_silence(wav2, wav1) + + wav1_length = int(librosa.get_duration(y=wav1, sr=44100)) + wav2_length = int(librosa.get_duration(y=wav2, sr=44100)) + + if not is_mono: + wav1 = wav1.transpose() + wav2 = wav2.transpose() + + wav2_org = wav2.copy() + + command_Text("Processing files... 
\n") + seconds_length = min(wav1_length, wav2_length) + + wav2_aligned_sources = [] + + for sec_len in align_intro_val: + # pick a position at 1 second in and get diff + sec_seg = 1 if sec_len == 1 else int(seconds_length // sec_len) + index = sr1*sec_seg # 1 second in, assuming sr1 = sr2 = 44100 + + if is_mono: + samp1, samp2 = wav1[index : index + sr1], wav2[index : index + sr1] + diff = get_diff(samp1, samp2) + #print(f"Estimated difference: {diff}\n") + else: + index = sr1*sec_seg # 1 second in, assuming sr1 = sr2 = 44100 + samp1, samp2 = wav1[index : index + sr1, 0], wav2[index : index + sr1, 0] + samp1_r, samp2_r = wav1[index : index + sr1, 1], wav2[index : index + sr1, 1] + diff, diff_r = get_diff(samp1, samp2), get_diff(samp1_r, samp2_r) + #print(f"Estimated difference Left Channel: {diff}\nEstimated difference Right Channel: {diff_r}\n") + + # make aligned track 2 + if diff > 0: + zeros_to_append = np.zeros(diff) if is_mono else np.zeros((diff, 2)) + wav2_aligned = np.append(zeros_to_append, wav2_org, axis=0) + elif diff < 0: + wav2_aligned = wav2_org[-diff:] + else: + wav2_aligned = wav2_org + #command_Text(f"Audio files already aligned.\n") + + if not any(np.array_equal(wav2_aligned, source) for source in wav2_aligned_sources): + wav2_aligned_sources.append(wav2_aligned) + + #print("Unique Sources: ", len(wav2_aligned_sources)) + + unique_sources = len(wav2_aligned_sources) + + sub_mapper_big_mapper = {} + + for s in wav2_aligned_sources: + wav2_aligned = match_mono_array_shapes(s, wav1) if is_mono else match_array_shapes(s, wav1, is_swap=True) + + if align_window: + wav_sub = time_correction(wav1, wav2_aligned, seconds_length, align_window=align_window, db_analysis=db_analysis, progress_bar=progress_bar, unique_sources=unique_sources, phase_shifts=phase_shifts) + wav_sub_size = np.abs(wav_sub).mean() + sub_mapper_big_mapper = {**sub_mapper_big_mapper, **{wav_sub_size:wav_sub}} + else: + wav2_aligned = wav2_aligned * np.power(10, db_analysis[0] / 20) + db_range = db_analysis[1] + + for db_adjustment in db_range: + # Adjust the dB of track2 + s_adjusted = wav2_aligned * (10 ** (db_adjustment / 20)) + wav_sub = wav1 - s_adjusted + wav_sub_size = np.abs(wav_sub).mean() + sub_mapper_big_mapper = {**sub_mapper_big_mapper, **{wav_sub_size:wav_sub}} + + #print(sub_mapper_big_mapper.keys(), min(sub_mapper_big_mapper.keys())) + + sub_mapper_value_list = list(sub_mapper_big_mapper.values()) + + if is_spec_match and len(sub_mapper_value_list) >= 2: + #print("using spec ensemble with align") + wav_sub = ensemble_for_align(list(sub_mapper_big_mapper.values())) + else: + #print("using linear ensemble with align") + wav_sub = ensemble_wav(list(sub_mapper_big_mapper.values())) + + #print(f"Mix Mean: {np.abs(wav1).mean()}\nInst Mean: {np.abs(wav2).mean()}") + #print('Final: ', np.abs(wav_sub).mean()) + wav_sub = np.clip(wav_sub, -1, +1) + + command_Text(f"Saving inverted track... 
") + + if is_save_aligned or is_spec_match: + wav1 = match_mono_array_shapes(wav1, wav_sub) if is_mono else match_array_shapes(wav1, wav_sub, is_swap=True) + wav2_aligned = wav1 - wav_sub + + if is_spec_match: + if wav1.ndim == 1 and wav2.ndim == 1: + wav2_aligned = np.asfortranarray([wav2_aligned, wav2_aligned]).T + wav1 = np.asfortranarray([wav1, wav1]).T + + wav2_aligned = ensemble_for_align([wav2_aligned, wav1]) + wav_sub = wav1 - wav2_aligned + + if is_save_aligned: + sf.write(file2_aligned, wav2_aligned, sr1, subtype=wav_type_set) + save_format(file2_aligned) + + sf.write(file_subtracted, wav_sub, sr1, subtype=wav_type_set) + save_format(file_subtracted) + +def phase_shift_hilbert(signal, degree): + analytic_signal = hilbert(signal) + return np.cos(np.radians(degree)) * analytic_signal.real - np.sin(np.radians(degree)) * analytic_signal.imag + +def get_phase_shifted_tracks(track, phase_shift): + if phase_shift == 180: + return [track, -track] + + step = phase_shift + end = 180 - (180 % step) if 180 % step == 0 else 181 + phase_range = range(step, end, step) + + flipped_list = [track, -track] + for i in phase_range: + flipped_list.extend([phase_shift_hilbert(track, i), phase_shift_hilbert(track, -i)]) + + return flipped_list + +def time_correction(mix:np.ndarray, instrumental:np.ndarray, seconds_length, align_window, db_analysis, sr=44100, progress_bar=None, unique_sources=None, phase_shifts=NONE_P): + # Function to align two tracks using cross-correlation + + def align_tracks(track1, track2): + # A dictionary to store each version of track2_shifted and its mean absolute value + shifted_tracks = {} + + # Loop to adjust dB of track2 + track2 = track2 * np.power(10, db_analysis[0] / 20) + db_range = db_analysis[1] + + if phase_shifts == 190: + track2_flipped = [track2] + else: + track2_flipped = get_phase_shifted_tracks(track2, phase_shifts) + + for db_adjustment in db_range: + for t in track2_flipped: + # Adjust the dB of track2 + track2_adjusted = t * (10 ** (db_adjustment / 20)) + corr = correlate(track1, track2_adjusted) + delay = np.argmax(np.abs(corr)) - (len(track1) - 1) + track2_shifted = np.roll(track2_adjusted, shift=delay) + + # Compute the mean absolute value of track2_shifted + track2_shifted_sub = track1 - track2_shifted + mean_abs_value = np.abs(track2_shifted_sub).mean() + + # Store track2_shifted and its mean absolute value in the dictionary + shifted_tracks[mean_abs_value] = track2_shifted + + # Return the version of track2_shifted with the smallest mean absolute value + + return shifted_tracks[min(shifted_tracks.keys())] + + # Make sure the audio files have the same shape + + assert mix.shape == instrumental.shape, f"Audio files must have the same shape - Mix: {mix.shape}, Inst: {instrumental.shape}" + + seconds_length = seconds_length // 2 + + sub_mapper = {} + + progress_update_interval = 120 + total_iterations = 0 + + if len(align_window) > 2: + progress_update_interval = 320 + + for secs in align_window: + step = secs / 2 + window_size = int(sr * secs) + step_size = int(sr * step) + + if len(mix.shape) == 1: + total_mono = (len(range(0, len(mix) - window_size, step_size))//progress_update_interval)*unique_sources + total_iterations += total_mono + else: + total_stereo_ = len(range(0, len(mix[:, 0]) - window_size, step_size))*2 + total_stereo = (total_stereo_//progress_update_interval) * unique_sources + total_iterations += total_stereo + + #print(total_iterations) + + for secs in align_window: + sub = np.zeros_like(mix) + divider = np.zeros_like(mix) + step = secs 
/ 2 + window_size = int(sr * secs) + step_size = int(sr * step) + window = np.hanning(window_size) + + # For the mono case: + if len(mix.shape) == 1: + # The files are mono + counter = 0 + for i in range(0, len(mix) - window_size, step_size): + counter += 1 + if counter % progress_update_interval == 0: + progress_bar(total_iterations) + window_mix = mix[i:i+window_size] * window + window_instrumental = instrumental[i:i+window_size] * window + window_instrumental_aligned = align_tracks(window_mix, window_instrumental) + sub[i:i+window_size] += window_mix - window_instrumental_aligned + divider[i:i+window_size] += window + else: + # The files are stereo + counter = 0 + for ch in range(mix.shape[1]): + for i in range(0, len(mix[:, ch]) - window_size, step_size): + counter += 1 + if counter % progress_update_interval == 0: + progress_bar(total_iterations) + window_mix = mix[i:i+window_size, ch] * window + window_instrumental = instrumental[i:i+window_size, ch] * window + window_instrumental_aligned = align_tracks(window_mix, window_instrumental) + sub[i:i+window_size, ch] += window_mix - window_instrumental_aligned + divider[i:i+window_size, ch] += window + + # Normalize the result by the overlap count + sub = np.where(divider > 1e-6, sub / divider, sub) + sub_size = np.abs(sub).mean() + sub_mapper = {**sub_mapper, **{sub_size: sub}} + + #print("SUB_LEN", len(list(sub_mapper.values()))) + + sub = ensemble_wav(list(sub_mapper.values()), split_size=12) + + return sub + +def ensemble_wav(waveforms, split_size=240): + # Create a dictionary to hold the thirds of each waveform and their mean absolute values + waveform_thirds = {i: np.array_split(waveform, split_size) for i, waveform in enumerate(waveforms)} + + # Initialize the final waveform + final_waveform = [] + + # For chunk + for third_idx in range(split_size): + # Compute the mean absolute value of each third from each waveform + means = [np.abs(waveform_thirds[i][third_idx]).mean() for i in range(len(waveforms))] + + # Find the index of the waveform with the lowest mean absolute value for this third + min_index = np.argmin(means) + + # Add the least noisy third to the final waveform + final_waveform.append(waveform_thirds[min_index][third_idx]) + + # Concatenate all the thirds to create the final waveform + final_waveform = np.concatenate(final_waveform) + + return final_waveform + +def ensemble_wav_min(waveforms): + for i in range(1, len(waveforms)): + if i == 1: + wave = waveforms[0] + + ln = min(len(wave), len(waveforms[i])) + wave = wave[:ln] + waveforms[i] = waveforms[i][:ln] + + wave = np.where(np.abs(waveforms[i]) <= np.abs(wave), waveforms[i], wave) + + return wave + +def align_audio_test(wav1, wav2, sr1=44100): + def get_diff(a, b): + corr = np.correlate(a, b, "full") + diff = corr.argmax() - (b.shape[0] - 1) + return diff + + # read tracks + wav1 = wav1.transpose() + wav2 = wav2.transpose() + + #print(f"Audio file shapes: {wav1.shape} / {wav2.shape}\n") + + wav2_org = wav2.copy() + + # pick a position at 1 second in and get diff + index = sr1#*seconds_length # 1 second in, assuming sr1 = sr2 = 44100 + samp1 = wav1[index : index + sr1, 0] # currently use left channel + samp2 = wav2[index : index + sr1, 0] + diff = get_diff(samp1, samp2) + + # make aligned track 2 + if diff > 0: + wav2_aligned = np.append(np.zeros((diff, 1)), wav2_org, axis=0) + elif diff < 0: + wav2_aligned = wav2_org[-diff:] + else: + wav2_aligned = wav2_org + + return wav2_aligned + +def load_audio(audio_file): + wav, sr = librosa.load(audio_file, sr=44100, 
mono=False) + + if wav.ndim == 1: + wav = np.asfortranarray([wav,wav]) + + return wav + +def rerun_mp3(audio_file): + with audioread.audio_open(audio_file) as f: + track_length = int(f.duration) + + return track_length diff --git a/lib_v5/tfc_tdf_v3.py b/lib_v5/tfc_tdf_v3.py new file mode 100644 index 0000000000000000000000000000000000000000..eba006c98249c1730c50fae2ac8dcd42a9be3453 --- /dev/null +++ b/lib_v5/tfc_tdf_v3.py @@ -0,0 +1,253 @@ +import torch +import torch.nn as nn +from functools import partial + +class STFT: + def __init__(self, n_fft, hop_length, dim_f, device): + self.n_fft = n_fft + self.hop_length = hop_length + self.window = torch.hann_window(window_length=self.n_fft, periodic=True) + self.dim_f = dim_f + self.device = device + + def __call__(self, x): + + x_is_mps = not x.device.type in ["cuda", "cpu"] + if x_is_mps: + x = x.cpu() + + window = self.window.to(x.device) + batch_dims = x.shape[:-2] + c, t = x.shape[-2:] + x = x.reshape([-1, t]) + x = torch.stft(x, n_fft=self.n_fft, hop_length=self.hop_length, window=window, center=True,return_complex=False) + x = x.permute([0, 3, 1, 2]) + x = x.reshape([*batch_dims, c, 2, -1, x.shape[-1]]).reshape([*batch_dims, c * 2, -1, x.shape[-1]]) + + if x_is_mps: + x = x.to(self.device) + + return x[..., :self.dim_f, :] + + def inverse(self, x): + + x_is_mps = not x.device.type in ["cuda", "cpu"] + if x_is_mps: + x = x.cpu() + + window = self.window.to(x.device) + batch_dims = x.shape[:-3] + c, f, t = x.shape[-3:] + n = self.n_fft // 2 + 1 + f_pad = torch.zeros([*batch_dims, c, n - f, t]).to(x.device) + x = torch.cat([x, f_pad], -2) + x = x.reshape([*batch_dims, c // 2, 2, n, t]).reshape([-1, 2, n, t]) + x = x.permute([0, 2, 3, 1]) + x = x[..., 0] + x[..., 1] * 1.j + x = torch.istft(x, n_fft=self.n_fft, hop_length=self.hop_length, window=window, center=True) + x = x.reshape([*batch_dims, 2, -1]) + + if x_is_mps: + x = x.to(self.device) + + return x + +def get_norm(norm_type): + def norm(c, norm_type): + if norm_type == 'BatchNorm': + return nn.BatchNorm2d(c) + elif norm_type == 'InstanceNorm': + return nn.InstanceNorm2d(c, affine=True) + elif 'GroupNorm' in norm_type: + g = int(norm_type.replace('GroupNorm', '')) + return nn.GroupNorm(num_groups=g, num_channels=c) + else: + return nn.Identity() + + return partial(norm, norm_type=norm_type) + + +def get_act(act_type): + if act_type == 'gelu': + return nn.GELU() + elif act_type == 'relu': + return nn.ReLU() + elif act_type[:3] == 'elu': + alpha = float(act_type.replace('elu', '')) + return nn.ELU(alpha) + else: + raise Exception + + +class Upscale(nn.Module): + def __init__(self, in_c, out_c, scale, norm, act): + super().__init__() + self.conv = nn.Sequential( + norm(in_c), + act, + nn.ConvTranspose2d(in_channels=in_c, out_channels=out_c, kernel_size=scale, stride=scale, bias=False) + ) + + def forward(self, x): + return self.conv(x) + + +class Downscale(nn.Module): + def __init__(self, in_c, out_c, scale, norm, act): + super().__init__() + self.conv = nn.Sequential( + norm(in_c), + act, + nn.Conv2d(in_channels=in_c, out_channels=out_c, kernel_size=scale, stride=scale, bias=False) + ) + + def forward(self, x): + return self.conv(x) + + +class TFC_TDF(nn.Module): + def __init__(self, in_c, c, l, f, bn, norm, act): + super().__init__() + + self.blocks = nn.ModuleList() + for i in range(l): + block = nn.Module() + + block.tfc1 = nn.Sequential( + norm(in_c), + act, + nn.Conv2d(in_c, c, 3, 1, 1, bias=False), + ) + block.tdf = nn.Sequential( + norm(c), + act, + nn.Linear(f, f // bn, 
bias=False), + norm(c), + act, + nn.Linear(f // bn, f, bias=False), + ) + block.tfc2 = nn.Sequential( + norm(c), + act, + nn.Conv2d(c, c, 3, 1, 1, bias=False), + ) + block.shortcut = nn.Conv2d(in_c, c, 1, 1, 0, bias=False) + + self.blocks.append(block) + in_c = c + + def forward(self, x): + for block in self.blocks: + s = block.shortcut(x) + x = block.tfc1(x) + x = x + block.tdf(x) + x = block.tfc2(x) + x = x + s + return x + + +class TFC_TDF_net(nn.Module): + def __init__(self, config, device): + super().__init__() + self.config = config + self.device = device + + norm = get_norm(norm_type=config.model.norm) + act = get_act(act_type=config.model.act) + + self.num_target_instruments = 1 if config.training.target_instrument else len(config.training.instruments) + self.num_subbands = config.model.num_subbands + + dim_c = self.num_subbands * config.audio.num_channels * 2 + n = config.model.num_scales + scale = config.model.scale + l = config.model.num_blocks_per_scale + c = config.model.num_channels + g = config.model.growth + bn = config.model.bottleneck_factor + f = config.audio.dim_f // self.num_subbands + + self.first_conv = nn.Conv2d(dim_c, c, 1, 1, 0, bias=False) + + self.encoder_blocks = nn.ModuleList() + for i in range(n): + block = nn.Module() + block.tfc_tdf = TFC_TDF(c, c, l, f, bn, norm, act) + block.downscale = Downscale(c, c + g, scale, norm, act) + f = f // scale[1] + c += g + self.encoder_blocks.append(block) + + self.bottleneck_block = TFC_TDF(c, c, l, f, bn, norm, act) + + self.decoder_blocks = nn.ModuleList() + for i in range(n): + block = nn.Module() + block.upscale = Upscale(c, c - g, scale, norm, act) + f = f * scale[1] + c -= g + block.tfc_tdf = TFC_TDF(2 * c, c, l, f, bn, norm, act) + self.decoder_blocks.append(block) + + self.final_conv = nn.Sequential( + nn.Conv2d(c + dim_c, c, 1, 1, 0, bias=False), + act, + nn.Conv2d(c, self.num_target_instruments * dim_c, 1, 1, 0, bias=False) + ) + + self.stft = STFT(config.audio.n_fft, config.audio.hop_length, config.audio.dim_f, self.device) + + def cac2cws(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c, k, f // k, t) + x = x.reshape(b, c * k, f // k, t) + return x + + def cws2cac(self, x): + k = self.num_subbands + b, c, f, t = x.shape + x = x.reshape(b, c // k, k, f, t) + x = x.reshape(b, c // k, f * k, t) + return x + + def forward(self, x): + + x = self.stft(x) + + mix = x = self.cac2cws(x) + + first_conv_out = x = self.first_conv(x) + + x = x.transpose(-1, -2) + + encoder_outputs = [] + for block in self.encoder_blocks: + x = block.tfc_tdf(x) + encoder_outputs.append(x) + x = block.downscale(x) + + x = self.bottleneck_block(x) + + for block in self.decoder_blocks: + x = block.upscale(x) + x = torch.cat([x, encoder_outputs.pop()], 1) + x = block.tfc_tdf(x) + + x = x.transpose(-1, -2) + + x = x * first_conv_out # reduce artifacts + + x = self.final_conv(torch.cat([mix, x], 1)) + + x = self.cws2cac(x) + + if self.num_target_instruments > 1: + b, c, f, t = x.shape + x = x.reshape(b, self.num_target_instruments, -1, f, t) + + x = self.stft.inverse(x) + + return x + + diff --git a/lib_v5/vr_network/__init__.py b/lib_v5/vr_network/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..361b7086d3bebd02a4eb7b20582f2fc5ab299cf2 --- /dev/null +++ b/lib_v5/vr_network/__init__.py @@ -0,0 +1 @@ +# VR init. 
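The Python modules added above are easiest to follow end to end: `lib_v5/pyrb.py` shells out to the `rubberband` executable, and `spec_utils.augment_audio` drives it one channel at a time. Below is a minimal usage sketch of that flow, not part of the patch itself; it assumes a `rubberband` binary sits next to `pyrb.py` (as the module expects) and uses a hypothetical input file `song.wav`.

```python
import librosa
import numpy as np
import soundfile as sf

from lib_v5 import pyrb

# Load stereo audio at the 44.1 kHz rate used throughout spec_utils.py.
wav, sr = librosa.load("song.wav", sr=44100, mono=False)
if wav.ndim == 1:
    wav = np.asfortranarray([wav, wav])

# Stretch each channel independently, mirroring spec_utils.augment_audio().
rate = 1.1
stretched = np.asfortranarray([
    pyrb.time_stretch(wav[0], sr, rate),
    pyrb.time_stretch(wav[1], sr, rate),
])

# soundfile expects (frames, channels), hence the transpose.
sf.write("song_stretched.wav", stretched.T, sr, subtype="PCM_16")
```

The real `augment_audio` additionally pads the two channels to a common length with `to_shape` before writing; the sketch above omits that guard.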
diff --git a/lib_v5/vr_network/__pycache__/__init__.cpython-310.pyc b/lib_v5/vr_network/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba8ca2080fe2a9cf6d8811fd7bc58cdbe79e65f1 Binary files /dev/null and b/lib_v5/vr_network/__pycache__/__init__.cpython-310.pyc differ diff --git a/lib_v5/vr_network/__pycache__/layers.cpython-310.pyc b/lib_v5/vr_network/__pycache__/layers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6feb24d77140e4519a1fa230c48b00f89be8881 Binary files /dev/null and b/lib_v5/vr_network/__pycache__/layers.cpython-310.pyc differ diff --git a/lib_v5/vr_network/__pycache__/layers_new.cpython-310.pyc b/lib_v5/vr_network/__pycache__/layers_new.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1ec09acb4d6f79e8e0ec626487e27a28a39cc09f Binary files /dev/null and b/lib_v5/vr_network/__pycache__/layers_new.cpython-310.pyc differ diff --git a/lib_v5/vr_network/__pycache__/model_param_init.cpython-310.pyc b/lib_v5/vr_network/__pycache__/model_param_init.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7acf7e1914083742fe0081771ea8473640dcb95 Binary files /dev/null and b/lib_v5/vr_network/__pycache__/model_param_init.cpython-310.pyc differ diff --git a/lib_v5/vr_network/__pycache__/nets.cpython-310.pyc b/lib_v5/vr_network/__pycache__/nets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6563d4b76e9d0318a9a0716f143715f8118c9e6f Binary files /dev/null and b/lib_v5/vr_network/__pycache__/nets.cpython-310.pyc differ diff --git a/lib_v5/vr_network/__pycache__/nets_new.cpython-310.pyc b/lib_v5/vr_network/__pycache__/nets_new.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f9be4df38803e4dac1cb7c7ed8a1af3c6f6eac02 Binary files /dev/null and b/lib_v5/vr_network/__pycache__/nets_new.cpython-310.pyc differ diff --git a/lib_v5/vr_network/layers.py b/lib_v5/vr_network/layers.py new file mode 100644 index 0000000000000000000000000000000000000000..0120a34721a382cff4dac64f3d344d0ab886c8f9 --- /dev/null +++ b/lib_v5/vr_network/layers.py @@ -0,0 +1,143 @@ +import torch +from torch import nn +import torch.nn.functional as F + +from lib_v5 import spec_utils + +class Conv2DBNActiv(nn.Module): + + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False), + nn.BatchNorm2d(nout), + activ() + ) + + def __call__(self, x): + return self.conv(x) + +class SeperableConv2DBNActiv(nn.Module): + + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(SeperableConv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, nin, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + groups=nin, + bias=False), + nn.Conv2d( + nin, nout, + kernel_size=1, + bias=False), + nn.BatchNorm2d(nout), + activ() + ) + + def __call__(self, x): + return self.conv(x) + + +class Encoder(nn.Module): + + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) + + def __call__(self, x): + skip = self.conv1(x) + h = self.conv2(skip) 
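+        # conv1 runs at full resolution and its output doubles as the skip connection;
+        # the strided conv2 output is what continues down the encoder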
+ + return h, skip + + +class Decoder(nn.Module): + + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): + super(Decoder, self).__init__() + self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True) + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + h = self.conv(x) + + if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + + def __init__(self, nn_architecture, nin, nout, dilations=(4, 8, 16), activ=nn.ReLU): + super(ASPPModule, self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) + ) + + self.nn_architecture = nn_architecture + self.six_layer = [129605] + self.seven_layer = [537238, 537227, 33966] + + extra_conv = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) + + self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) + self.conv3 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[0], dilations[0], activ=activ) + self.conv4 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[1], dilations[1], activ=activ) + self.conv5 = SeperableConv2DBNActiv( + nin, nin, 3, 1, dilations[2], dilations[2], activ=activ) + + if self.nn_architecture in self.six_layer: + self.conv6 = extra_conv + nin_x = 6 + elif self.nn_architecture in self.seven_layer: + self.conv6 = extra_conv + self.conv7 = extra_conv + nin_x = 7 + else: + nin_x = 5 + + self.bottleneck = nn.Sequential( + Conv2DBNActiv(nin * nin_x, nout, 1, 1, 0, activ=activ), + nn.Dropout2d(0.1) + ) + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + + if self.nn_architecture in self.six_layer: + feat6 = self.conv6(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6), dim=1) + elif self.nn_architecture in self.seven_layer: + feat6 = self.conv6(x) + feat7 = self.conv7(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) + else: + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + + bottle = self.bottleneck(out) + return bottle diff --git a/lib_v5/vr_network/layers_new.py b/lib_v5/vr_network/layers_new.py new file mode 100644 index 0000000000000000000000000000000000000000..33181dd8cdd244f91a684698e075da2ab3ef668e --- /dev/null +++ b/lib_v5/vr_network/layers_new.py @@ -0,0 +1,126 @@ +import torch +from torch import nn +import torch.nn.functional as F + +from lib_v5 import spec_utils + +class Conv2DBNActiv(nn.Module): + + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): + super(Conv2DBNActiv, self).__init__() + self.conv = nn.Sequential( + nn.Conv2d( + nin, nout, + kernel_size=ksize, + stride=stride, + padding=pad, + dilation=dilation, + bias=False), + nn.BatchNorm2d(nout), + activ() + ) + + def __call__(self, x): + return self.conv(x) + +class Encoder(nn.Module): + + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): + super(Encoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, stride, pad, activ=activ) + self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) + + def __call__(self, x): + h = self.conv1(x) + h = 
self.conv2(h) + + return h + + +class Decoder(nn.Module): + + def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False): + super(Decoder, self).__init__() + self.conv1 = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) + # self.conv2 = Conv2DBNActiv(nout, nout, ksize, 1, pad, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def __call__(self, x, skip=None): + x = F.interpolate(x, scale_factor=2, mode='bilinear', align_corners=True) + + if skip is not None: + skip = spec_utils.crop_center(skip, x) + x = torch.cat([x, skip], dim=1) + + h = self.conv1(x) + # h = self.conv2(h) + + if self.dropout is not None: + h = self.dropout(h) + + return h + + +class ASPPModule(nn.Module): + + def __init__(self, nin, nout, dilations=(4, 8, 12), activ=nn.ReLU, dropout=False): + super(ASPPModule, self).__init__() + self.conv1 = nn.Sequential( + nn.AdaptiveAvgPool2d((1, None)), + Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) + ) + self.conv2 = Conv2DBNActiv(nin, nout, 1, 1, 0, activ=activ) + self.conv3 = Conv2DBNActiv( + nin, nout, 3, 1, dilations[0], dilations[0], activ=activ + ) + self.conv4 = Conv2DBNActiv( + nin, nout, 3, 1, dilations[1], dilations[1], activ=activ + ) + self.conv5 = Conv2DBNActiv( + nin, nout, 3, 1, dilations[2], dilations[2], activ=activ + ) + self.bottleneck = Conv2DBNActiv(nout * 5, nout, 1, 1, 0, activ=activ) + self.dropout = nn.Dropout2d(0.1) if dropout else None + + def forward(self, x): + _, _, h, w = x.size() + feat1 = F.interpolate(self.conv1(x), size=(h, w), mode='bilinear', align_corners=True) + feat2 = self.conv2(x) + feat3 = self.conv3(x) + feat4 = self.conv4(x) + feat5 = self.conv5(x) + out = torch.cat((feat1, feat2, feat3, feat4, feat5), dim=1) + out = self.bottleneck(out) + + if self.dropout is not None: + out = self.dropout(out) + + return out + + +class LSTMModule(nn.Module): + + def __init__(self, nin_conv, nin_lstm, nout_lstm): + super(LSTMModule, self).__init__() + self.conv = Conv2DBNActiv(nin_conv, 1, 1, 1, 0) + self.lstm = nn.LSTM( + input_size=nin_lstm, + hidden_size=nout_lstm // 2, + bidirectional=True + ) + self.dense = nn.Sequential( + nn.Linear(nout_lstm, nin_lstm), + nn.BatchNorm1d(nin_lstm), + nn.ReLU() + ) + + def forward(self, x): + N, _, nbins, nframes = x.size() + h = self.conv(x)[:, 0] # N, nbins, nframes + h = h.permute(2, 0, 1) # nframes, N, nbins + h, _ = self.lstm(h) + h = self.dense(h.reshape(-1, h.size()[-1])) # nframes * N, nbins + h = h.reshape(nframes, N, 1, nbins) + h = h.permute(1, 2, 3, 0) + + return h diff --git a/lib_v5/vr_network/model_param_init.py b/lib_v5/vr_network/model_param_init.py new file mode 100644 index 0000000000000000000000000000000000000000..a5622075e1a6c5f452278299bead1a8e7c8dfa08 --- /dev/null +++ b/lib_v5/vr_network/model_param_init.py @@ -0,0 +1,32 @@ +import json + +default_param = {} +default_param['bins'] = -1 +default_param['unstable_bins'] = -1 # training only +default_param['stable_bins'] = -1 # training only +default_param['sr'] = 44100 +default_param['pre_filter_start'] = -1 +default_param['pre_filter_stop'] = -1 +default_param['band'] = {} + +N_BINS = 'n_bins' + +def int_keys(d): + r = {} + for k, v in d: + if k.isdigit(): + k = int(k) + r[k] = v + return r + +class ModelParameters(object): + def __init__(self, config_path=''): + with open(config_path, 'r') as f: + self.param = json.loads(f.read(), object_pairs_hook=int_keys) + + for k in ['mid_side', 'mid_side_b', 'mid_side_b2', 'stereo_w', 'stereo_n', 'reverse']: + if not k in self.param: + 
self.param[k] = False + + if N_BINS in self.param: + self.param['bins'] = self.param[N_BINS] \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/1band_sr16000_hl512.json b/lib_v5/vr_network/modelparams/1band_sr16000_hl512.json new file mode 100644 index 0000000000000000000000000000000000000000..72cb4499867ad2827185e85687f06fb73d33eced --- /dev/null +++ b/lib_v5/vr_network/modelparams/1band_sr16000_hl512.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 16000, + "hl": 512, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 16000, + "pre_filter_start": 1023, + "pre_filter_stop": 1024 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/1band_sr32000_hl512.json b/lib_v5/vr_network/modelparams/1band_sr32000_hl512.json new file mode 100644 index 0000000000000000000000000000000000000000..3c00ecf0a105e55a6a86a3c32db301a2635b5b41 --- /dev/null +++ b/lib_v5/vr_network/modelparams/1band_sr32000_hl512.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 32000, + "hl": 512, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "kaiser_fast" + } + }, + "sr": 32000, + "pre_filter_start": 1000, + "pre_filter_stop": 1021 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/1band_sr33075_hl384.json b/lib_v5/vr_network/modelparams/1band_sr33075_hl384.json new file mode 100644 index 0000000000000000000000000000000000000000..55666ac9a8d0547751fb4b4d3bffb1ee2c956913 --- /dev/null +++ b/lib_v5/vr_network/modelparams/1band_sr33075_hl384.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 33075, + "hl": 384, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 33075, + "pre_filter_start": 1000, + "pre_filter_stop": 1021 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/1band_sr44100_hl1024.json b/lib_v5/vr_network/modelparams/1band_sr44100_hl1024.json new file mode 100644 index 0000000000000000000000000000000000000000..665abe20eb3cc39fe0f8493dad8f25f6ef634a14 --- /dev/null +++ b/lib_v5/vr_network/modelparams/1band_sr44100_hl1024.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 1024, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 44100, + "pre_filter_start": 1023, + "pre_filter_stop": 1024 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/1band_sr44100_hl256.json b/lib_v5/vr_network/modelparams/1band_sr44100_hl256.json new file mode 100644 index 0000000000000000000000000000000000000000..0e8b16f89b0231d06eabe8d2f7c2670c7caa2272 --- /dev/null +++ b/lib_v5/vr_network/modelparams/1band_sr44100_hl256.json @@ -0,0 +1,19 @@ +{ + "bins": 256, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 256, + "n_fft": 512, + "crop_start": 0, + "crop_stop": 256, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 44100, + "pre_filter_start": 256, + "pre_filter_stop": 256 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/1band_sr44100_hl512.json b/lib_v5/vr_network/modelparams/1band_sr44100_hl512.json new file mode 100644 index 
0000000000000000000000000000000000000000..3b38fcaf60ba204e03a47f5bd3f5bcfe75e1983a --- /dev/null +++ b/lib_v5/vr_network/modelparams/1band_sr44100_hl512.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 512, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 1024, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 44100, + "pre_filter_start": 1023, + "pre_filter_stop": 1024 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/1band_sr44100_hl512_cut.json b/lib_v5/vr_network/modelparams/1band_sr44100_hl512_cut.json new file mode 100644 index 0000000000000000000000000000000000000000..630df3524e340f43a1ddb7b33ff02cc91fc1cb47 --- /dev/null +++ b/lib_v5/vr_network/modelparams/1band_sr44100_hl512_cut.json @@ -0,0 +1,19 @@ +{ + "bins": 1024, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 512, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 700, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 44100, + "pre_filter_start": 1023, + "pre_filter_stop": 700 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/1band_sr44100_hl512_nf1024.json b/lib_v5/vr_network/modelparams/1band_sr44100_hl512_nf1024.json new file mode 100644 index 0000000000000000000000000000000000000000..120ef1a24013795c077814bd98ce18169acafac0 --- /dev/null +++ b/lib_v5/vr_network/modelparams/1band_sr44100_hl512_nf1024.json @@ -0,0 +1,19 @@ +{ + "bins": 512, + "unstable_bins": 0, + "reduction_bins": 0, + "band": { + "1": { + "sr": 44100, + "hl": 512, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 512, + "hpf_start": -1, + "res_type": "sinc_best" + } + }, + "sr": 44100, + "pre_filter_start": 511, + "pre_filter_stop": 512 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/2band_32000.json b/lib_v5/vr_network/modelparams/2band_32000.json new file mode 100644 index 0000000000000000000000000000000000000000..ab9cf1150a818eb6252105408311be0a40d423b3 --- /dev/null +++ b/lib_v5/vr_network/modelparams/2band_32000.json @@ -0,0 +1,30 @@ +{ + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 705, + "band": { + "1": { + "sr": 6000, + "hl": 66, + "n_fft": 512, + "crop_start": 0, + "crop_stop": 240, + "lpf_start": 60, + "lpf_stop": 118, + "res_type": "sinc_fastest" + }, + "2": { + "sr": 32000, + "hl": 352, + "n_fft": 1024, + "crop_start": 22, + "crop_stop": 505, + "hpf_start": 44, + "hpf_stop": 23, + "res_type": "sinc_medium" + } + }, + "sr": 32000, + "pre_filter_start": 710, + "pre_filter_stop": 731 +} diff --git a/lib_v5/vr_network/modelparams/2band_44100_lofi.json b/lib_v5/vr_network/modelparams/2band_44100_lofi.json new file mode 100644 index 0000000000000000000000000000000000000000..7faa216d7b49aeece24123dbdd868847a1dbc03c --- /dev/null +++ b/lib_v5/vr_network/modelparams/2band_44100_lofi.json @@ -0,0 +1,30 @@ +{ + "bins": 512, + "unstable_bins": 7, + "reduction_bins": 510, + "band": { + "1": { + "sr": 11025, + "hl": 160, + "n_fft": 768, + "crop_start": 0, + "crop_stop": 192, + "lpf_start": 41, + "lpf_stop": 139, + "res_type": "sinc_fastest" + }, + "2": { + "sr": 44100, + "hl": 640, + "n_fft": 1024, + "crop_start": 10, + "crop_stop": 320, + "hpf_start": 47, + "hpf_stop": 15, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 510, + "pre_filter_stop": 512 +} diff --git a/lib_v5/vr_network/modelparams/2band_48000.json b/lib_v5/vr_network/modelparams/2band_48000.json new file mode 100644 index 
0000000000000000000000000000000000000000..be075f52e4a8ddba952cb2fc608b29e089e7f9f9 --- /dev/null +++ b/lib_v5/vr_network/modelparams/2band_48000.json @@ -0,0 +1,30 @@ +{ + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 705, + "band": { + "1": { + "sr": 6000, + "hl": 66, + "n_fft": 512, + "crop_start": 0, + "crop_stop": 240, + "lpf_start": 60, + "lpf_stop": 240, + "res_type": "sinc_fastest" + }, + "2": { + "sr": 48000, + "hl": 528, + "n_fft": 1536, + "crop_start": 22, + "crop_stop": 505, + "hpf_start": 82, + "hpf_stop": 22, + "res_type": "sinc_medium" + } + }, + "sr": 48000, + "pre_filter_start": 710, + "pre_filter_stop": 731 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/3band_44100.json b/lib_v5/vr_network/modelparams/3band_44100.json new file mode 100644 index 0000000000000000000000000000000000000000..d99e23986cf7e68be023e3cf382b5d131409095d --- /dev/null +++ b/lib_v5/vr_network/modelparams/3band_44100.json @@ -0,0 +1,42 @@ +{ + "bins": 768, + "unstable_bins": 5, + "reduction_bins": 733, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 768, + "crop_start": 0, + "crop_stop": 278, + "lpf_start": 28, + "lpf_stop": 140, + "res_type": "polyphase" + }, + "2": { + "sr": 22050, + "hl": 256, + "n_fft": 768, + "crop_start": 14, + "crop_stop": 322, + "hpf_start": 70, + "hpf_stop": 14, + "lpf_start": 283, + "lpf_stop": 314, + "res_type": "polyphase" + }, + "3": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 131, + "crop_stop": 313, + "hpf_start": 154, + "hpf_stop": 141, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 757, + "pre_filter_stop": 768 +} diff --git a/lib_v5/vr_network/modelparams/3band_44100_mid.json b/lib_v5/vr_network/modelparams/3band_44100_mid.json new file mode 100644 index 0000000000000000000000000000000000000000..fc2c487dd52d91beb32d69bc36ad8e3b6124978b --- /dev/null +++ b/lib_v5/vr_network/modelparams/3band_44100_mid.json @@ -0,0 +1,43 @@ +{ + "mid_side": true, + "bins": 768, + "unstable_bins": 5, + "reduction_bins": 733, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 768, + "crop_start": 0, + "crop_stop": 278, + "lpf_start": 28, + "lpf_stop": 140, + "res_type": "polyphase" + }, + "2": { + "sr": 22050, + "hl": 256, + "n_fft": 768, + "crop_start": 14, + "crop_stop": 322, + "hpf_start": 70, + "hpf_stop": 14, + "lpf_start": 283, + "lpf_stop": 314, + "res_type": "polyphase" + }, + "3": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 131, + "crop_stop": 313, + "hpf_start": 154, + "hpf_stop": 141, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 757, + "pre_filter_stop": 768 +} diff --git a/lib_v5/vr_network/modelparams/3band_44100_msb2.json b/lib_v5/vr_network/modelparams/3band_44100_msb2.json new file mode 100644 index 0000000000000000000000000000000000000000..33b0877c2e964657af2c648b71cbb84ff6b1e581 --- /dev/null +++ b/lib_v5/vr_network/modelparams/3band_44100_msb2.json @@ -0,0 +1,43 @@ +{ + "mid_side_b2": true, + "bins": 640, + "unstable_bins": 7, + "reduction_bins": 565, + "band": { + "1": { + "sr": 11025, + "hl": 108, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 187, + "lpf_start": 92, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "2": { + "sr": 22050, + "hl": 216, + "n_fft": 768, + "crop_start": 0, + "crop_stop": 212, + "hpf_start": 68, + "hpf_stop": 34, + "lpf_start": 174, + "lpf_stop": 209, + "res_type": "polyphase" + }, + "3": { + "sr": 44100, + "hl": 432, + "n_fft": 640, + "crop_start": 66, + "crop_stop": 307, + "hpf_start": 
86, + "hpf_stop": 72, + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 639, + "pre_filter_stop": 640 +} diff --git a/lib_v5/vr_network/modelparams/4band_44100.json b/lib_v5/vr_network/modelparams/4band_44100.json new file mode 100644 index 0000000000000000000000000000000000000000..4ae850a08f6fe11e7a5a0267f3be35f993cc4eb6 --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_44100.json @@ -0,0 +1,54 @@ +{ + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} diff --git a/lib_v5/vr_network/modelparams/4band_44100_mid.json b/lib_v5/vr_network/modelparams/4band_44100_mid.json new file mode 100644 index 0000000000000000000000000000000000000000..6346701543891938e69fc35754b58b8da9b561d6 --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_44100_mid.json @@ -0,0 +1,55 @@ +{ + "bins": 768, + "unstable_bins": 7, + "mid_side": true, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} diff --git a/lib_v5/vr_network/modelparams/4band_44100_msb.json b/lib_v5/vr_network/modelparams/4band_44100_msb.json new file mode 100644 index 0000000000000000000000000000000000000000..0bf477114c585236da7c48ffd81960919da38b81 --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_44100_msb.json @@ -0,0 +1,55 @@ +{ + "mid_side_b": true, + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + 
"crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/4band_44100_msb2.json b/lib_v5/vr_network/modelparams/4band_44100_msb2.json new file mode 100644 index 0000000000000000000000000000000000000000..0bf477114c585236da7c48ffd81960919da38b81 --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_44100_msb2.json @@ -0,0 +1,55 @@ +{ + "mid_side_b": true, + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/4band_44100_reverse.json b/lib_v5/vr_network/modelparams/4band_44100_reverse.json new file mode 100644 index 0000000000000000000000000000000000000000..779a1c908357cccedcd22b695ca68df13c1967bd --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_44100_reverse.json @@ -0,0 +1,55 @@ +{ + "reverse": true, + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + "crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/4band_44100_sw.json b/lib_v5/vr_network/modelparams/4band_44100_sw.json new file mode 100644 index 0000000000000000000000000000000000000000..1fefd4aa50bf6c744294fbb305888742c96e4c4c --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_44100_sw.json @@ -0,0 +1,55 @@ +{ + "stereo_w": true, + "bins": 768, + "unstable_bins": 7, + "reduction_bins": 668, + "band": { + "1": { + "sr": 11025, + "hl": 128, + "n_fft": 1024, + "crop_start": 0, + "crop_stop": 186, + "lpf_start": 37, + "lpf_stop": 73, + "res_type": "polyphase" + }, + "2": { + "sr": 11025, + "hl": 128, + "n_fft": 512, + "crop_start": 4, + "crop_stop": 185, + "hpf_start": 36, + "hpf_stop": 18, + "lpf_start": 93, + "lpf_stop": 185, + "res_type": "polyphase" + }, + "3": { + "sr": 22050, + "hl": 256, + "n_fft": 512, + "crop_start": 46, + 
"crop_stop": 186, + "hpf_start": 93, + "hpf_stop": 46, + "lpf_start": 164, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 512, + "n_fft": 768, + "crop_start": 121, + "crop_stop": 382, + "hpf_start": 138, + "hpf_stop": 123, + "res_type": "sinc_medium" + } + }, + "sr": 44100, + "pre_filter_start": 740, + "pre_filter_stop": 768 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/4band_v2.json b/lib_v5/vr_network/modelparams/4band_v2.json new file mode 100644 index 0000000000000000000000000000000000000000..af798108de02a7243335e71be5c57e4094a5d7b1 --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_v2.json @@ -0,0 +1,54 @@ +{ + "bins": 672, + "unstable_bins": 8, + "reduction_bins": 637, + "band": { + "1": { + "sr": 7350, + "hl": 80, + "n_fft": 640, + "crop_start": 0, + "crop_stop": 85, + "lpf_start": 25, + "lpf_stop": 53, + "res_type": "polyphase" + }, + "2": { + "sr": 7350, + "hl": 80, + "n_fft": 320, + "crop_start": 4, + "crop_stop": 87, + "hpf_start": 25, + "hpf_stop": 12, + "lpf_start": 31, + "lpf_stop": 62, + "res_type": "polyphase" + }, + "3": { + "sr": 14700, + "hl": 160, + "n_fft": 512, + "crop_start": 17, + "crop_stop": 216, + "hpf_start": 48, + "hpf_stop": 24, + "lpf_start": 139, + "lpf_stop": 210, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 480, + "n_fft": 960, + "crop_start": 78, + "crop_stop": 383, + "hpf_start": 130, + "hpf_stop": 86, + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 668, + "pre_filter_stop": 672 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/4band_v2_sn.json b/lib_v5/vr_network/modelparams/4band_v2_sn.json new file mode 100644 index 0000000000000000000000000000000000000000..319b99810f364946da7a30b15b916a5309981608 --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_v2_sn.json @@ -0,0 +1,55 @@ +{ + "bins": 672, + "unstable_bins": 8, + "reduction_bins": 637, + "band": { + "1": { + "sr": 7350, + "hl": 80, + "n_fft": 640, + "crop_start": 0, + "crop_stop": 85, + "lpf_start": 25, + "lpf_stop": 53, + "res_type": "polyphase" + }, + "2": { + "sr": 7350, + "hl": 80, + "n_fft": 320, + "crop_start": 4, + "crop_stop": 87, + "hpf_start": 25, + "hpf_stop": 12, + "lpf_start": 31, + "lpf_stop": 62, + "res_type": "polyphase" + }, + "3": { + "sr": 14700, + "hl": 160, + "n_fft": 512, + "crop_start": 17, + "crop_stop": 216, + "hpf_start": 48, + "hpf_stop": 24, + "lpf_start": 139, + "lpf_stop": 210, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 480, + "n_fft": 960, + "crop_start": 78, + "crop_stop": 383, + "hpf_start": 130, + "hpf_stop": 86, + "convert_channels": "stereo_n", + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 668, + "pre_filter_stop": 672 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/4band_v3.json b/lib_v5/vr_network/modelparams/4band_v3.json new file mode 100644 index 0000000000000000000000000000000000000000..2a73bc97ac545145a75bdca7addc5d59f5b8574b --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_v3.json @@ -0,0 +1,54 @@ +{ + "bins": 672, + "unstable_bins": 8, + "reduction_bins": 530, + "band": { + "1": { + "sr": 7350, + "hl": 80, + "n_fft": 640, + "crop_start": 0, + "crop_stop": 85, + "lpf_start": 25, + "lpf_stop": 53, + "res_type": "polyphase" + }, + "2": { + "sr": 7350, + "hl": 80, + "n_fft": 320, + "crop_start": 4, + "crop_stop": 87, + "hpf_start": 25, + "hpf_stop": 12, + "lpf_start": 31, + "lpf_stop": 62, + "res_type": "polyphase" + }, + "3": { + "sr": 
14700, + "hl": 160, + "n_fft": 512, + "crop_start": 17, + "crop_stop": 216, + "hpf_start": 48, + "hpf_stop": 24, + "lpf_start": 139, + "lpf_stop": 210, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 480, + "n_fft": 960, + "crop_start": 78, + "crop_stop": 383, + "hpf_start": 130, + "hpf_stop": 86, + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 668, + "pre_filter_stop": 672 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/4band_v3_sn.json b/lib_v5/vr_network/modelparams/4band_v3_sn.json new file mode 100644 index 0000000000000000000000000000000000000000..6680a06146213f4c1cc3bde7668cec496e1d0389 --- /dev/null +++ b/lib_v5/vr_network/modelparams/4band_v3_sn.json @@ -0,0 +1,55 @@ +{ + "n_bins": 672, + "unstable_bins": 8, + "stable_bins": 530, + "band": { + "1": { + "sr": 7350, + "hl": 80, + "n_fft": 640, + "crop_start": 0, + "crop_stop": 85, + "lpf_start": 25, + "lpf_stop": 53, + "res_type": "polyphase" + }, + "2": { + "sr": 7350, + "hl": 80, + "n_fft": 320, + "crop_start": 4, + "crop_stop": 87, + "hpf_start": 25, + "hpf_stop": 12, + "lpf_start": 31, + "lpf_stop": 62, + "res_type": "polyphase" + }, + "3": { + "sr": 14700, + "hl": 160, + "n_fft": 512, + "crop_start": 17, + "crop_stop": 216, + "hpf_start": 48, + "hpf_stop": 24, + "lpf_start": 139, + "lpf_stop": 210, + "res_type": "polyphase" + }, + "4": { + "sr": 44100, + "hl": 480, + "n_fft": 960, + "crop_start": 78, + "crop_stop": 383, + "hpf_start": 130, + "hpf_stop": 86, + "convert_channels": "stereo_n", + "res_type": "kaiser_fast" + } + }, + "sr": 44100, + "pre_filter_start": 668, + "pre_filter_stop": 672 +} \ No newline at end of file diff --git a/lib_v5/vr_network/modelparams/ensemble.json b/lib_v5/vr_network/modelparams/ensemble.json new file mode 100644 index 0000000000000000000000000000000000000000..ca96bf19c593dbe127e1a013ae456ac093602e28 --- /dev/null +++ b/lib_v5/vr_network/modelparams/ensemble.json @@ -0,0 +1,43 @@ +{ + "mid_side_b2": true, + "bins": 1280, + "unstable_bins": 7, + "reduction_bins": 565, + "band": { + "1": { + "sr": 11025, + "hl": 108, + "n_fft": 2048, + "crop_start": 0, + "crop_stop": 374, + "lpf_start": 92, + "lpf_stop": 186, + "res_type": "polyphase" + }, + "2": { + "sr": 22050, + "hl": 216, + "n_fft": 1536, + "crop_start": 0, + "crop_stop": 424, + "hpf_start": 68, + "hpf_stop": 34, + "lpf_start": 348, + "lpf_stop": 418, + "res_type": "polyphase" + }, + "3": { + "sr": 44100, + "hl": 432, + "n_fft": 1280, + "crop_start": 132, + "crop_stop": 614, + "hpf_start": 172, + "hpf_stop": 144, + "res_type": "polyphase" + } + }, + "sr": 44100, + "pre_filter_start": 1280, + "pre_filter_stop": 1280 +} \ No newline at end of file diff --git a/lib_v5/vr_network/nets.py b/lib_v5/vr_network/nets.py new file mode 100644 index 0000000000000000000000000000000000000000..3896fce4ecc73bdee5d538df322784cafe24a91f --- /dev/null +++ b/lib_v5/vr_network/nets.py @@ -0,0 +1,166 @@ +import torch +from torch import nn +import torch.nn.functional as F + +from . 
import layers + +class BaseASPPNet(nn.Module): + + def __init__(self, nn_architecture, nin, ch, dilations=(4, 8, 16)): + super(BaseASPPNet, self).__init__() + self.nn_architecture = nn_architecture + self.enc1 = layers.Encoder(nin, ch, 3, 2, 1) + self.enc2 = layers.Encoder(ch, ch * 2, 3, 2, 1) + self.enc3 = layers.Encoder(ch * 2, ch * 4, 3, 2, 1) + self.enc4 = layers.Encoder(ch * 4, ch * 8, 3, 2, 1) + + if self.nn_architecture == 129605: + self.enc5 = layers.Encoder(ch * 8, ch * 16, 3, 2, 1) + self.aspp = layers.ASPPModule(nn_architecture, ch * 16, ch * 32, dilations) + self.dec5 = layers.Decoder(ch * (16 + 32), ch * 16, 3, 1, 1) + else: + self.aspp = layers.ASPPModule(nn_architecture, ch * 8, ch * 16, dilations) + + self.dec4 = layers.Decoder(ch * (8 + 16), ch * 8, 3, 1, 1) + self.dec3 = layers.Decoder(ch * (4 + 8), ch * 4, 3, 1, 1) + self.dec2 = layers.Decoder(ch * (2 + 4), ch * 2, 3, 1, 1) + self.dec1 = layers.Decoder(ch * (1 + 2), ch, 3, 1, 1) + + def __call__(self, x): + h, e1 = self.enc1(x) + h, e2 = self.enc2(h) + h, e3 = self.enc3(h) + h, e4 = self.enc4(h) + + if self.nn_architecture == 129605: + h, e5 = self.enc5(h) + h = self.aspp(h) + h = self.dec5(h, e5) + else: + h = self.aspp(h) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = self.dec1(h, e1) + + return h + +def determine_model_capacity(n_fft_bins, nn_architecture): + + sp_model_arch = [31191, 33966, 129605] + hp_model_arch = [123821, 123812] + hp2_model_arch = [537238, 537227] + + if nn_architecture in sp_model_arch: + model_capacity_data = [ + (2, 16), + (2, 16), + (18, 8, 1, 1, 0), + (8, 16), + (34, 16, 1, 1, 0), + (16, 32), + (32, 2, 1), + (16, 2, 1), + (16, 2, 1), + ] + + if nn_architecture in hp_model_arch: + model_capacity_data = [ + (2, 32), + (2, 32), + (34, 16, 1, 1, 0), + (16, 32), + (66, 32, 1, 1, 0), + (32, 64), + (64, 2, 1), + (32, 2, 1), + (32, 2, 1), + ] + + if nn_architecture in hp2_model_arch: + model_capacity_data = [ + (2, 64), + (2, 64), + (66, 32, 1, 1, 0), + (32, 64), + (130, 64, 1, 1, 0), + (64, 128), + (128, 2, 1), + (64, 2, 1), + (64, 2, 1), + ] + + cascaded = CascadedASPPNet + model = cascaded(n_fft_bins, model_capacity_data, nn_architecture) + + return model + +class CascadedASPPNet(nn.Module): + + def __init__(self, n_fft, model_capacity_data, nn_architecture): + super(CascadedASPPNet, self).__init__() + self.stg1_low_band_net = BaseASPPNet(nn_architecture, *model_capacity_data[0]) + self.stg1_high_band_net = BaseASPPNet(nn_architecture, *model_capacity_data[1]) + + self.stg2_bridge = layers.Conv2DBNActiv(*model_capacity_data[2]) + self.stg2_full_band_net = BaseASPPNet(nn_architecture, *model_capacity_data[3]) + + self.stg3_bridge = layers.Conv2DBNActiv(*model_capacity_data[4]) + self.stg3_full_band_net = BaseASPPNet(nn_architecture, *model_capacity_data[5]) + + self.out = nn.Conv2d(*model_capacity_data[6], bias=False) + self.aux1_out = nn.Conv2d(*model_capacity_data[7], bias=False) + self.aux2_out = nn.Conv2d(*model_capacity_data[8], bias=False) + + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + + self.offset = 128 + + def forward(self, x): + mix = x.detach() + x = x.clone() + + x = x[:, :, :self.max_bin] + + bandw = x.size()[2] // 2 + aux1 = torch.cat([ + self.stg1_low_band_net(x[:, :, :bandw]), + self.stg1_high_band_net(x[:, :, bandw:]) + ], dim=2) + + h = torch.cat([x, aux1], dim=1) + aux2 = self.stg2_full_band_net(self.stg2_bridge(h)) + + h = torch.cat([x, aux1, aux2], dim=1) + h = self.stg3_full_band_net(self.stg3_bridge(h)) + + mask = 
torch.sigmoid(self.out(h)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode='replicate') + + if self.training: + aux1 = torch.sigmoid(self.aux1_out(aux1)) + aux1 = F.pad( + input=aux1, + pad=(0, 0, 0, self.output_bin - aux1.size()[2]), + mode='replicate') + aux2 = torch.sigmoid(self.aux2_out(aux2)) + aux2 = F.pad( + input=aux2, + pad=(0, 0, 0, self.output_bin - aux2.size()[2]), + mode='replicate') + return mask * mix, aux1 * mix, aux2 * mix + else: + return mask# * mix + + def predict_mask(self, x): + mask = self.forward(x) + + if self.offset > 0: + mask = mask[:, :, :, self.offset:-self.offset] + + return mask \ No newline at end of file diff --git a/lib_v5/vr_network/nets_new.py b/lib_v5/vr_network/nets_new.py new file mode 100644 index 0000000000000000000000000000000000000000..67772745126d05ade18fb713282469352c5bdd30 --- /dev/null +++ b/lib_v5/vr_network/nets_new.py @@ -0,0 +1,125 @@ +import torch +from torch import nn +import torch.nn.functional as F +from . import layers_new as layers + +class BaseNet(nn.Module): + + def __init__(self, nin, nout, nin_lstm, nout_lstm, dilations=((4, 2), (8, 4), (12, 6))): + super(BaseNet, self).__init__() + self.enc1 = layers.Conv2DBNActiv(nin, nout, 3, 1, 1) + self.enc2 = layers.Encoder(nout, nout * 2, 3, 2, 1) + self.enc3 = layers.Encoder(nout * 2, nout * 4, 3, 2, 1) + self.enc4 = layers.Encoder(nout * 4, nout * 6, 3, 2, 1) + self.enc5 = layers.Encoder(nout * 6, nout * 8, 3, 2, 1) + + self.aspp = layers.ASPPModule(nout * 8, nout * 8, dilations, dropout=True) + + self.dec4 = layers.Decoder(nout * (6 + 8), nout * 6, 3, 1, 1) + self.dec3 = layers.Decoder(nout * (4 + 6), nout * 4, 3, 1, 1) + self.dec2 = layers.Decoder(nout * (2 + 4), nout * 2, 3, 1, 1) + self.lstm_dec2 = layers.LSTMModule(nout * 2, nin_lstm, nout_lstm) + self.dec1 = layers.Decoder(nout * (1 + 2) + 1, nout * 1, 3, 1, 1) + + def __call__(self, x): + e1 = self.enc1(x) + e2 = self.enc2(e1) + e3 = self.enc3(e2) + e4 = self.enc4(e3) + e5 = self.enc5(e4) + + h = self.aspp(e5) + + h = self.dec4(h, e4) + h = self.dec3(h, e3) + h = self.dec2(h, e2) + h = torch.cat([h, self.lstm_dec2(h)], dim=1) + h = self.dec1(h, e1) + + return h + +class CascadedNet(nn.Module): + + def __init__(self, n_fft, nn_arch_size=51000, nout=32, nout_lstm=128): + super(CascadedNet, self).__init__() + self.max_bin = n_fft // 2 + self.output_bin = n_fft // 2 + 1 + self.nin_lstm = self.max_bin // 2 + self.offset = 64 + nout = 64 if nn_arch_size == 218409 else nout + + #print(nout, nout_lstm, n_fft) + + self.stg1_low_band_net = nn.Sequential( + BaseNet(2, nout // 2, self.nin_lstm // 2, nout_lstm), + layers.Conv2DBNActiv(nout // 2, nout // 4, 1, 1, 0) + ) + self.stg1_high_band_net = BaseNet(2, nout // 4, self.nin_lstm // 2, nout_lstm // 2) + + self.stg2_low_band_net = nn.Sequential( + BaseNet(nout // 4 + 2, nout, self.nin_lstm // 2, nout_lstm), + layers.Conv2DBNActiv(nout, nout // 2, 1, 1, 0) + ) + self.stg2_high_band_net = BaseNet(nout // 4 + 2, nout // 2, self.nin_lstm // 2, nout_lstm // 2) + + self.stg3_full_band_net = BaseNet(3 * nout // 4 + 2, nout, self.nin_lstm, nout_lstm) + + self.out = nn.Conv2d(nout, 2, 1, bias=False) + self.aux_out = nn.Conv2d(3 * nout // 4, 2, 1, bias=False) + + def forward(self, x): + x = x[:, :, :self.max_bin] + + bandw = x.size()[2] // 2 + l1_in = x[:, :, :bandw] + h1_in = x[:, :, bandw:] + l1 = self.stg1_low_band_net(l1_in) + h1 = self.stg1_high_band_net(h1_in) + aux1 = torch.cat([l1, h1], dim=2) + + l2_in = torch.cat([l1_in, l1], dim=1) + h2_in = 
torch.cat([h1_in, h1], dim=1) + l2 = self.stg2_low_band_net(l2_in) + h2 = self.stg2_high_band_net(h2_in) + aux2 = torch.cat([l2, h2], dim=2) + + f3_in = torch.cat([x, aux1, aux2], dim=1) + f3 = self.stg3_full_band_net(f3_in) + + mask = torch.sigmoid(self.out(f3)) + mask = F.pad( + input=mask, + pad=(0, 0, 0, self.output_bin - mask.size()[2]), + mode='replicate' + ) + + if self.training: + aux = torch.cat([aux1, aux2], dim=1) + aux = torch.sigmoid(self.aux_out(aux)) + aux = F.pad( + input=aux, + pad=(0, 0, 0, self.output_bin - aux.size()[2]), + mode='replicate' + ) + return mask, aux + else: + return mask + + def predict_mask(self, x): + mask = self.forward(x) + + if self.offset > 0: + mask = mask[:, :, :, self.offset:-self.offset] + assert mask.size()[3] > 0 + + return mask + + def predict(self, x): + mask = self.forward(x) + pred_mag = x * mask + + if self.offset > 0: + pred_mag = pred_mag[:, :, :, self.offset:-self.offset] + assert pred_mag.size()[3] > 0 + + return pred_mag diff --git a/separate.py b/separate.py new file mode 100644 index 0000000000000000000000000000000000000000..772ecbdcc8c9062d830e971ee691b1ba78a7a8f2 --- /dev/null +++ b/separate.py @@ -0,0 +1,1460 @@ +from __future__ import annotations +from typing import TYPE_CHECKING +from demucs.apply import apply_model, demucs_segments +from demucs.hdemucs import HDemucs +from demucs.model_v2 import auto_load_demucs_model_v2 +from demucs.pretrained import get_model as _gm +from demucs.utils import apply_model_v1 +from demucs.utils import apply_model_v2 +from lib_v5.tfc_tdf_v3 import TFC_TDF_net, STFT +from lib_v5 import spec_utils +from lib_v5.vr_network import nets +from lib_v5.vr_network import nets_new +from lib_v5.vr_network.model_param_init import ModelParameters +from pathlib import Path +from gui_data.constants import * +from gui_data.error_handling import * +from scipy import signal +import audioread +import gzip +import librosa +import math +import numpy as np +import onnxruntime as ort +import os +import torch +import warnings +import pydub +import soundfile as sf +import lib_v5.mdxnet as MdxnetSet +import math +#import random +from onnx import load +from onnx2pytorch import ConvertModel +import gc + +if TYPE_CHECKING: + from UVR import ModelData + +# if not is_macos: +# import torch_directml + +mps_available = torch.backends.mps.is_available() if is_macos else False +cuda_available = torch.cuda.is_available() + +# def get_gpu_info(): +# directml_device, directml_available = DIRECTML_DEVICE, False + +# if not is_macos: +# directml_available = torch_directml.is_available() + +# if directml_available: +# directml_device = str(torch_directml.device()).partition(":")[0] + +# return directml_device, directml_available + +# DIRECTML_DEVICE, directml_available = get_gpu_info() + +def clear_gpu_cache(): + gc.collect() + if is_macos: + from torch import mps + mps.empty_cache() + else: + torch.cuda.empty_cache() + +warnings.filterwarnings("ignore") +cpu = torch.device('cpu') + +class SeperateAttributes: + def __init__(self, model_data: ModelData, + process_data: dict, + main_model_primary_stem_4_stem=None, + main_process_method=None, + is_return_dual=True, + main_model_primary=None, + vocal_stem_path=None, + master_inst_source=None, + master_vocal_source=None): + + self.list_all_models: list + self.process_data = process_data + self.progress_value = 0 + self.set_progress_bar = process_data['set_progress_bar'] + self.write_to_console = process_data['write_to_console'] + if vocal_stem_path: + self.audio_file, self.audio_file_base = 
vocal_stem_path + self.audio_file_base_voc_split = lambda stem, split:os.path.join(self.export_path, f'{self.audio_file_base.replace("_(Vocals)", "")}_({stem}_{split}).wav') + else: + self.audio_file = process_data['audio_file'] + self.audio_file_base = process_data['audio_file_base'] + self.audio_file_base_voc_split = None + self.export_path = process_data['export_path'] + self.cached_source_callback = process_data['cached_source_callback'] + self.cached_model_source_holder = process_data['cached_model_source_holder'] + self.is_4_stem_ensemble = process_data['is_4_stem_ensemble'] + self.list_all_models = process_data['list_all_models'] + self.process_iteration = process_data['process_iteration'] + self.is_return_dual = is_return_dual + self.is_pitch_change = model_data.is_pitch_change + self.semitone_shift = model_data.semitone_shift + self.is_match_frequency_pitch = model_data.is_match_frequency_pitch + self.overlap = model_data.overlap + self.overlap_mdx = model_data.overlap_mdx + self.overlap_mdx23 = model_data.overlap_mdx23 + self.is_mdx_combine_stems = model_data.is_mdx_combine_stems + self.is_mdx_c = model_data.is_mdx_c + self.mdx_c_configs = model_data.mdx_c_configs + self.mdxnet_stem_select = model_data.mdxnet_stem_select + self.mixer_path = model_data.mixer_path + self.model_samplerate = model_data.model_samplerate + self.model_capacity = model_data.model_capacity + self.is_vr_51_model = model_data.is_vr_51_model + self.is_pre_proc_model = model_data.is_pre_proc_model + self.is_secondary_model_activated = model_data.is_secondary_model_activated if not self.is_pre_proc_model else False + self.is_secondary_model = model_data.is_secondary_model if not self.is_pre_proc_model else True + self.process_method = model_data.process_method + self.model_path = model_data.model_path + self.model_name = model_data.model_name + self.model_basename = model_data.model_basename + self.wav_type_set = model_data.wav_type_set + self.mp3_bit_set = model_data.mp3_bit_set + self.save_format = model_data.save_format + self.is_gpu_conversion = model_data.is_gpu_conversion + self.is_normalization = model_data.is_normalization + self.is_primary_stem_only = model_data.is_primary_stem_only if not self.is_secondary_model else model_data.is_primary_model_primary_stem_only + self.is_secondary_stem_only = model_data.is_secondary_stem_only if not self.is_secondary_model else model_data.is_primary_model_secondary_stem_only + self.is_ensemble_mode = model_data.is_ensemble_mode + self.secondary_model = model_data.secondary_model # + self.primary_model_primary_stem = model_data.primary_model_primary_stem + self.primary_stem_native = model_data.primary_stem_native + self.primary_stem = model_data.primary_stem # + self.secondary_stem = model_data.secondary_stem # + self.is_invert_spec = model_data.is_invert_spec # + self.is_deverb_vocals = model_data.is_deverb_vocals + self.is_mixer_mode = model_data.is_mixer_mode # + self.secondary_model_scale = model_data.secondary_model_scale # + self.is_demucs_pre_proc_model_inst_mix = model_data.is_demucs_pre_proc_model_inst_mix # + self.primary_source_map = {} + self.secondary_source_map = {} + self.primary_source = None + self.secondary_source = None + self.secondary_source_primary = None + self.secondary_source_secondary = None + self.main_model_primary_stem_4_stem = main_model_primary_stem_4_stem + self.main_model_primary = main_model_primary + self.ensemble_primary_stem = model_data.ensemble_primary_stem + self.is_multi_stem_ensemble = model_data.is_multi_stem_ensemble + 
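+        # Device-related defaults below assume CPU inference; they are switched to MPS or CUDA
+        # further down in __init__ when GPU conversion is enabled and a backend is available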
self.is_other_gpu = False + self.is_deverb = True + self.DENOISER_MODEL = model_data.DENOISER_MODEL + self.DEVERBER_MODEL = model_data.DEVERBER_MODEL + self.is_source_swap = False + self.vocal_split_model = model_data.vocal_split_model + self.is_vocal_split_model = model_data.is_vocal_split_model + self.master_vocal_path = None + self.set_master_inst_source = None + self.master_inst_source = master_inst_source + self.master_vocal_source = master_vocal_source + self.is_save_inst_vocal_splitter = isinstance(master_inst_source, np.ndarray) and model_data.is_save_inst_vocal_splitter + self.is_inst_only_voc_splitter = model_data.is_inst_only_voc_splitter + self.is_karaoke = model_data.is_karaoke + self.is_bv_model = model_data.is_bv_model + self.is_bv_model_rebalenced = model_data.bv_model_rebalance and self.is_vocal_split_model + self.is_sec_bv_rebalance = model_data.is_sec_bv_rebalance + self.stem_path_init = os.path.join(self.export_path, f'{self.audio_file_base}_({self.secondary_stem}).wav') + self.deverb_vocal_opt = model_data.deverb_vocal_opt + self.is_save_vocal_only = model_data.is_save_vocal_only + self.device = cpu + self.run_type = ['CPUExecutionProvider'] + self.is_opencl = False + self.device_set = model_data.device_set + self.is_use_opencl = model_data.is_use_opencl + + if self.is_inst_only_voc_splitter or self.is_sec_bv_rebalance: + self.is_primary_stem_only = False + self.is_secondary_stem_only = False + + if main_model_primary and self.is_multi_stem_ensemble: + self.primary_stem, self.secondary_stem = main_model_primary, secondary_stem(main_model_primary) + + if self.is_gpu_conversion >= 0: + if mps_available: + self.device, self.is_other_gpu = 'mps', True + else: + device_prefix = None + if self.device_set != DEFAULT: + device_prefix = CUDA_DEVICE#DIRECTML_DEVICE if self.is_use_opencl and directml_available else CUDA_DEVICE + + # if directml_available and self.is_use_opencl: + # self.device = torch_directml.device() if not device_prefix else f'{device_prefix}:{self.device_set}' + # self.is_other_gpu = True + if cuda_available:# and not self.is_use_opencl: + self.device = CUDA_DEVICE if not device_prefix else f'{device_prefix}:{self.device_set}' + self.run_type = ['CUDAExecutionProvider'] + + if model_data.process_method == MDX_ARCH_TYPE: + self.is_mdx_ckpt = model_data.is_mdx_ckpt + self.primary_model_name, self.primary_sources = self.cached_source_callback(MDX_ARCH_TYPE, model_name=self.model_basename) + self.is_denoise = model_data.is_denoise# + self.is_denoise_model = model_data.is_denoise_model# + self.is_mdx_c_seg_def = model_data.is_mdx_c_seg_def# + self.mdx_batch_size = model_data.mdx_batch_size + self.compensate = model_data.compensate + self.mdx_segment_size = model_data.mdx_segment_size + + if self.is_mdx_c: + if not self.is_4_stem_ensemble: + self.primary_stem = model_data.ensemble_primary_stem if process_data['is_ensemble_master'] else model_data.primary_stem + self.secondary_stem = model_data.ensemble_secondary_stem if process_data['is_ensemble_master'] else model_data.secondary_stem + else: + self.dim_f, self.dim_t = model_data.mdx_dim_f_set, 2**model_data.mdx_dim_t_set + + self.check_label_secondary_stem_runs() + self.n_fft = model_data.mdx_n_fft_scale_set + self.chunks = model_data.chunks + self.margin = model_data.margin + self.adjust = 1 + self.dim_c = 4 + self.hop = 1024 + + if model_data.process_method == DEMUCS_ARCH_TYPE: + self.demucs_stems = model_data.demucs_stems if not main_process_method in [MDX_ARCH_TYPE, VR_ARCH_TYPE] else None + 
self.secondary_model_4_stem = model_data.secondary_model_4_stem + self.secondary_model_4_stem_scale = model_data.secondary_model_4_stem_scale + self.is_chunk_demucs = model_data.is_chunk_demucs + self.segment = model_data.segment + self.demucs_version = model_data.demucs_version + self.demucs_source_list = model_data.demucs_source_list + self.demucs_source_map = model_data.demucs_source_map + self.is_demucs_combine_stems = model_data.is_demucs_combine_stems + self.demucs_stem_count = model_data.demucs_stem_count + self.pre_proc_model = model_data.pre_proc_model + self.device = cpu if self.is_other_gpu and not self.demucs_version in [DEMUCS_V3, DEMUCS_V4] else self.device + + self.primary_stem = model_data.ensemble_primary_stem if process_data['is_ensemble_master'] else model_data.primary_stem + self.secondary_stem = model_data.ensemble_secondary_stem if process_data['is_ensemble_master'] else model_data.secondary_stem + + if (self.is_multi_stem_ensemble or self.is_4_stem_ensemble) and not self.is_secondary_model: + self.is_return_dual = False + + if self.is_multi_stem_ensemble and main_model_primary: + self.is_4_stem_ensemble = False + if main_model_primary in self.demucs_source_map.keys(): + self.primary_stem = main_model_primary + self.secondary_stem = secondary_stem(main_model_primary) + elif secondary_stem(main_model_primary) in self.demucs_source_map.keys(): + self.primary_stem = secondary_stem(main_model_primary) + self.secondary_stem = main_model_primary + + if self.is_secondary_model and not process_data['is_ensemble_master']: + if not self.demucs_stem_count == 2 and model_data.primary_model_primary_stem == INST_STEM: + self.primary_stem = VOCAL_STEM + self.secondary_stem = INST_STEM + else: + self.primary_stem = model_data.primary_model_primary_stem + self.secondary_stem = secondary_stem(self.primary_stem) + + self.shifts = model_data.shifts + self.is_split_mode = model_data.is_split_mode if not self.demucs_version == DEMUCS_V4 else True + self.primary_model_name, self.primary_sources = self.cached_source_callback(DEMUCS_ARCH_TYPE, model_name=self.model_basename) + + if model_data.process_method == VR_ARCH_TYPE: + self.check_label_secondary_stem_runs() + self.primary_model_name, self.primary_sources = self.cached_source_callback(VR_ARCH_TYPE, model_name=self.model_basename) + self.mp = model_data.vr_model_param + self.high_end_process = model_data.is_high_end_process + self.is_tta = model_data.is_tta + self.is_post_process = model_data.is_post_process + self.is_gpu_conversion = model_data.is_gpu_conversion + self.batch_size = model_data.batch_size + self.window_size = model_data.window_size + self.input_high_end_h = None + self.input_high_end = None + self.post_process_threshold = model_data.post_process_threshold + self.aggressiveness = {'value': model_data.aggression_setting, + 'split_bin': self.mp.param['band'][1]['crop_stop'], + 'aggr_correction': self.mp.param.get('aggr_correction')} + + def check_label_secondary_stem_runs(self): + + # For ensemble master that's not a 4-stem ensemble, and not mdx_c + if self.process_data['is_ensemble_master'] and not self.is_4_stem_ensemble and not self.is_mdx_c: + if self.ensemble_primary_stem != self.primary_stem: + self.is_primary_stem_only, self.is_secondary_stem_only = self.is_secondary_stem_only, self.is_primary_stem_only + + # For secondary models + if self.is_pre_proc_model or self.is_secondary_model: + self.is_primary_stem_only = False + self.is_secondary_stem_only = False + + def start_inference_console_write(self): + if 
self.is_secondary_model and not self.is_pre_proc_model and not self.is_vocal_split_model: + self.write_to_console(INFERENCE_STEP_2_SEC(self.process_method, self.model_basename)) + + if self.is_pre_proc_model: + self.write_to_console(INFERENCE_STEP_2_PRE(self.process_method, self.model_basename)) + + if self.is_vocal_split_model: + self.write_to_console(INFERENCE_STEP_2_VOC_S(self.process_method, self.model_basename)) + + def running_inference_console_write(self, is_no_write=False): + self.write_to_console(DONE, base_text='') if not is_no_write else None + self.set_progress_bar(0.05) if not is_no_write else None + + if self.is_secondary_model and not self.is_pre_proc_model and not self.is_vocal_split_model: + self.write_to_console(INFERENCE_STEP_1_SEC) + elif self.is_pre_proc_model: + self.write_to_console(INFERENCE_STEP_1_PRE) + elif self.is_vocal_split_model: + self.write_to_console(INFERENCE_STEP_1_VOC_S) + else: + self.write_to_console(INFERENCE_STEP_1) + + def running_inference_progress_bar(self, length, is_match_mix=False): + if not is_match_mix: + self.progress_value += 1 + + if (0.8/length*self.progress_value) >= 0.8: + length = self.progress_value + 1 + + self.set_progress_bar(0.1, (0.8/length*self.progress_value)) + + def load_cached_sources(self): + + if self.is_secondary_model and not self.is_pre_proc_model: + self.write_to_console(INFERENCE_STEP_2_SEC_CACHED_MODOEL(self.process_method, self.model_basename)) + elif self.is_pre_proc_model: + self.write_to_console(INFERENCE_STEP_2_PRE_CACHED_MODOEL(self.process_method, self.model_basename)) + else: + self.write_to_console(INFERENCE_STEP_2_PRIMARY_CACHED, "") + + def cache_source(self, secondary_sources): + + model_occurrences = self.list_all_models.count(self.model_basename) + + if not model_occurrences <= 1: + if self.process_method == MDX_ARCH_TYPE: + self.cached_model_source_holder(MDX_ARCH_TYPE, secondary_sources, self.model_basename) + + if self.process_method == VR_ARCH_TYPE: + self.cached_model_source_holder(VR_ARCH_TYPE, secondary_sources, self.model_basename) + + if self.process_method == DEMUCS_ARCH_TYPE: + self.cached_model_source_holder(DEMUCS_ARCH_TYPE, secondary_sources, self.model_basename) + + def process_vocal_split_chain(self, sources: dict): + + def is_valid_vocal_split_condition(master_vocal_source): + """Checks if conditions for vocal split processing are met.""" + conditions = [ + isinstance(master_vocal_source, np.ndarray), + self.vocal_split_model, + not self.is_ensemble_mode, + not self.is_karaoke, + not self.is_bv_model + ] + return all(conditions) + + # Retrieve sources from the dictionary with default fallbacks + master_inst_source = sources.get(INST_STEM, None) + master_vocal_source = sources.get(VOCAL_STEM, None) + + # Process the vocal split chain if conditions are met + if is_valid_vocal_split_condition(master_vocal_source): + process_chain_model( + self.vocal_split_model, + self.process_data, + vocal_stem_path=self.master_vocal_path, + master_vocal_source=master_vocal_source, + master_inst_source=master_inst_source + ) + + def process_secondary_stem(self, stem_source, secondary_model_source=None, model_scale=None): + if not self.is_secondary_model: + if self.is_secondary_model_activated and isinstance(secondary_model_source, np.ndarray): + secondary_model_scale = model_scale if model_scale else self.secondary_model_scale + stem_source = spec_utils.average_dual_sources(stem_source, secondary_model_source, secondary_model_scale) + + return stem_source + + def final_process(self, stem_path, source, 
secondary_source, stem_name, samplerate): + source = self.process_secondary_stem(source, secondary_source) + self.write_audio(stem_path, source, samplerate, stem_name=stem_name) + + return {stem_name: source} + + def write_audio(self, stem_path: str, stem_source, samplerate, stem_name=None): + + def save_audio_file(path, source): + source = spec_utils.normalize(source, self.is_normalization) + sf.write(path, source, samplerate, subtype=self.wav_type_set) + + if is_not_ensemble: + save_format(path, self.save_format, self.mp3_bit_set) + + def save_voc_split_instrumental(stem_name, stem_source, is_inst_invert=False): + inst_stem_name = "Instrumental (With Lead Vocals)" if stem_name == LEAD_VOCAL_STEM else "Instrumental (With Backing Vocals)" + inst_stem_path_name = LEAD_VOCAL_STEM_I if stem_name == LEAD_VOCAL_STEM else BV_VOCAL_STEM_I + inst_stem_path = self.audio_file_base_voc_split(INST_STEM, inst_stem_path_name) + stem_source = -stem_source if is_inst_invert else stem_source + inst_stem_source = spec_utils.combine_arrarys([self.master_inst_source, stem_source], is_swap=True) + save_with_message(inst_stem_path, inst_stem_name, inst_stem_source) + + def save_voc_split_vocal(stem_name, stem_source): + voc_split_stem_name = LEAD_VOCAL_STEM_LABEL if stem_name == LEAD_VOCAL_STEM else BV_VOCAL_STEM_LABEL + voc_split_stem_path = self.audio_file_base_voc_split(VOCAL_STEM, stem_name) + save_with_message(voc_split_stem_path, voc_split_stem_name, stem_source) + + def save_with_message(stem_path, stem_name, stem_source): + is_deverb = self.is_deverb_vocals and ( + self.deverb_vocal_opt == stem_name or + (self.deverb_vocal_opt == 'ALL' and + (stem_name == VOCAL_STEM or stem_name == LEAD_VOCAL_STEM_LABEL or stem_name == BV_VOCAL_STEM_LABEL))) + + self.write_to_console(f'{SAVING_STEM[0]}{stem_name}{SAVING_STEM[1]}') + + if is_deverb and is_not_ensemble: + deverb_vocals(stem_path, stem_source) + + save_audio_file(stem_path, stem_source) + self.write_to_console(DONE, base_text='') + + def deverb_vocals(stem_path:str, stem_source): + self.write_to_console(INFERENCE_STEP_DEVERBING, base_text='') + stem_source_deverbed, stem_source_2 = vr_denoiser(stem_source, self.device, is_deverber=True, model_path=self.DEVERBER_MODEL) + save_audio_file(stem_path.replace(".wav", "_deverbed.wav"), stem_source_deverbed) + save_audio_file(stem_path.replace(".wav", "_reverb_only.wav"), stem_source_2) + + is_bv_model_lead = (self.is_bv_model_rebalenced and self.is_vocal_split_model and stem_name == LEAD_VOCAL_STEM) + is_bv_rebalance_lead = (self.is_bv_model_rebalenced and self.is_vocal_split_model and stem_name == BV_VOCAL_STEM) + is_no_vocal_save = self.is_inst_only_voc_splitter and (stem_name == VOCAL_STEM or stem_name == BV_VOCAL_STEM or stem_name == LEAD_VOCAL_STEM) or is_bv_model_lead + is_not_ensemble = (not self.is_ensemble_mode or self.is_vocal_split_model) + is_do_not_save_inst = (self.is_save_vocal_only and self.is_sec_bv_rebalance and stem_name == INST_STEM) + + if is_bv_rebalance_lead: + master_voc_source = spec_utils.match_array_shapes(self.master_vocal_source, stem_source, is_swap=True) + bv_rebalance_lead_source = stem_source-master_voc_source + + if not is_bv_model_lead and not is_do_not_save_inst: + if self.is_vocal_split_model or not self.is_secondary_model: + if self.is_vocal_split_model and not self.is_inst_only_voc_splitter: + save_voc_split_vocal(stem_name, stem_source) + if is_bv_rebalance_lead: + save_voc_split_vocal(LEAD_VOCAL_STEM, bv_rebalance_lead_source) + else: + if not is_no_vocal_save: + 
save_with_message(stem_path, stem_name, stem_source) + + if self.is_save_inst_vocal_splitter and not self.is_save_vocal_only: + save_voc_split_instrumental(stem_name, stem_source) + if is_bv_rebalance_lead: + save_voc_split_instrumental(LEAD_VOCAL_STEM, bv_rebalance_lead_source, is_inst_invert=True) + + self.set_progress_bar(0.95) + + if stem_name == VOCAL_STEM: + self.master_vocal_path = stem_path + + def pitch_fix(self, source, sr_pitched, org_mix): + semitone_shift = self.semitone_shift + source = spec_utils.change_pitch_semitones(source, sr_pitched, semitone_shift=semitone_shift)[0] + source = spec_utils.match_array_shapes(source, org_mix) + return source + + def match_frequency_pitch(self, mix): + source = mix + if self.is_match_frequency_pitch and self.is_pitch_change: + source, sr_pitched = spec_utils.change_pitch_semitones(mix, 44100, semitone_shift=-self.semitone_shift) + source = self.pitch_fix(source, sr_pitched, mix) + + return source + +class SeperateMDX(SeperateAttributes): + + def seperate(self): + samplerate = 44100 + + if self.primary_model_name == self.model_basename and isinstance(self.primary_sources, tuple): + mix, source = self.primary_sources + self.load_cached_sources() + else: + self.start_inference_console_write() + + if self.is_mdx_ckpt: + model_params = torch.load(self.model_path, map_location=lambda storage, loc: storage)['hyper_parameters'] + self.dim_c, self.hop = model_params['dim_c'], model_params['hop_length'] + separator = MdxnetSet.ConvTDFNet(**model_params) + self.model_run = separator.load_from_checkpoint(self.model_path).to(self.device).eval() + else: + if self.mdx_segment_size == self.dim_t and not self.is_other_gpu: + ort_ = ort.InferenceSession(self.model_path, providers=self.run_type) + self.model_run = lambda spek:ort_.run(None, {'input': spek.cpu().numpy()})[0] + else: + self.model_run = ConvertModel(load(self.model_path)) + self.model_run.to(self.device).eval() + + self.running_inference_console_write() + mix = prepare_mix(self.audio_file) + + source = self.demix(mix) + + if not self.is_vocal_split_model: + self.cache_source((mix, source)) + self.write_to_console(DONE, base_text='') + + mdx_net_cut = True if self.primary_stem in MDX_NET_FREQ_CUT and self.is_match_frequency_pitch else False + + if self.is_secondary_model_activated and self.secondary_model: + self.secondary_source_primary, self.secondary_source_secondary = process_secondary_model(self.secondary_model, self.process_data, main_process_method=self.process_method, main_model_primary=self.primary_stem) + + if not self.is_primary_stem_only: + secondary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({self.secondary_stem}).wav') + if not isinstance(self.secondary_source, np.ndarray): + raw_mix = self.demix(self.match_frequency_pitch(mix), is_match_mix=True) if mdx_net_cut else self.match_frequency_pitch(mix) + self.secondary_source = spec_utils.invert_stem(raw_mix, source) if self.is_invert_spec else mix.T-source.T + + self.secondary_source_map = self.final_process(secondary_stem_path, self.secondary_source, self.secondary_source_secondary, self.secondary_stem, samplerate) + + if not self.is_secondary_stem_only: + primary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({self.primary_stem}).wav') + + if not isinstance(self.primary_source, np.ndarray): + self.primary_source = source.T + + self.primary_source_map = self.final_process(primary_stem_path, self.primary_source, self.secondary_source_primary, self.primary_stem, samplerate) + + 
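# Free GPU memory, then merge the primary and secondary stem maps and hand them to the vocal-split chain +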
clear_gpu_cache() + + secondary_sources = {**self.primary_source_map, **self.secondary_source_map} + + self.process_vocal_split_chain(secondary_sources) + + if self.is_secondary_model or self.is_pre_proc_model: + return secondary_sources + + def initialize_model_settings(self): + self.n_bins = self.n_fft//2+1 + self.trim = self.n_fft//2 + self.chunk_size = self.hop * (self.mdx_segment_size-1) + self.gen_size = self.chunk_size-2*self.trim + self.stft = STFT(self.n_fft, self.hop, self.dim_f, self.device) + + def demix(self, mix, is_match_mix=False): + self.initialize_model_settings() + + org_mix = mix + tar_waves_ = [] + + if is_match_mix: + chunk_size = self.hop * (256-1) + overlap = 0.02 + else: + chunk_size = self.chunk_size + overlap = self.overlap_mdx + + if self.is_pitch_change: + mix, sr_pitched = spec_utils.change_pitch_semitones(mix, 44100, semitone_shift=-self.semitone_shift) + + gen_size = chunk_size-2*self.trim + + pad = gen_size + self.trim - ((mix.shape[-1]) % gen_size) + mixture = np.concatenate((np.zeros((2, self.trim), dtype='float32'), mix, np.zeros((2, pad), dtype='float32')), 1) + + step = self.chunk_size - self.n_fft if overlap == DEFAULT else int((1 - overlap) * chunk_size) + result = np.zeros((1, 2, mixture.shape[-1]), dtype=np.float32) + divider = np.zeros((1, 2, mixture.shape[-1]), dtype=np.float32) + total = 0 + total_chunks = (mixture.shape[-1] + step - 1) // step + + for i in range(0, mixture.shape[-1], step): + total += 1 + start = i + end = min(i + chunk_size, mixture.shape[-1]) + + chunk_size_actual = end - start + + if overlap == 0: + window = None + else: + window = np.hanning(chunk_size_actual) + window = np.tile(window[None, None, :], (1, 2, 1)) + + mix_part_ = mixture[:, start:end] + if end != i + chunk_size: + pad_size = (i + chunk_size) - end + mix_part_ = np.concatenate((mix_part_, np.zeros((2, pad_size), dtype='float32')), axis=-1) + + mix_part = torch.tensor([mix_part_], dtype=torch.float32).to(self.device) + mix_waves = mix_part.split(self.mdx_batch_size) + + with torch.no_grad(): + for mix_wave in mix_waves: + self.running_inference_progress_bar(total_chunks, is_match_mix=is_match_mix) + + tar_waves = self.run_model(mix_wave, is_match_mix=is_match_mix) + + if window is not None: + tar_waves[..., :chunk_size_actual] *= window + divider[..., start:end] += window + else: + divider[..., start:end] += 1 + + result[..., start:end] += tar_waves[..., :end-start] + + tar_waves = result / divider + tar_waves_.append(tar_waves) + + tar_waves_ = np.vstack(tar_waves_)[:, :, self.trim:-self.trim] + tar_waves = np.concatenate(tar_waves_, axis=-1)[:, :mix.shape[-1]] + + source = tar_waves[:,0:None] + + if self.is_pitch_change and not is_match_mix: + source = self.pitch_fix(source, sr_pitched, org_mix) + + source = source if is_match_mix else source*self.compensate + + if self.is_denoise_model and not is_match_mix: + if NO_STEM in self.primary_stem_native or self.primary_stem_native == INST_STEM: + if org_mix.shape[1] != source.shape[1]: + source = spec_utils.match_array_shapes(source, org_mix) + source = org_mix - vr_denoiser(org_mix-source, self.device, model_path=self.DENOISER_MODEL) + else: + source = vr_denoiser(source, self.device, model_path=self.DENOISER_MODEL) + + return source + + def run_model(self, mix, is_match_mix=False): + + spek = self.stft(mix.to(self.device))*self.adjust + spek[:, :, :3, :] *= 0 + + if is_match_mix: + spec_pred = spek.cpu().numpy() + else: + spec_pred = -self.model_run(-spek)*0.5+self.model_run(spek)*0.5 if self.is_denoise else 
self.model_run(spek) + + return self.stft.inverse(torch.tensor(spec_pred).to(self.device)).cpu().detach().numpy() + +class SeperateMDXC(SeperateAttributes): + + def seperate(self): + samplerate = 44100 + sources = None + + if self.primary_model_name == self.model_basename and isinstance(self.primary_sources, tuple): + mix, sources = self.primary_sources + self.load_cached_sources() + else: + self.start_inference_console_write() + self.running_inference_console_write() + mix = prepare_mix(self.audio_file) + sources = self.demix(mix) + if not self.is_vocal_split_model: + self.cache_source((mix, sources)) + self.write_to_console(DONE, base_text='') + + stem_list = [self.mdx_c_configs.training.target_instrument] if self.mdx_c_configs.training.target_instrument else [i for i in self.mdx_c_configs.training.instruments] + + if self.is_secondary_model: + if self.is_pre_proc_model: + self.mdxnet_stem_select = stem_list[0] + else: + self.mdxnet_stem_select = self.main_model_primary_stem_4_stem if self.main_model_primary_stem_4_stem else self.primary_model_primary_stem + self.primary_stem = self.mdxnet_stem_select + self.secondary_stem = secondary_stem(self.mdxnet_stem_select) + self.is_primary_stem_only, self.is_secondary_stem_only = False, False + + is_all_stems = self.mdxnet_stem_select == ALL_STEMS + is_not_ensemble_master = not self.process_data['is_ensemble_master'] + is_not_single_stem = not len(stem_list) <= 2 + is_not_secondary_model = not self.is_secondary_model + is_ensemble_4_stem = self.is_4_stem_ensemble and is_not_single_stem + + if (is_all_stems and is_not_ensemble_master and is_not_single_stem and is_not_secondary_model) or is_ensemble_4_stem and not self.is_pre_proc_model: + for stem in stem_list: + primary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({stem}).wav') + self.primary_source = sources[stem].T + self.write_audio(primary_stem_path, self.primary_source, samplerate, stem_name=stem) + + if stem == VOCAL_STEM and not self.is_sec_bv_rebalance: + self.process_vocal_split_chain({VOCAL_STEM:stem}) + else: + if len(stem_list) == 1: + source_primary = sources + else: + source_primary = sources[stem_list[0]] if self.is_multi_stem_ensemble and len(stem_list) == 2 else sources[self.mdxnet_stem_select] + if self.is_secondary_model_activated and self.secondary_model: + self.secondary_source_primary, self.secondary_source_secondary = process_secondary_model(self.secondary_model, + self.process_data, + main_process_method=self.process_method, + main_model_primary=self.primary_stem) + + if not self.is_primary_stem_only: + secondary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({self.secondary_stem}).wav') + if not isinstance(self.secondary_source, np.ndarray): + + if self.is_mdx_combine_stems and len(stem_list) >= 2: + if len(stem_list) == 2: + secondary_source = sources[self.secondary_stem] + else: + sources.pop(self.primary_stem) + next_stem = next(iter(sources)) + secondary_source = np.zeros_like(sources[next_stem]) + for v in sources.values(): + secondary_source += v + + self.secondary_source = secondary_source.T + else: + self.secondary_source, raw_mix = source_primary, self.match_frequency_pitch(mix) + self.secondary_source = spec_utils.to_shape(self.secondary_source, raw_mix.shape) + + if self.is_invert_spec: + self.secondary_source = spec_utils.invert_stem(raw_mix, self.secondary_source) + else: + self.secondary_source = (-self.secondary_source.T+raw_mix.T) + + self.secondary_source_map = self.final_process(secondary_stem_path, 
self.secondary_source, self.secondary_source_secondary, self.secondary_stem, samplerate) + + if not self.is_secondary_stem_only: + primary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({self.primary_stem}).wav') + if not isinstance(self.primary_source, np.ndarray): + self.primary_source = source_primary.T + + self.primary_source_map = self.final_process(primary_stem_path, self.primary_source, self.secondary_source_primary, self.primary_stem, samplerate) + + clear_gpu_cache() + + secondary_sources = {**self.primary_source_map, **self.secondary_source_map} + self.process_vocal_split_chain(secondary_sources) + + if self.is_secondary_model or self.is_pre_proc_model: + return secondary_sources + + def demix(self, mix): + sr_pitched = 441000 + org_mix = mix + if self.is_pitch_change: + mix, sr_pitched = spec_utils.change_pitch_semitones(mix, 44100, semitone_shift=-self.semitone_shift) + + model = TFC_TDF_net(self.mdx_c_configs, device=self.device) + model.load_state_dict(torch.load(self.model_path, map_location=cpu)) + model.to(self.device).eval() + mix = torch.tensor(mix, dtype=torch.float32) + + try: + S = model.num_target_instruments + except Exception as e: + S = model.module.num_target_instruments + + mdx_segment_size = self.mdx_c_configs.inference.dim_t if self.is_mdx_c_seg_def else self.mdx_segment_size + + batch_size = self.mdx_batch_size + chunk_size = self.mdx_c_configs.audio.hop_length * (mdx_segment_size - 1) + overlap = self.overlap_mdx23 + + hop_size = chunk_size // overlap + mix_shape = mix.shape[1] + pad_size = hop_size - (mix_shape - chunk_size) % hop_size + mix = torch.cat([torch.zeros(2, chunk_size - hop_size), mix, torch.zeros(2, pad_size + chunk_size - hop_size)], 1) + + chunks = mix.unfold(1, chunk_size, hop_size).transpose(0, 1) + batches = [chunks[i : i + batch_size] for i in range(0, len(chunks), batch_size)] + + X = torch.zeros(S, *mix.shape) if S > 1 else torch.zeros_like(mix) + X = X.to(self.device) + + with torch.no_grad(): + cnt = 0 + for batch in batches: + self.running_inference_progress_bar(len(batches)) + x = model(batch.to(self.device)) + + for w in x: + X[..., cnt * hop_size : cnt * hop_size + chunk_size] += w + cnt += 1 + + estimated_sources = X[..., chunk_size - hop_size:-(pad_size + chunk_size - hop_size)] / overlap + del X + pitch_fix = lambda s:self.pitch_fix(s, sr_pitched, org_mix) + + if S > 1: + sources = {k: pitch_fix(v) if self.is_pitch_change else v for k, v in zip(self.mdx_c_configs.training.instruments, estimated_sources.cpu().detach().numpy())} + del estimated_sources + if self.is_denoise_model: + if VOCAL_STEM in sources.keys() and INST_STEM in sources.keys(): + sources[VOCAL_STEM] = vr_denoiser(sources[VOCAL_STEM], self.device, model_path=self.DENOISER_MODEL) + if sources[VOCAL_STEM].shape[1] != org_mix.shape[1]: + sources[VOCAL_STEM] = spec_utils.match_array_shapes(sources[VOCAL_STEM], org_mix) + sources[INST_STEM] = org_mix - sources[VOCAL_STEM] + + return sources + else: + est_s = estimated_sources.cpu().detach().numpy() + del estimated_sources + return pitch_fix(est_s) if self.is_pitch_change else est_s + +class SeperateDemucs(SeperateAttributes): + def seperate(self): + samplerate = 44100 + source = None + model_scale = None + stem_source = None + stem_source_secondary = None + inst_mix = None + inst_source = None + is_no_write = False + is_no_piano_guitar = False + is_no_cache = False + + if self.primary_model_name == self.model_basename and isinstance(self.primary_sources, np.ndarray) and not self.pre_proc_model: + 
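# Reuse the cached Demucs output when this model has already been run on the current track +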
source = self.primary_sources + self.load_cached_sources() + else: + self.start_inference_console_write() + is_no_cache = True + + mix = prepare_mix(self.audio_file) + + if is_no_cache: + if self.demucs_version == DEMUCS_V1: + if str(self.model_path).endswith(".gz"): + self.model_path = gzip.open(self.model_path, "rb") + klass, args, kwargs, state = torch.load(self.model_path) + self.demucs = klass(*args, **kwargs) + self.demucs.to(self.device) + self.demucs.load_state_dict(state) + elif self.demucs_version == DEMUCS_V2: + self.demucs = auto_load_demucs_model_v2(self.demucs_source_list, self.model_path) + self.demucs.to(self.device) + self.demucs.load_state_dict(torch.load(self.model_path)) + self.demucs.eval() + else: + self.demucs = HDemucs(sources=self.demucs_source_list) + self.demucs = _gm(name=os.path.splitext(os.path.basename(self.model_path))[0], + repo=Path(os.path.dirname(self.model_path))) + self.demucs = demucs_segments(self.segment, self.demucs) + self.demucs.to(self.device) + self.demucs.eval() + + if self.pre_proc_model: + if self.primary_stem not in [VOCAL_STEM, INST_STEM]: + is_no_write = True + self.write_to_console(DONE, base_text='') + mix_no_voc = process_secondary_model(self.pre_proc_model, self.process_data, is_pre_proc_model=True) + inst_mix = prepare_mix(mix_no_voc[INST_STEM]) + self.process_iteration() + self.running_inference_console_write(is_no_write=is_no_write) + inst_source = self.demix_demucs(inst_mix) + self.process_iteration() + + self.running_inference_console_write(is_no_write=is_no_write) if not self.pre_proc_model else None + + if self.primary_model_name == self.model_basename and isinstance(self.primary_sources, np.ndarray) and self.pre_proc_model: + source = self.primary_sources + else: + source = self.demix_demucs(mix) + + self.write_to_console(DONE, base_text='') + + del self.demucs + clear_gpu_cache() + + if isinstance(inst_source, np.ndarray): + source_reshape = spec_utils.reshape_sources(inst_source[self.demucs_source_map[VOCAL_STEM]], source[self.demucs_source_map[VOCAL_STEM]]) + inst_source[self.demucs_source_map[VOCAL_STEM]] = source_reshape + source = inst_source + + if isinstance(source, np.ndarray): + + if len(source) == 2: + self.demucs_source_map = DEMUCS_2_SOURCE_MAPPER + else: + self.demucs_source_map = DEMUCS_6_SOURCE_MAPPER if len(source) == 6 else DEMUCS_4_SOURCE_MAPPER + + if len(source) == 6 and self.process_data['is_ensemble_master'] or len(source) == 6 and self.is_secondary_model: + is_no_piano_guitar = True + six_stem_other_source = list(source) + six_stem_other_source = [i for n, i in enumerate(source) if n in [self.demucs_source_map[OTHER_STEM], self.demucs_source_map[GUITAR_STEM], self.demucs_source_map[PIANO_STEM]]] + other_source = np.zeros_like(six_stem_other_source[0]) + for i in six_stem_other_source: + other_source += i + source_reshape = spec_utils.reshape_sources(source[self.demucs_source_map[OTHER_STEM]], other_source) + source[self.demucs_source_map[OTHER_STEM]] = source_reshape + + if not self.is_vocal_split_model: + self.cache_source(source) + + if (self.demucs_stems == ALL_STEMS and not self.process_data['is_ensemble_master']) or self.is_4_stem_ensemble and not self.is_return_dual: + for stem_name, stem_value in self.demucs_source_map.items(): + if self.is_secondary_model_activated and not self.is_secondary_model and not stem_value >= 4: + if self.secondary_model_4_stem[stem_value]: + model_scale = self.secondary_model_4_stem_scale[stem_value] + stem_source_secondary = 
process_secondary_model(self.secondary_model_4_stem[stem_value], self.process_data, main_model_primary_stem_4_stem=stem_name, is_source_load=True, is_return_dual=False) + if isinstance(stem_source_secondary, np.ndarray): + stem_source_secondary = stem_source_secondary[1 if self.secondary_model_4_stem[stem_value].demucs_stem_count == 2 else stem_value].T + elif type(stem_source_secondary) is dict: + stem_source_secondary = stem_source_secondary[stem_name] + + stem_source_secondary = None if stem_value >= 4 else stem_source_secondary + stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({stem_name}).wav') + stem_source = source[stem_value].T + + stem_source = self.process_secondary_stem(stem_source, secondary_model_source=stem_source_secondary, model_scale=model_scale) + self.write_audio(stem_path, stem_source, samplerate, stem_name=stem_name) + + if stem_name == VOCAL_STEM and not self.is_sec_bv_rebalance: + self.process_vocal_split_chain({VOCAL_STEM:stem_source}) + + if self.is_secondary_model: + return source + else: + if self.is_secondary_model_activated and self.secondary_model: + self.secondary_source_primary, self.secondary_source_secondary = process_secondary_model(self.secondary_model, self.process_data, main_process_method=self.process_method) + + if not self.is_primary_stem_only: + def secondary_save(sec_stem_name, source, raw_mixture=None, is_inst_mixture=False): + secondary_source = self.secondary_source if not is_inst_mixture else None + secondary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({sec_stem_name}).wav') + secondary_source_secondary = None + + if not isinstance(secondary_source, np.ndarray): + if self.is_demucs_combine_stems: + source = list(source) + if is_inst_mixture: + source = [i for n, i in enumerate(source) if not n in [self.demucs_source_map[self.primary_stem], self.demucs_source_map[VOCAL_STEM]]] + else: + source.pop(self.demucs_source_map[self.primary_stem]) + + source = source[:len(source) - 2] if is_no_piano_guitar else source + secondary_source = np.zeros_like(source[0]) + for i in source: + secondary_source += i + secondary_source = secondary_source.T + else: + if not isinstance(raw_mixture, np.ndarray): + raw_mixture = prepare_mix(self.audio_file) + + secondary_source = source[self.demucs_source_map[self.primary_stem]] + + if self.is_invert_spec: + secondary_source = spec_utils.invert_stem(raw_mixture, secondary_source) + else: + raw_mixture = spec_utils.reshape_sources(secondary_source, raw_mixture) + secondary_source = (-secondary_source.T+raw_mixture.T) + + if not is_inst_mixture: + self.secondary_source = secondary_source + secondary_source_secondary = self.secondary_source_secondary + self.secondary_source = self.process_secondary_stem(secondary_source, secondary_source_secondary) + self.secondary_source_map = {self.secondary_stem: self.secondary_source} + + self.write_audio(secondary_stem_path, secondary_source, samplerate, stem_name=sec_stem_name) + + secondary_save(self.secondary_stem, source, raw_mixture=mix) + + if self.is_demucs_pre_proc_model_inst_mix and self.pre_proc_model and not self.is_4_stem_ensemble: + secondary_save(f"{self.secondary_stem} {INST_STEM}", source, raw_mixture=inst_mix, is_inst_mixture=True) + + if not self.is_secondary_stem_only: + primary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({self.primary_stem}).wav') + if not isinstance(self.primary_source, np.ndarray): + self.primary_source = source[self.demucs_source_map[self.primary_stem]].T + + 
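# Blend in any secondary-model result, write the primary stem, and record it in the source map +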
self.primary_source_map = self.final_process(primary_stem_path, self.primary_source, self.secondary_source_primary, self.primary_stem, samplerate) + + secondary_sources = {**self.primary_source_map, **self.secondary_source_map} + + self.process_vocal_split_chain(secondary_sources) + + if self.is_secondary_model: + return secondary_sources + + def demix_demucs(self, mix): + + org_mix = mix + + if self.is_pitch_change: + mix, sr_pitched = spec_utils.change_pitch_semitones(mix, 44100, semitone_shift=-self.semitone_shift) + + processed = {} + mix = torch.tensor(mix, dtype=torch.float32) + ref = mix.mean(0) + mix = (mix - ref.mean()) / ref.std() + mix_infer = mix + + with torch.no_grad(): + if self.demucs_version == DEMUCS_V1: + sources = apply_model_v1(self.demucs, + mix_infer.to(self.device), + self.shifts, + self.is_split_mode, + set_progress_bar=self.set_progress_bar) + elif self.demucs_version == DEMUCS_V2: + sources = apply_model_v2(self.demucs, + mix_infer.to(self.device), + self.shifts, + self.is_split_mode, + self.overlap, + set_progress_bar=self.set_progress_bar) + else: + sources = apply_model(self.demucs, + mix_infer[None], + self.shifts, + self.is_split_mode, + self.overlap, + static_shifts=1 if self.shifts == 0 else self.shifts, + set_progress_bar=self.set_progress_bar, + device=self.device)[0] + + sources = (sources * ref.std() + ref.mean()).cpu().numpy() + sources[[0,1]] = sources[[1,0]] + processed[mix] = sources[:,:,0:None].copy() + sources = list(processed.values()) + sources = [s[:,:,0:None] for s in sources] + #sources = [self.pitch_fix(s[:,:,0:None], sr_pitched, org_mix) if self.is_pitch_change else s[:,:,0:None] for s in sources] + sources = np.concatenate(sources, axis=-1) + + if self.is_pitch_change: + sources = np.stack([self.pitch_fix(stem, sr_pitched, org_mix) for stem in sources]) + + return sources + +class SeperateVR(SeperateAttributes): + + def seperate(self): + if self.primary_model_name == self.model_basename and isinstance(self.primary_sources, tuple): + y_spec, v_spec = self.primary_sources + self.load_cached_sources() + else: + self.start_inference_console_write() + + device = self.device + + nn_arch_sizes = [ + 31191, # default + 33966, 56817, 123821, 123812, 129605, 218409, 537238, 537227] + vr_5_1_models = [56817, 218409] + model_size = math.ceil(os.stat(self.model_path).st_size / 1024) + nn_arch_size = min(nn_arch_sizes, key=lambda x:abs(x-model_size)) + + if nn_arch_size in vr_5_1_models or self.is_vr_51_model: + self.model_run = nets_new.CascadedNet(self.mp.param['bins'] * 2, + nn_arch_size, + nout=self.model_capacity[0], + nout_lstm=self.model_capacity[1]) + self.is_vr_51_model = True + else: + self.model_run = nets.determine_model_capacity(self.mp.param['bins'] * 2, nn_arch_size) + + self.model_run.load_state_dict(torch.load(self.model_path, map_location=cpu)) + self.model_run.to(device) + + self.running_inference_console_write() + + y_spec, v_spec = self.inference_vr(self.loading_mix(), device, self.aggressiveness) + if not self.is_vocal_split_model: + self.cache_source((y_spec, v_spec)) + self.write_to_console(DONE, base_text='') + + if self.is_secondary_model_activated and self.secondary_model: + self.secondary_source_primary, self.secondary_source_secondary = process_secondary_model(self.secondary_model, self.process_data, main_process_method=self.process_method, main_model_primary=self.primary_stem) + + if not self.is_secondary_stem_only: + primary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({self.primary_stem}).wav') + 
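# Convert the primary spectrogram back to a waveform, resampling to 44.1kHz if the model runs at a different rate +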
if not isinstance(self.primary_source, np.ndarray): + self.primary_source = self.spec_to_wav(y_spec).T + if not self.model_samplerate == 44100: + self.primary_source = librosa.resample(self.primary_source.T, orig_sr=self.model_samplerate, target_sr=44100).T + + self.primary_source_map = self.final_process(primary_stem_path, self.primary_source, self.secondary_source_primary, self.primary_stem, 44100) + + if not self.is_primary_stem_only: + secondary_stem_path = os.path.join(self.export_path, f'{self.audio_file_base}_({self.secondary_stem}).wav') + if not isinstance(self.secondary_source, np.ndarray): + self.secondary_source = self.spec_to_wav(v_spec).T + if not self.model_samplerate == 44100: + self.secondary_source = librosa.resample(self.secondary_source.T, orig_sr=self.model_samplerate, target_sr=44100).T + + self.secondary_source_map = self.final_process(secondary_stem_path, self.secondary_source, self.secondary_source_secondary, self.secondary_stem, 44100) + + clear_gpu_cache() + secondary_sources = {**self.primary_source_map, **self.secondary_source_map} + + self.process_vocal_split_chain(secondary_sources) + + if self.is_secondary_model: + return secondary_sources + + def loading_mix(self): + + X_wave, X_spec_s = {}, {} + + bands_n = len(self.mp.param['band']) + + audio_file = spec_utils.write_array_to_mem(self.audio_file, subtype=self.wav_type_set) + is_mp3 = audio_file.endswith('.mp3') if isinstance(audio_file, str) else False + + for d in range(bands_n, 0, -1): + bp = self.mp.param['band'][d] + + if OPERATING_SYSTEM == 'Darwin': + wav_resolution = 'polyphase' if SYSTEM_PROC == ARM or ARM in SYSTEM_ARCH else bp['res_type'] + else: + wav_resolution = bp['res_type'] + + if d == bands_n: # high-end band + X_wave[d], _ = librosa.load(audio_file, bp['sr'], False, dtype=np.float32, res_type=wav_resolution) + X_spec_s[d] = spec_utils.wave_to_spectrogram(X_wave[d], bp['hl'], bp['n_fft'], self.mp, band=d, is_v51_model=self.is_vr_51_model) + + if not np.any(X_wave[d]) and is_mp3: + X_wave[d] = rerun_mp3(audio_file, bp['sr']) + + if X_wave[d].ndim == 1: + X_wave[d] = np.asarray([X_wave[d], X_wave[d]]) + else: # lower bands + X_wave[d] = librosa.resample(X_wave[d+1], self.mp.param['band'][d+1]['sr'], bp['sr'], res_type=wav_resolution) + X_spec_s[d] = spec_utils.wave_to_spectrogram(X_wave[d], bp['hl'], bp['n_fft'], self.mp, band=d, is_v51_model=self.is_vr_51_model) + + if d == bands_n and self.high_end_process != 'none': + self.input_high_end_h = (bp['n_fft']//2 - bp['crop_stop']) + (self.mp.param['pre_filter_stop'] - self.mp.param['pre_filter_start']) + self.input_high_end = X_spec_s[d][:, bp['n_fft']//2-self.input_high_end_h:bp['n_fft']//2, :] + + X_spec = spec_utils.combine_spectrograms(X_spec_s, self.mp, is_v51_model=self.is_vr_51_model) + + del X_wave, X_spec_s, audio_file + + return X_spec + + def inference_vr(self, X_spec, device, aggressiveness): + def _execute(X_mag_pad, roi_size): + X_dataset = [] + patches = (X_mag_pad.shape[2] - 2 * self.model_run.offset) // roi_size + total_iterations = patches//self.batch_size if not self.is_tta else (patches//self.batch_size)*2 + for i in range(patches): + start = i * roi_size + X_mag_window = X_mag_pad[:, :, start:start + self.window_size] + X_dataset.append(X_mag_window) + + X_dataset = np.asarray(X_dataset) + self.model_run.eval() + with torch.no_grad(): + mask = [] + for i in range(0, patches, self.batch_size): + self.progress_value += 1 + if self.progress_value >= total_iterations: + self.progress_value = total_iterations + 
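# Advance the progress bar for this batch (inference spans the 0.1-0.9 portion of the bar) +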
self.set_progress_bar(0.1, 0.8/total_iterations*self.progress_value) + X_batch = X_dataset[i: i + self.batch_size] + X_batch = torch.from_numpy(X_batch).to(device) + pred = self.model_run.predict_mask(X_batch) + if not pred.size()[3] > 0: + raise Exception(ERROR_MAPPER[WINDOW_SIZE_ERROR]) + pred = pred.detach().cpu().numpy() + pred = np.concatenate(pred, axis=2) + mask.append(pred) + if len(mask) == 0: + raise Exception(ERROR_MAPPER[WINDOW_SIZE_ERROR]) + + mask = np.concatenate(mask, axis=2) + return mask + + def postprocess(mask, X_mag, X_phase): + is_non_accom_stem = False + for stem in NON_ACCOM_STEMS: + if stem == self.primary_stem: + is_non_accom_stem = True + + mask = spec_utils.adjust_aggr(mask, is_non_accom_stem, aggressiveness) + + if self.is_post_process: + mask = spec_utils.merge_artifacts(mask, thres=self.post_process_threshold) + + y_spec = mask * X_mag * np.exp(1.j * X_phase) + v_spec = (1 - mask) * X_mag * np.exp(1.j * X_phase) + + return y_spec, v_spec + + X_mag, X_phase = spec_utils.preprocess(X_spec) + n_frame = X_mag.shape[2] + pad_l, pad_r, roi_size = spec_utils.make_padding(n_frame, self.window_size, self.model_run.offset) + X_mag_pad = np.pad(X_mag, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant') + X_mag_pad /= X_mag_pad.max() + mask = _execute(X_mag_pad, roi_size) + + if self.is_tta: + pad_l += roi_size // 2 + pad_r += roi_size // 2 + X_mag_pad = np.pad(X_mag, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant') + X_mag_pad /= X_mag_pad.max() + mask_tta = _execute(X_mag_pad, roi_size) + mask_tta = mask_tta[:, :, roi_size // 2:] + mask = (mask[:, :, :n_frame] + mask_tta[:, :, :n_frame]) * 0.5 + else: + mask = mask[:, :, :n_frame] + + y_spec, v_spec = postprocess(mask, X_mag, X_phase) + + return y_spec, v_spec + + def spec_to_wav(self, spec): + if self.high_end_process.startswith('mirroring') and isinstance(self.input_high_end, np.ndarray) and self.input_high_end_h: + input_high_end_ = spec_utils.mirroring(self.high_end_process, spec, self.input_high_end, self.mp) + wav = spec_utils.cmb_spectrogram_to_wave(spec, self.mp, self.input_high_end_h, input_high_end_, is_v51_model=self.is_vr_51_model) + else: + wav = spec_utils.cmb_spectrogram_to_wave(spec, self.mp, is_v51_model=self.is_vr_51_model) + + return wav + +def process_secondary_model(secondary_model: ModelData, + process_data, + main_model_primary_stem_4_stem=None, + is_source_load=False, + main_process_method=None, + is_pre_proc_model=False, + is_return_dual=True, + main_model_primary=None): + + if not is_pre_proc_model: + process_iteration = process_data['process_iteration'] + process_iteration() + + if secondary_model.process_method == VR_ARCH_TYPE: + seperator = SeperateVR(secondary_model, process_data, main_model_primary_stem_4_stem=main_model_primary_stem_4_stem, main_process_method=main_process_method, main_model_primary=main_model_primary) + if secondary_model.process_method == MDX_ARCH_TYPE: + if secondary_model.is_mdx_c: + seperator = SeperateMDXC(secondary_model, process_data, main_model_primary_stem_4_stem=main_model_primary_stem_4_stem, main_process_method=main_process_method, is_return_dual=is_return_dual, main_model_primary=main_model_primary) + else: + seperator = SeperateMDX(secondary_model, process_data, main_model_primary_stem_4_stem=main_model_primary_stem_4_stem, main_process_method=main_process_method, main_model_primary=main_model_primary) + if secondary_model.process_method == DEMUCS_ARCH_TYPE: + seperator = SeperateDemucs(secondary_model, process_data, 
main_model_primary_stem_4_stem=main_model_primary_stem_4_stem, main_process_method=main_process_method, is_return_dual=is_return_dual, main_model_primary=main_model_primary) + + secondary_sources = seperator.seperate() + + if type(secondary_sources) is dict and not is_source_load and not is_pre_proc_model: + return gather_sources(secondary_model.primary_model_primary_stem, secondary_stem(secondary_model.primary_model_primary_stem), secondary_sources) + else: + return secondary_sources + +def process_chain_model(secondary_model: ModelData, + process_data, + vocal_stem_path, + master_vocal_source, + master_inst_source=None): + + process_iteration = process_data['process_iteration'] + process_iteration() + + if secondary_model.bv_model_rebalance: + vocal_source = spec_utils.reduce_mix_bv(master_inst_source, master_vocal_source, reduction_rate=secondary_model.bv_model_rebalance) + else: + vocal_source = master_vocal_source + + vocal_stem_path = [vocal_source, os.path.splitext(os.path.basename(vocal_stem_path))[0]] + + if secondary_model.process_method == VR_ARCH_TYPE: + seperator = SeperateVR(secondary_model, process_data, vocal_stem_path=vocal_stem_path, master_inst_source=master_inst_source, master_vocal_source=master_vocal_source) + if secondary_model.process_method == MDX_ARCH_TYPE: + if secondary_model.is_mdx_c: + seperator = SeperateMDXC(secondary_model, process_data, vocal_stem_path=vocal_stem_path, master_inst_source=master_inst_source, master_vocal_source=master_vocal_source) + else: + seperator = SeperateMDX(secondary_model, process_data, vocal_stem_path=vocal_stem_path, master_inst_source=master_inst_source, master_vocal_source=master_vocal_source) + if secondary_model.process_method == DEMUCS_ARCH_TYPE: + seperator = SeperateDemucs(secondary_model, process_data, vocal_stem_path=vocal_stem_path, master_inst_source=master_inst_source, master_vocal_source=master_vocal_source) + + secondary_sources = seperator.seperate() + + if type(secondary_sources) is dict: + return secondary_sources + else: + return None + +def gather_sources(primary_stem_name, secondary_stem_name, secondary_sources: dict): + + source_primary = False + source_secondary = False + + for key, value in secondary_sources.items(): + if key in primary_stem_name: + source_primary = value + if key in secondary_stem_name: + source_secondary = value + + return source_primary, source_secondary + +def prepare_mix(mix): + + audio_path = mix + + if not isinstance(mix, np.ndarray): + mix, sr = librosa.load(mix, mono=False, sr=44100) + else: + mix = mix.T + + if isinstance(audio_path, str): + if not np.any(mix) and audio_path.endswith('.mp3'): + mix = rerun_mp3(audio_path) + + if mix.ndim == 1: + mix = np.asfortranarray([mix,mix]) + + return mix + +def rerun_mp3(audio_file, sample_rate=44100): + + with audioread.audio_open(audio_file) as f: + track_length = int(f.duration) + + return librosa.load(audio_file, duration=track_length, mono=False, sr=sample_rate)[0] + +def save_format(audio_path, save_format, mp3_bit_set): + + if not save_format == WAV: + + if OPERATING_SYSTEM == 'Darwin': + FFMPEG_PATH = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'ffmpeg') + pydub.AudioSegment.converter = FFMPEG_PATH + + musfile = pydub.AudioSegment.from_wav(audio_path) + + if save_format == FLAC: + audio_path_flac = audio_path.replace(".wav", ".flac") + musfile.export(audio_path_flac, format="flac") + + if save_format == MP3: + audio_path_mp3 = audio_path.replace(".wav", ".mp3") + try: + musfile.export(audio_path_mp3, format="mp3", 
bitrate=mp3_bit_set, codec="libmp3lame") + except Exception as e: + print(e) + musfile.export(audio_path_mp3, format="mp3", bitrate=mp3_bit_set) + + try: + os.remove(audio_path) + except Exception as e: + print(e) + +def pitch_shift(mix): + new_sr = 31183 + + # Resample audio file + resampled_audio = signal.resample_poly(mix, new_sr, 44100) + + return resampled_audio + +def list_to_dictionary(lst): + dictionary = {item: index for index, item in enumerate(lst)} + return dictionary + +def vr_denoiser(X, device, hop_length=1024, n_fft=2048, cropsize=256, is_deverber=False, model_path=None): + batchsize = 4 + + if is_deverber: + nout, nout_lstm = 64, 128 + mp = ModelParameters(os.path.join('lib_v5', 'vr_network', 'modelparams', '4band_v3.json')) + n_fft = mp.param['bins'] * 2 + else: + mp = None + hop_length=1024 + nout, nout_lstm = 16, 128 + + model = nets_new.CascadedNet(n_fft, nout=nout, nout_lstm=nout_lstm) + model.load_state_dict(torch.load(model_path, map_location=cpu)) + model.to(device) + + if mp is None: + X_spec = spec_utils.wave_to_spectrogram_old(X, hop_length, n_fft) + else: + X_spec = loading_mix(X.T, mp) + + #PreProcess + X_mag = np.abs(X_spec) + X_phase = np.angle(X_spec) + + #Sep + n_frame = X_mag.shape[2] + pad_l, pad_r, roi_size = spec_utils.make_padding(n_frame, cropsize, model.offset) + X_mag_pad = np.pad(X_mag, ((0, 0), (0, 0), (pad_l, pad_r)), mode='constant') + X_mag_pad /= X_mag_pad.max() + + X_dataset = [] + patches = (X_mag_pad.shape[2] - 2 * model.offset) // roi_size + for i in range(patches): + start = i * roi_size + X_mag_crop = X_mag_pad[:, :, start:start + cropsize] + X_dataset.append(X_mag_crop) + + X_dataset = np.asarray(X_dataset) + + model.eval() + + with torch.no_grad(): + mask = [] + # To reduce the overhead, dataloader is not used. 
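# Feed the spectrogram patches through the network in fixed-size batches and collect the predicted masks +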
+ for i in range(0, patches, batchsize): + X_batch = X_dataset[i: i + batchsize] + X_batch = torch.from_numpy(X_batch).to(device) + + pred = model.predict_mask(X_batch) + + pred = pred.detach().cpu().numpy() + pred = np.concatenate(pred, axis=2) + mask.append(pred) + + mask = np.concatenate(mask, axis=2) + + mask = mask[:, :, :n_frame] + + #Post Proc + if is_deverber: + v_spec = mask * X_mag * np.exp(1.j * X_phase) + y_spec = (1 - mask) * X_mag * np.exp(1.j * X_phase) + else: + v_spec = (1 - mask) * X_mag * np.exp(1.j * X_phase) + + if mp is None: + wave = spec_utils.spectrogram_to_wave_old(v_spec, hop_length=1024) + else: + wave = spec_utils.cmb_spectrogram_to_wave(v_spec, mp, is_v51_model=True).T + + wave = spec_utils.match_array_shapes(wave, X) + + if is_deverber: + wave_2 = spec_utils.cmb_spectrogram_to_wave(y_spec, mp, is_v51_model=True).T + wave_2 = spec_utils.match_array_shapes(wave_2, X) + return wave, wave_2 + else: + return wave + +def loading_mix(X, mp): + + X_wave, X_spec_s = {}, {} + + bands_n = len(mp.param['band']) + + for d in range(bands_n, 0, -1): + bp = mp.param['band'][d] + + if OPERATING_SYSTEM == 'Darwin': + wav_resolution = 'polyphase' if SYSTEM_PROC == ARM or ARM in SYSTEM_ARCH else bp['res_type'] + else: + wav_resolution = 'polyphase'#bp['res_type'] + + if d == bands_n: # high-end band + X_wave[d] = X + + else: # lower bands + X_wave[d] = librosa.resample(X_wave[d+1], mp.param['band'][d+1]['sr'], bp['sr'], res_type=wav_resolution) + + X_spec_s[d] = spec_utils.wave_to_spectrogram(X_wave[d], bp['hl'], bp['n_fft'], mp, band=d, is_v51_model=True) + + # if d == bands_n and is_high_end_process: + # input_high_end_h = (bp['n_fft']//2 - bp['crop_stop']) + (mp.param['pre_filter_stop'] - mp.param['pre_filter_start']) + # input_high_end = X_spec_s[d][:, bp['n_fft']//2-input_high_end_h:bp['n_fft']//2, :] + + X_spec = spec_utils.combine_spectrograms(X_spec_s, mp) + + del X_wave, X_spec_s + + return X_spec