import gradio as gr
import numpy as np

from model.DiffSynthSampler import DiffSynthSampler
from tools import safe_int, read_wav_to_numpy
from webUI.natural_language_guided.utils import latent_representation_to_Gradio_image, \
    encodeBatch2GradioOutput_STFT, add_instrument
from webUI.natural_language_guided_4.utils import resize_image_to_aspect_ratio


def get_instruments_module(gradioWebUI, virtual_instruments_state):
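    """Build the "instruments" tab of the web UI.

    The left column demonstrates gr.render: the input text is split into one
    textbox per character, and the submit button merges them back into a
    single string. The right column lists the instruments stored in
    virtual_instruments_state in a dropdown and plays the selected one.
    """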

    with gr.Tab("intruments"):
        gr.Markdown("Use neural networks to select random sounds using your favorite instrument!")
        with gr.Row(variant="panel"):
            with gr.Column(scale=1):
                input_text = gr.Textbox(label="input")

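                # gr.render re-runs show_split whenever input_text changes,
                # rebuilding one textbox per character. submit_button and
                # merged_textbox are defined further below, but they are already
                # bound by the time this function actually executes.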
                @gr.render(inputs=input_text)
                def show_split(text):
                    textboxes = []

                    if len(text) == 0:
                        gr.Markdown("## No Input Provided")
                    else:
                        for letter in text:
                            textboxes.append(gr.Textbox(letter, interactive=True))

                    def merge(*split_texts):
                        # Concatenate the per-letter textbox values back into one string.
                        return "".join(split_texts)

                    submit_button.click(merge, inputs=textboxes, outputs=merged_textbox)

                submit_button = gr.Button("submit")

                merged_textbox = gr.Textbox(placeholder="placeholder", interactive=False)

            with gr.Column(scale=1):

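                # Re-rendered whenever virtual_instruments_state changes, so the
                # dropdown always reflects the instruments currently stored in
                # the shared state.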
                @gr.render(inputs=virtual_instruments_state)
                def check_instruments(virtual_instruments_dict):
                    virtual_instruments = virtual_instruments_dict["virtual_instruments"]
                    instrument_names = list(virtual_instruments.keys())

                    instrument_dropdown = gr.Dropdown(
                        instrument_names, label="instrument", info="info placeholder"
                    )

                    def select_instrument(instrument):
                        print(f"instrument: {instrument}")
                        sr, signal = virtual_instruments[instrument]["signal"]
                        return {selected_instrument_audio: (sr, signal)}

                    instrument_dropdown.select(select_instrument, inputs=instrument_dropdown,
                                               outputs=selected_instrument_audio)

                selected_instrument_audio = gr.Audio(type="numpy", label="Play", scale=1, interactive=False)
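

# --- Standalone usage sketch (not part of the original app) -------------------
# A minimal way to mount this tab on its own, assuming the state schema
# inferred from check_instruments/select_instrument above:
#   {"virtual_instruments": {name: {"signal": (sample_rate, np.ndarray)}}}
# gradioWebUI is unused inside this module, so None is passed for it here.
if __name__ == "__main__":
    _sr = 16000
    _t = np.linspace(0.0, 1.0, _sr, endpoint=False)
    _sine = (0.5 * np.sin(2 * np.pi * 440.0 * _t)).astype(np.float32)

    with gr.Blocks() as demo:
        demo_state = gr.State(
            {"virtual_instruments": {"sine_440Hz": {"signal": (_sr, _sine)}}}
        )
        get_instruments_module(None, demo_state)

    demo.launch()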