sander-wood committed
Commit: b3bc81c
1 Parent(s): 4c5fbb0

Upload app.py

Files changed (1)
  1. app.py +106 -0
app.py ADDED
@@ -0,0 +1,106 @@
+ import gradio as gr
+ import torch
+ import random
+ from unidecode import unidecode
+ from samplings import top_p_sampling, temperature_sampling
+ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
+
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # use a GPU when one is available
+
+ description = """
+ <div>
+
+ <a style="display:inline-block" href='https://github.com/sander-wood/text-to-music'><img src='https://img.shields.io/github/stars/sander-wood/text-to-music?style=social' /></a>
+ <a style="display:inline-block" href="https://arxiv.org/pdf/2211.11216.pdf"><img src="https://img.shields.io/badge/arXiv-2211.11216-b31b1b.svg"></a>
+ <a class="duplicate-button" style="display:inline-block" target="_blank" href="https://huggingface.co/spaces/sander-wood/text-to-music?duplicate=true"><img style="margin-top:0;margin-bottom:0" src="https://huggingface.co/datasets/huggingface/badges/raw/main/duplicate-this-space-md-dark.svg" alt="Duplicate Space"></a>
+ </div>
+
+ ## ℹ️ How to use this demo?
+ 1. Enter a query in the text box.
+ 2. Optionally, adjust the generation parameters: number of tunes, maximum length, top-p, temperature, and random seed.
+ 3. Click "Submit" and wait for the result.
+ 4. The generated ABC notation can be converted to MIDI or PDF with [EasyABC](https://sourceforge.net/projects/easyabc/); you can also render it directly with this [online renderer](https://ldzhangyx.github.io/abc/).
+
+ ## ❕Notice
+ - The text box is case-sensitive.
+ - The demo is based on BART-base and fine-tuned on the Textune dataset (282,870 text-music pairs).
+ - The demo only supports English input.
+ - The demo is still at an early stage, and the generated music is not perfect. If you have any suggestions, please feel free to contact me via [email](mailto:shangda@mail.ccom.edu.cn).
+ """
+
+
+ examples = [
+     ["This is a traditional Irish dance music.\nNote Length-1/8\nMeter-6/8\nKey-D", 3, 1024, 0.9, 1.0, 0],
+     ["This is a jazz-swing lead sheet with chord and vocal.", 3, 1024, 0.9, 1.0, 0]
+ ]
+
+
+ def generate_abc(text, num_tunes, max_length, top_p, temperature, seed):
+
+     try:  # fall back to unseeded sampling if the seed field is not an integer
+         seed = int(seed)
+     except (TypeError, ValueError):
+         seed = None
+
+     print("Input Text: " + text)
+     text = unidecode(text)  # strip accents/diacritics; the model expects plain English text
+     tokenizer = AutoTokenizer.from_pretrained('sander-wood/text-to-music')
+     model = AutoModelForSeq2SeqLM.from_pretrained('sander-wood/text-to-music')  # BART-base fine-tuned on Textune
+     model = model.to(device)
+
+     input_ids = tokenizer(text,
+                           return_tensors='pt',
+                           truncation=True,
+                           max_length=max_length)['input_ids'].to(device)
+     decoder_start_token_id = model.config.decoder_start_token_id
+     eos_token_id = model.config.eos_token_id
+     random.seed(seed)
+     tunes = ""
+
+     for n_idx in range(num_tunes):
+         print("\nX:" + str(n_idx + 1) + "\n", end="")
+         tunes += "X:" + str(n_idx + 1) + "\n"  # each tune gets its own ABC index header
+         decoder_input_ids = torch.tensor([[decoder_start_token_id]])
+
+         for t_idx in range(max_length):
+
+             if seed is not None:
+                 n_seed = random.randint(0, 1000000)  # derive a fresh per-step seed so runs stay reproducible
+                 random.seed(n_seed)
+             else:
+                 n_seed = None
+             outputs = model(input_ids=input_ids,
+                             decoder_input_ids=decoder_input_ids.to(device))
+             probs = outputs.logits[0][-1]
+             probs = torch.softmax(probs, dim=-1).cpu().detach().numpy()  # distribution over the next token
+             sampled_id = temperature_sampling(probs=top_p_sampling(probs,
+                                                                    top_p=top_p,
+                                                                    seed=n_seed,
+                                                                    return_probs=True),
+                                               seed=n_seed,
+                                               temperature=temperature)  # nucleus filtering, then temperature sampling
+             decoder_input_ids = torch.cat((decoder_input_ids, torch.tensor([[sampled_id]])), 1)
+             if sampled_id != eos_token_id:
+                 sampled_token = tokenizer.decode([sampled_id])
+                 print(sampled_token, end="")
+                 tunes += sampled_token
+             else:
+                 tunes += '\n'
+                 break
+
+     return tunes
+
+ input_text = gr.inputs.Textbox(lines=5, label="Input Text", placeholder="Describe the music you want to generate ...")
+ input_num_tunes = gr.inputs.Slider(minimum=1, maximum=10, step=1, default=1, label="Number of Tunes")
+ input_max_length = gr.inputs.Slider(minimum=10, maximum=1000, step=10, default=500, label="Max Length")
+ input_top_p = gr.inputs.Slider(minimum=0.0, maximum=1.0, step=0.05, default=0.9, label="Top P")
+ input_temperature = gr.inputs.Slider(minimum=0.0, maximum=2.0, step=0.1, default=1.0, label="Temperature")
+ input_seed = gr.inputs.Textbox(lines=1, label="Seed (int)", default="None")
+ output_abc = gr.outputs.Textbox(label="Generated Tunes")
+
+ gr.Interface(fn=generate_abc,
+              inputs=[input_text, input_num_tunes, input_max_length, input_top_p, input_temperature, input_seed],
+              outputs=output_abc,
+              title="Textune: Generating Tune from Text",
+              description=description,
+              examples=examples).launch(debug=True)
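
A minimal sketch of driving the uploaded `generate_abc` function outside the Gradio UI, reusing the first bundled example prompt. It assumes the function is already defined in the current session (importing `app.py` as-is would immediately call `.launch()`), and it uses `music21` for ABC-to-MIDI conversion, which is an assumption of this sketch; the demo's description points to EasyABC or the online renderer instead.

```python
# Hypothetical offline usage (not part of the uploaded app.py):
# - generate_abc(...) is assumed to be defined in this session,
#   e.g. pasted above the gr.Interface(...).launch() call.
# - music21 is assumed to be installed as one way to render ABC to MIDI.
from music21 import converter

abc = generate_abc(
    text="This is a traditional Irish dance music.\nNote Length-1/8\nMeter-6/8\nKey-D",
    num_tunes=1,        # one tune keeps the ABC output easy to parse
    max_length=500,
    top_p=0.9,
    temperature=1.0,
    seed=0,
)
print(abc)

# If the model produced well-formed ABC, render it to a MIDI file.
score = converter.parse(abc, format='abc')
score.write('midi', fp='tune.mid')
```

With `num_tunes` greater than one, the returned string concatenates several `X:`-indexed tunes, so converting one tune at a time keeps the parsing step simple.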