Ron Au committed on
Commit
d3a278d
1 Parent(s): 37537a6

Initial Commit

Browse files
.vscode/settings.json ADDED
@@ -0,0 +1,3 @@
 
 
 
1
+ {
2
+ "python.formatting.provider": "black"
3
+ }
app.py ADDED
@@ -0,0 +1,143 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2022 Tristan Behrens.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+
17
+ from flask import Flask, render_template, request, send_file, jsonify, redirect, url_for
18
+ from PIL import Image
19
+ import os
20
+ import io
21
+ import random
22
+ import base64
23
+ import torch
24
+ import wave
25
+ from source.logging import create_logger
26
+ from source.tokensequence import token_sequence_to_audio, token_sequence_to_image
27
+ from source import constants
28
+ from transformers import AutoTokenizer, AutoModelForCausalLM
29
+
30
+ logger = create_logger(__name__)
31
+
32
+ # Load the auth-token from authtoken.txt.
33
+ # auth_token = None
34
+ # with open("authtoken.txt", "r") as authtoken_file:
35
+ # auth_token = authtoken_file.read().strip()
36
+ auth_token = os.getenv("authtoken")
37
+
38
+ # Loading the model and its tokenizer.
39
+ logger.info("Loading tokenizer and model...")
40
+ tokenizer = AutoTokenizer.from_pretrained("ai-guru/lakhclean_mmmtrack_4bars_d-2048", use_auth_token=auth_token)
41
+ model = AutoModelForCausalLM.from_pretrained("ai-guru/lakhclean_mmmtrack_4bars_d-2048", use_auth_token=auth_token)
42
+ logger.info("Done.")
43
+
44
+ # Create the app.
45
+ logger.info("Creating app...")
46
+ app = Flask(__name__)
47
+ logger.info("Done.")
48
+
49
+ # Route for the loading page.
50
+ @app.route("/")
51
+ def index():
52
+ return render_template(
53
+ "composer.html",
54
+ compose_styles = constants.get_compose_styles_for_ui(),
55
+ densities=constants.get_densities_for_ui(),
56
+ temperatures=constants.get_temperatures_for_ui(),
57
+ )
58
+
59
+ @app.route("/compose", methods=["POST"])
60
+ def compose():
61
+
62
+ # Get the parameters as JSON.
63
+ params = request.get_json()
64
+ music_style = params["music_style"]
65
+ density = params["density"]
66
+ temperature = params["temperature"]
67
+
68
+ instruments = constants.get_instruments(music_style)
69
+ density = constants.get_density(density)
70
+ temperature = constants.get_temperature(temperature)
71
+ print(f"instruments: {instruments} density: {density} temperature: {temperature}")
72
+
73
+ # Generate with the given parameters.
74
+ logger.info(f"Generating token sequence...")
75
+ generated_sequence = generate_sequence(instruments, density, temperature)
76
+ logger.info(f"Generated token sequence: {generated_sequence}")
77
+
78
+ # Get the audio data as a array of int16.
79
+ logger.info("Generating audio...")
80
+ sample_rate, audio_data = token_sequence_to_audio(generated_sequence)
81
+ logger.info(f"Done. Audio data: {len(audio_data)}")
82
+
83
+ # Encode the audio-data as wave file in memory. Use the wave module.
84
+ audio_data_bytes = io.BytesIO()
85
+ wave_file = wave.open(audio_data_bytes, "wb")
86
+ wave_file.setframerate(sample_rate)
87
+ wave_file.setnchannels(1)
88
+ wave_file.setsampwidth(2)
89
+ wave_file.writeframes(audio_data)
90
+ wave_file.close()
91
+
92
+ # Return the audio-data as a base64-encoded string.
93
+ audio_data_bytes.seek(0)
94
+ audio_data_base64 = base64.b64encode(audio_data_bytes.read()).decode("utf-8")
95
+ audio_data_bytes.close()
96
+
97
+ # Convert the audio data to an PIL image.
98
+ image = token_sequence_to_image(generated_sequence)
99
+
100
+ # Save PIL image to harddrive as PNG.
101
+ logger.debug(f"Saving image to harddrive... {type(image)}")
102
+ image_file_name = "compose.png"
103
+ image.save(image_file_name, "PNG")
104
+
105
+ # Save image to virtual file.
106
+ img_io = io.BytesIO()
107
+ image.save(img_io, 'PNG', quality=70)
108
+ img_io.seek(0)
109
+
110
+ # Return the image as a base64-encoded string.
111
+ image_data_base64 = base64.b64encode(img_io.read()).decode("utf-8")
112
+ img_io.close()
113
+
114
+ # Return.
115
+ return jsonify({
116
+ "tokens": generated_sequence,
117
+ "audio": "data:audio/wav;base64," + audio_data_base64,
118
+ "image": "data:image/png;base64," + image_data_base64,
119
+ "status": "OK"
120
+ })
121
+
122
+ def generate_sequence(instruments, density, temperature):
123
+
124
+ instruments = instruments[::]
125
+ random.shuffle(instruments)
126
+
127
+ generated_ids = tokenizer.encode("PIECE_START", return_tensors="pt")[0]
128
+
129
+ for instrument in instruments:
130
+ more_ids = tokenizer.encode(f"TRACK_START INST={instrument} DENSITY={density}", return_tensors="pt")[0]
131
+ generated_ids = torch.cat((generated_ids, more_ids))
132
+ generated_ids = generated_ids.unsqueeze(0)
133
+
134
+ generated_ids = model.generate(
135
+ generated_ids,
136
+ max_length=2048,
137
+ do_sample=True,
138
+ temperature=temperature,
139
+ eos_token_id=tokenizer.encode("TRACK_END")[0]
140
+ )[0]
141
+
142
+ generated_sequence = tokenizer.decode(generated_ids)
143
+ return generated_sequence
packages.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
1
+ libfluidsynth1
2
+ fluid-soundfont-gm
3
+ build-essential
4
+ libasound2-dev
5
+ libjack-dev
6
+ ffmpeg
requirements.txt ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
1
+ transformers
2
+ tokenizers
3
+ datasets
4
+ flask
5
+ torch
6
+ pyfluidsynth
7
+ note_seq
source/constants.py ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ compose_styles_config = {
2
+ "piano": {
3
+ "readable": "Piano",
4
+ "instruments": ["1"],
5
+ },
6
+ "chamber": {
7
+ "readable": "Chamber Music",
8
+ "instruments": ["0", "40", "42"],
9
+ },
10
+ "rock_and_metal": {
11
+ "readable": "Rock and Metal",
12
+ "instruments": ["DRUMS", "30", "34"],
13
+ },
14
+ "synth": {
15
+ "readable": "Synthesizer",
16
+ "instruments": ["DRUMS", "38", "80"],
17
+ },
18
+ "church": {
19
+ "readable": "Church",
20
+ "instruments": ["19", "52"],
21
+ },
22
+ "timpani_strings_harp": {
23
+ "readable": "Timpani, Contrabass, Harp",
24
+ "instruments": ["47", "43", "46"],
25
+ },
26
+ "country": {
27
+ "readable": "Country",
28
+ "instruments": ["DRUMS", "22", "32", "25"],
29
+ },
30
+ }
31
+
32
+ densities_config = {
33
+ "low": {
34
+ "readable": "Low",
35
+ "density": 4,
36
+ },
37
+ "medium": {
38
+ "readable": "Medium",
39
+ "density": 6,
40
+ },
41
+ "high": {
42
+ "readable": "High",
43
+ "density": 8,
44
+ },
45
+ }
46
+
47
+ temperatures_config = {
48
+ "low": {
49
+ "readable": "Low",
50
+ "temperature": 0.5,
51
+ },
52
+ "medium": {
53
+ "readable": "Medium",
54
+ "temperature": 0.75,
55
+ },
56
+ "high": {
57
+ "readable": "High",
58
+ "temperature": 1.0,
59
+ },
60
+ "very_high": {
61
+ "readable": "Very High",
62
+ "temperature": 1.25,
63
+ },
64
+ }
65
+
66
+ def get_compose_styles_for_ui():
67
+ compose_styles = [[key, compose_styles_config[key]["readable"]] for key, value in compose_styles_config.items()]
68
+ return compose_styles
69
+
70
+ def get_densities_for_ui():
71
+ densities = [[key, densities_config[key]["readable"]] for key, value in densities_config.items()]
72
+ return densities
73
+
74
+ def get_temperatures_for_ui():
75
+ temperatures = [[key, temperatures_config[key]["readable"]] for key, value in temperatures_config.items()]
76
+ return temperatures
77
+
78
+ def get_instruments(key):
79
+ return compose_styles_config[key]["instruments"]
80
+
81
+ def get_density(key):
82
+ return densities_config[key]["density"]
83
+
84
+ def get_temperature(key):
85
+ return temperatures_config[key]["temperature"]
source/logging.py ADDED
@@ -0,0 +1,45 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright 2021 Tristan Behrens.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Lint as: python3
16
+
17
+ import logging
18
+
19
+ loggers_dict = {}
20
+
21
+ def create_logger(name:str):
22
+ global loggers_dict
23
+ if name in loggers_dict:
24
+ return loggers_dict[name]
25
+ else:
26
+ logger = logging.getLogger(name)
27
+ loggers_dict[name] = logger
28
+ logger.setLevel(logging.DEBUG)
29
+ handler = logging.StreamHandler()
30
+ handler.setLevel(logging.DEBUG)
31
+ formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
32
+ handler.setFormatter(formatter)
33
+ logger.addHandler(handler)
34
+ logger.propagate = False
35
+ return logger
36
+
37
+ def set_log_level(name, level):
38
+ logger_names = []
39
+ if name == "all":
40
+ logger_names = list(loggers_dict.keys())
41
+ else:
42
+ logger_names = [name]
43
+ for name in logger_names:
44
+ logger = loggers_dict[name]
45
+ logger.setLevel(level)
source/tokensequence.py ADDED
@@ -0,0 +1,160 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import note_seq
2
+ import numpy as np
3
+ from PIL import Image
4
+
5
+ NOTE_LENGTH_16TH_120BPM = 0.25 * 60 / 120
6
+ BAR_LENGTH_120BPM = 4.0 * 60 / 120
7
+ SAMPLE_RATE=44100
8
+
9
+ def token_sequence_to_audio(token_sequence):
10
+ note_sequence = token_sequence_to_note_sequence(token_sequence)
11
+ synth = note_seq.midi_synth.fluidsynth
12
+ array_of_floats = synth(note_sequence, sample_rate=SAMPLE_RATE)
13
+ note_plot = note_seq.plot_sequence(note_sequence, False)
14
+ array_of_floats /=1.414
15
+ array_of_floats *= 32767
16
+ int16_data = array_of_floats.astype(np.int16)
17
+ return SAMPLE_RATE, int16_data
18
+
19
+ def token_sequence_to_image(token_sequence):
20
+
21
+ note_sequence = token_sequence_to_note_sequence(token_sequence)
22
+
23
+ # Find minumum and maximum pitch.
24
+ min_pitch = 128
25
+ max_pitch = 0
26
+ for note in note_sequence.notes:
27
+ if note.pitch < min_pitch:
28
+ min_pitch = note.pitch
29
+ if note.pitch > max_pitch:
30
+ max_pitch = note.pitch
31
+
32
+ image_height = max_pitch - min_pitch + 1
33
+ image_width = int(16 * 4)
34
+
35
+ color = (150, 15, 15)
36
+
37
+ # Indicate that this bar was used for composition.
38
+ image = Image.new("RGB", (image_width, image_height), color)
39
+
40
+ colors = [(0, 0, 0), (255, 255, 255), (255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255)]
41
+
42
+ instrument_to_color_index = {}
43
+
44
+ # Draw the notes.
45
+ for note in note_sequence.notes:
46
+ x = int(note.start_time / note_sequence.total_time * image_width)
47
+ y = note.pitch - min_pitch
48
+ width = int((note.end_time - note.start_time) / note_sequence.total_time * image_width)
49
+ height = 1
50
+
51
+ if note.instrument not in instrument_to_color_index:
52
+ instrument_to_color_index[note.instrument] = len(instrument_to_color_index)
53
+ color_index = instrument_to_color_index[note.instrument]
54
+ color = colors[color_index]
55
+
56
+ #color = (255, 255, 255)
57
+ image.paste(color, (x, y, x + width, y + height))
58
+
59
+ # Rescale and rotate.
60
+ factor = 8
61
+ image = image.resize((image_width * factor, image_height * factor), Image.NEAREST)
62
+ image = image.transpose(Image.FLIP_TOP_BOTTOM)
63
+
64
+ return image
65
+
66
+
67
+
68
+
69
+ def token_sequence_to_note_sequence(token_sequence, use_program=True, use_drums=True, instrument_mapper=None, only_piano=False):
70
+ if isinstance(token_sequence, str):
71
+ token_sequence = token_sequence.split()
72
+ note_sequence = empty_note_sequence()
73
+
74
+ # Render all notes.
75
+ current_program = 1
76
+ current_is_drum = False
77
+ current_instrument = 0
78
+ track_count = 0
79
+ for token_index, token in enumerate(token_sequence):
80
+
81
+ if token == "PIECE_START":
82
+ pass
83
+ elif token == "PIECE_END":
84
+ print("The end.")
85
+ break
86
+ elif token == "TRACK_START":
87
+ current_bar_index = 0
88
+ track_count += 1
89
+ pass
90
+ elif token == "TRACK_END":
91
+ pass
92
+ elif token.startswith("INST"):
93
+ instrument = token.split("=")[-1]
94
+ if instrument != "DRUMS" and use_program:
95
+ if instrument_mapper is not None:
96
+ if instrument in instrument_mapper:
97
+ instrument = instrument_mapper[instrument]
98
+ current_program = int(instrument)
99
+ current_instrument = track_count
100
+ current_is_drum = False
101
+ if instrument == "DRUMS" and use_drums:
102
+ current_instrument = 0
103
+ current_program = 0
104
+ current_is_drum = True
105
+ elif token == "BAR_START":
106
+ current_time = current_bar_index * BAR_LENGTH_120BPM
107
+ current_notes = {}
108
+ elif token == "BAR_END":
109
+ current_bar_index += 1
110
+ pass
111
+ elif token.startswith("NOTE_ON"):
112
+ pitch = int(token.split("=")[-1])
113
+ note = note_sequence.notes.add()
114
+ note.start_time = current_time
115
+ note.end_time = current_time + 4 * NOTE_LENGTH_16TH_120BPM
116
+ note.pitch = pitch
117
+ note.instrument = current_instrument
118
+ note.program = current_program
119
+ note.velocity = 80
120
+ note.is_drum = current_is_drum
121
+ current_notes[pitch] = note
122
+ elif token.startswith("NOTE_OFF"):
123
+ pitch = int(token.split("=")[-1])
124
+ if pitch in current_notes:
125
+ note = current_notes[pitch]
126
+ note.end_time = current_time
127
+ elif token.startswith("TIME_DELTA"):
128
+ delta = float(token.split("=")[-1]) * NOTE_LENGTH_16TH_120BPM
129
+ current_time += delta
130
+ elif token.startswith("DENSITY="):
131
+ pass
132
+ elif token == "[PAD]":
133
+ pass
134
+ else:
135
+ pass
136
+
137
+ # Make the instruments right.
138
+ instruments_drums = []
139
+ for note in note_sequence.notes:
140
+ pair = [note.program, note.is_drum]
141
+ if pair not in instruments_drums:
142
+ instruments_drums += [pair]
143
+ note.instrument = instruments_drums.index(pair)
144
+
145
+ if only_piano:
146
+ for note in note_sequence.notes:
147
+ if not note.is_drum:
148
+ note.instrument = 0
149
+ note.program = 0
150
+
151
+ note_sequence.total_time = current_time
152
+
153
+ return note_sequence
154
+
155
+ def empty_note_sequence(qpm=120.0, total_time=0.0):
156
+ note_sequence = note_seq.protobuf.music_pb2.NoteSequence()
157
+ note_sequence.tempos.add().qpm = qpm
158
+ note_sequence.ticks_per_quarter = note_seq.constants.STANDARD_PPQ
159
+ note_sequence.total_time = total_time
160
+ return note_sequence
startserver.sh ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
1
+ #!/usr/bin/sh
2
+
3
+ export FLASK_ENV=development
4
+ export FLASK_DEBUG=1
5
+ flask run
static/script.js ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ function compose() {
2
+
3
+ // Get the value of the select with id selected_midi_program.
4
+ var selected_music_style = document.getElementById("selected_music_style").value;
5
+ var selected_density = document.getElementById("selected_density").value;
6
+ var selected_temperature = document.getElementById("selected_temperature").value;
7
+
8
+ var xhr = new XMLHttpRequest();
9
+ xhr.open("POST", "/compose", true);
10
+ xhr.setRequestHeader("Content-Type", "application/json");
11
+ xhr.send(JSON.stringify({
12
+ music_style: selected_music_style,
13
+ density: selected_density,
14
+ temperature: selected_temperature
15
+ }));
16
+
17
+ xhr.onreadystatechange = function() {
18
+ if (xhr.readyState == 4 && xhr.status == 200) {
19
+ var response = JSON.parse(xhr.responseText);
20
+ console.log(response);
21
+
22
+ if (response.status == "OK") {
23
+
24
+ // Replace the inner html of the div with id token_sequence with the token sequence.
25
+ document.getElementById("token_sequence").innerHTML = response.tokens;
26
+
27
+ // Replace the source of the audio element with id audio..
28
+ var audio = document.getElementById("audio_id");
29
+ audio.src = response.audio;
30
+
31
+ // Replace the source of the image element with id image.
32
+ var image = document.getElementById("image_id");
33
+ image.src = response.image;
34
+
35
+ }
36
+ else {
37
+ alert("Error: " + response.error);
38
+ }
39
+ }
40
+ }
41
+ }
42
+
43
+
44
+ function post_command(command_name, command_parameters, reload) {
45
+ var xhr = new XMLHttpRequest();
46
+ xhr.open("POST", "/command", true);
47
+ xhr.setRequestHeader("Content-Type", "application/json");
48
+ xhr.send(JSON.stringify({command_name: command_name, command_parameters: command_parameters}));
49
+
50
+ xhr.onreadystatechange = function() {
51
+ if (xhr.readyState == 4 && xhr.status == 200) {
52
+ var response = JSON.parse(xhr.responseText);
53
+ if (response.status == "OK") {
54
+
55
+ // Reload the page if requested.
56
+ if (reload) {
57
+ console.log("Reloading.");
58
+ load_cells();
59
+ }
60
+
61
+ }
62
+ else {
63
+ alert("Error: " + response.error);
64
+ }
65
+ }
66
+ }
67
+ }
static/style.css ADDED
File without changes
templates/composer.html ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!DOCTYPE html>
2
+ <html lang="en">
3
+ <head>
4
+ <meta name="viewport" content="width=device-width, initial-scale=1.0"/>
5
+ <meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>
6
+ <title>Composer</title>
7
+ <script src="https://kit.fontawesome.com/d6e88637ad.js" crossorigin="anonymous"></script>
8
+ <link rel="stylesheet" href="/static/style.css" />
9
+ <script src="{{ url_for('static', filename='script.js') }}"></script>
10
+ <script src="https://ajax.googleapis.com/ajax/libs/jquery/3.1.1/jquery.min.js"></script>
11
+ </head>
12
+
13
+ <body>
14
+ <div class="container">
15
+
16
+ <div>Style</div>
17
+ <select id="selected_music_style">
18
+ {%for compose_style in compose_styles%}
19
+ <option value="{{compose_style[0]}}">{{compose_style[1]}}</option>
20
+ {%endfor%}
21
+ </select>
22
+
23
+ <div>Density</div>
24
+ <select id="selected_density">
25
+ {%for density in densities%}
26
+ <option value="{{density[0]}}">{{density[1]}}</option>
27
+ {%endfor%}
28
+ </select>
29
+
30
+ <div>Temperature</div>
31
+ <select id="selected_temperature">
32
+ {%for temperature in temperatures%}
33
+ <option value="{{temperature[0]}}">{{temperature[1]}}</option>
34
+ {%endfor%}
35
+ </select>
36
+
37
+ <audio id="audio_id" controls="controls" autobuffer="autobuffer" autoplay="autoplay">
38
+ <source src="" />
39
+ </audio>
40
+
41
+ <img id="image_id" src="" />
42
+
43
+ <div class="item btn" command="compose" style="cursor: pointer;" onclick="compose()">Compose</div>
44
+
45
+ <div id="token_sequence">PIECE_START</div>
46
+
47
+ </div>
48
+ </body>
49
+ </html>