ryanlinjui committed
Commit bd05f58
1 Parent(s): 6377b99
Add application file
- .gitignore +15 -0
- app.py +40 -0
- pyproject.toml +16 -0
- taiko.py +104 -0
.gitignore
ADDED
@@ -0,0 +1,15 @@
+# python cache
+__pycache__
+
+# poetry
+.venv
+poetry.lock
+
+# gradio
+flagged
+
+# test
+*.wav
+*.ogg
+*.json
+*.tja
app.py
ADDED
@@ -0,0 +1,40 @@
+import json
+import gradio as gr
+import numpy as np
+
+from taiko import (
+    preprocess,
+    generate_taiko_wav,
+    COURSE
+)
+
+def handle(chart_path, music_path):
+    data = json.loads(open(chart_path, "r").read())
+
+    if len(data["data"]) < 1 or len(data["data"]) > 5:  # expect one entry per course, at most five courses
+        raise ValueError("Invalid JSON data: expected 1-5 course entries")
+
+    for d in data["data"]:
+        chart = preprocess(d["chart"])
+        audio = generate_taiko_wav(chart, music_path)
+        COURSE[d["course"]]["audio"] = audio
+
+    c = 2  # sample-rate multiplier: 2 when background music is overlaid (presumably to compensate for interleaved stereo samples)
+    if music_path is None: c = 1
+
+    return \
+        (COURSE[0]["audio"].frame_rate * c, np.array(COURSE[0]["audio"].get_array_of_samples())), \
+        (COURSE[1]["audio"].frame_rate * c, np.array(COURSE[1]["audio"].get_array_of_samples())), \
+        (COURSE[2]["audio"].frame_rate * c, np.array(COURSE[2]["audio"].get_array_of_samples())), \
+        (COURSE[3]["audio"].frame_rate * c, np.array(COURSE[3]["audio"].get_array_of_samples())), \
+        (COURSE[4]["audio"].frame_rate * c, np.array(COURSE[4]["audio"].get_array_of_samples()))
+
+if __name__ == "__main__":
+    inputs = [
+        gr.File(label="太鼓達人譜面Json/Taiko Chart Json Data"),
+        gr.File(label="譜面音樂/Chart Music (Optional)")
+    ]
+    outputs = [gr.Audio(label=course["label"]) for course in COURSE]
+
+    demo = gr.Interface(fn=handle, inputs=inputs, outputs=outputs, title="程設二作業HW0105 / Taiko Music Generator")
+    demo.launch(share=True)
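For context, handle() expects a JSON file with a top-level "data" list; each entry carries a "course" index (0-4, matching COURSE) and a "chart" note list that preprocess() in taiko.py walks through. The exact note encoding is not documented in this commit, so the sketch below is only an inference from that code ([type, start_seconds, end_seconds, hit_count], with the trailing fields used for rolls and balloons); the file name is arbitrary.

# Hypothetical chart file, inferred from app.py and taiko.py (not part of this commit)
import json

example_chart = {
    "data": [
        {
            "course": 0,  # index into COURSE: 0 = Easy ... 4 = 裏/Edit
            "chart": [
                [1, 0.5],          # Don at 0.5 s
                [2, 1.0],          # Katsu at 1.0 s
                [5, 2.0, 3.0],     # drum roll from 2.0 s to 3.0 s
                [7, 4.0, 5.0, 10]  # balloon: up to 10 hits between 4.0 s and 5.0 s
            ]
        }
    ]
}

with open("example.json", "w") as f:
    json.dump(example_chart, f)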
pyproject.toml
ADDED
@@ -0,0 +1,16 @@
+[tool.poetry]
+name = "taiko-music-generator"
+version = "0.1.0"
+description = ""
+authors = ["ryanlinjui <ryanlinjui@gmail.com>"]
+readme = "README.md"
+packages = [{include = "taiko_music_generator"}]
+
+[tool.poetry.dependencies]
+python = "^3.10"
+gradio = "^4.19.2"
+
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
taiko.py
ADDED
@@ -0,0 +1,104 @@
+from pydub import AudioSegment
+import numpy as np
+
+DON_WAV = "./Arcade - Taiko no Tatsujin 2020 Version - Common Sound Effects/Don.wav"
+KATSU_WAV = "./Arcade - Taiko no Tatsujin 2020 Version - Common Sound Effects/Katsu.wav"
+BALLOON_BANG_WAV = "./Arcade - Taiko no Tatsujin 2020 Version - Common Sound Effects/Balloon.wav"
+
+COURSE = [
+    {
+        "audio": AudioSegment.empty(),
+        "label": "かんたん/梅花(簡單)/Easy",
+    },
+    {
+        "audio": AudioSegment.empty(),
+        "label": "ふつう/竹子(普通)/Normal",
+    },
+    {
+        "audio": AudioSegment.empty(),
+        "label": "むずかしい/樹(困難)/Hard",
+    },
+    {
+        "audio": AudioSegment.empty(),
+        "label": "おに/魔王/Oni",
+    },
+    {
+        "audio": AudioSegment.empty(),
+        "label": "裏/Edit",
+    }
+]
+
+HIT_PER_SEC = 30  # synthesized hits per second for drum-roll and balloon notes
+
+def preprocess(data: list, offset: float = 0):
+    chart = []
+    for m in data:
+        if m[0] in {1, 3}: # Don or Big Don
+            chart.append((DON_WAV, offset + m[1]))
+        elif m[0] in {2, 4}: # Katsu or Big Katsu
+            chart.append((KATSU_WAV, offset + m[1]))
+        elif m[0] in {5, 6}: # Drum Roll or Big Drum Roll
+            count = m[1]
+            while count < m[2]:
+                chart.append((DON_WAV, offset + count))
+                count += (1 / HIT_PER_SEC)
+
+        elif m[0] == 7: # Balloon
+            count = m[1]
+            balloon_count = 0
+            while count < m[2] and balloon_count < m[3]:
+                chart.append((DON_WAV, offset + count))
+                count += (1 / HIT_PER_SEC)
+                balloon_count += 1
+
+            if balloon_count >= m[3]: # balloon popped
+                chart.append((BALLOON_BANG_WAV, offset + m[1]))
+        else:
+            raise ValueError("Unknown note type in chart data.")
+
+    return chart
+
+def resize_audio(file_path: str, target_duration: float, target_amplitude: int):
+    audio = AudioSegment.from_wav(file_path)
+
+    audio = audio[:target_duration * 1000]  # trim to target_duration seconds (pydub slices in milliseconds)
+
+    if file_path == DON_WAV or file_path == BALLOON_BANG_WAV:  # keep Don/balloon samples at their original loudness
+        return audio
+
+    audio = audio - (audio.dBFS - target_amplitude)  # normalize loudness to target_amplitude dBFS
+    return audio
+
+def generate_taiko_wav(chart: list, music: str = None):
+    max_length = int(max([start_time + len(resize_audio(file_path, target_duration=1, target_amplitude=-20))
+                          for file_path, start_time in chart]))  # rough initial buffer size; padded further below if needed
+
+    mixed_audio = np.zeros(max_length)
+
+    for file_path, start_time in chart:
+        audio = resize_audio(file_path, target_duration=0.5, target_amplitude=-20)
+        audio_array = np.array(audio.get_array_of_samples())
+
+        start_index = int(start_time * audio.frame_rate)
+        end_index = start_index + len(audio_array)
+
+        if len(mixed_audio) < end_index:
+            mixed_audio = np.pad(mixed_audio, (0, end_index - len(mixed_audio)))
+
+        mixed_audio[start_index:end_index] += audio_array
+
+    mixed_audio = np.clip(mixed_audio, -32768, 32767)  # clamp to the 16-bit sample range
+
+    mixed_audio_segment = AudioSegment(
+        mixed_audio.astype(np.int16).tobytes(),
+        frame_rate=audio.frame_rate,
+        sample_width=2,
+        channels=1
+    )
+
+    if music is None:
+        return mixed_audio_segment
+    else:
+        background_music = AudioSegment.from_ogg(music)
+        mixed_audio_with_bg = background_music.overlay(mixed_audio_segment)
+        return mixed_audio_with_bg
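As a usage note, the two helpers in taiko.py also compose outside the Gradio app. The sketch below is a minimal, hypothetical example: it assumes the Don/Katsu/Balloon .wav assets referenced above exist on disk (they are not added by this commit), and the output file name is arbitrary.

# Hypothetical direct use of taiko.py (assumes the sound-effect wav files are present)
from taiko import preprocess, generate_taiko_wav

notes = [
    [1, 0.0],       # Don at 0.0 s
    [2, 0.5],       # Katsu at 0.5 s
    [5, 1.0, 2.0],  # drum roll from 1.0 s to 2.0 s
]

chart = preprocess(notes, offset=0.5)      # shift every hit 0.5 s later
audio = generate_taiko_wav(chart)          # no background music -> mono AudioSegment
audio.export("preview.wav", format="wav")  # pydub export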