Spaces:
Runtime error
Runtime error
yes mr krabs
Browse files- app.py +60 -9
- beat_manipulator/__init__.py +2 -0
- beat_manipulator/analyze.py +62 -0
- beat_manipulator/beatmap.py +409 -0
- beat_manipulator/effect.py +106 -0
- beat_manipulator/generate.py +22 -0
- beat_manipulator/image.py +176 -0
- BeatManipulator.py → beat_manipulator/main.py +398 -341
- beat_manipulator/mix.py +44 -0
- beat_manipulator/tests.py +121 -0
- wrapper.py → beat_manipulator/wrapper.py +149 -56
- requirements.txt +8 -0
- samples/cowbell.flac +0 -0
app.py
CHANGED
@@ -1,9 +1,60 @@
|
|
1 |
-
import gradio as gr
|
2 |
-
import
|
3 |
-
|
4 |
-
|
5 |
-
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import gradio as gr, numpy as np
|
2 |
+
from gradio.components import Audio, Textbox, Checkbox
|
3 |
+
import beat_manipulator as bm
|
4 |
+
def BeatSwap(audiofile, pattern: str, scale:float, shift:float, caching:bool):
    """Gradio callback: beat-swap the uploaded file and return playable audio.

    `audiofile` is a filesystem path supplied by the gradio Audio component
    (type='filepath'); `scale` and `shift` arrive as textbox strings and are
    coerced to float here; `caching` toggles beatmap caching in bm.song.
    Returns a (samplerate, samples) tuple for the gradio 'numpy' audio output.
    """
    scale=float(scale)  # NOTE(review): float() rejects fraction strings like '1/2' — confirm intended
    shift=float(shift)
    # Derive a stable filename by slicing 8 chars off the stem — presumably
    # stripping a gradio temp-file suffix; TODO confirm the suffix length.
    song=bm.song(path=audiofile, filename=audiofile.split('.')[-2][:-8]+'.'+audiofile.split('.')[-1], caching=caching)
    song.quick_beatswap(output=None, pattern=pattern, scale=scale, shift=shift)
    #song.write_audio(output=bm.outputfilename('',song.filename, suffix=' (beatswap)'))
    # song.audio appears to be (channels, samples); .T presumably gives the
    # (samples, channels) layout gradio expects — confirm.
    return (song.samplerate, np.asarray(song.audio).T)
|
11 |
+
|
12 |
+
# --- Gradio UI definition ---
# Input widgets; the uploaded file is handed to BeatSwap as a filepath.
audiofile=Audio(source='upload', type='filepath')
patternbox = Textbox(label="Pattern:", placeholder="1, 3, 2r, 4d8", lines=1)
# scale/shift come back from the textboxes as strings; BeatSwap coerces them.
scalebox = Textbox(value=1, label="Beatmap scale, beatmap's beats per minute will be multiplied by this:", placeholder=1, lines=1)
shiftbox = Textbox(value=0, label="Beatmap shift, in beats (applies before scaling):", placeholder=0, lines=1)
cachebox = Checkbox(value=True, label="""Enable caching beatmaps. If True, a text file with the beatmap will be saved to the server (your PC if you are running locally), so that beatswapping for the second time doesn't have to generate the beatmap again.

Text file will be named after your file, and will only contain a list of numbers of positions of each beat. Note: I have no idea if this actually works on Hugging Face.""")

# Assemble the interface and launch it without requesting a public share link.
gr.Interface (fn=BeatSwap,inputs=[audiofile,patternbox,scalebox,shiftbox, cachebox],outputs=Audio(type='numpy'),theme="default",
title = "Stunlocked's Beat Manipulator"
,description = "Remix music via AI-powered beat detection and advanced beat swapping. https://github.com/stunlocked1/BeatManipulator/blob/main/presets.json"
,article="""# <h1><p style='text-align: center'><a href='https://github.com/stunlocked1/BeatManipulator' target='_blank'>Github</a></p></h1>

# Basic usage

Upload your audio, enter the beat swapping pattern, change scale and shift if needed, and run the app.

You can test where each beat is by writing `test` into the `pattern` field, which will put cowbells on each beat. Beatmap can sometimes be shifted, for example 0.5 beats forward, so this use scale and shift to adjust it.

Feel free to use complex patterns and very low scales - most of the computation time is in detecting beats, not swapping them.

# Pattern syntax

Patterns are sequences of numbers or ranges, separated by `,`. Numbers and ranges can be followed by letters that apply effects to them. Spaces can be freely used for formatting as they will be ignored. Any other character that isnt used in the syntax can also be used for formatting but only between beats, not inside them.
- `1, 3, 2, 4` - every 4 beats, swap 2nd and 3rd beat. This pattern loops every 4 beats, because 4 is the biggest number in it.
- `!` after a number sets length of the pattern (beat isnt played). `1, 3, 2, 4, 8!` - every 8 beats, swap 2nd and 3rd beat, and 5-8 beats will be skipped.
- `1, 3, 4` - skip 2nd beat
- `1, 2, 2, 4` - repeat 2nd beat
- `1, 1:1.5, 4` - play a range of beats. `0:0.5` means first half of 1st beat. Keep that in mind, to play first half of 5th beat, you do `4:4.5`, not `5:5.5`. `1` is equivalent to `0:1`. `1.5` is equivalent to `0.5:1.5`. `1,2,3,4` is `0:4`.
- `1, 0:1/3, 0:1/3, 2/3:1` - you can use expressions with `+`, `-`, `*`, `/`.
- `?` after a beat makes that number not count for looping. `1, 2, 3, 4!, 8?` - every 4 beats, 4th beat is replaced with 8th beat.
- `v` + number - controls volume of that beat. `1v2` means 200% volume, `1v1/3` means 33.33% volume, etc.
- `r` after a beat reverses that beat. `1r, 2` - every two beats first beat will be reversed
- another way to reverse - `4:0` is reversed `0:4`.
- `s` + number - changes speed and pitch of that beat. 2 will be 2 times faster, 1/2 will be 2 times slower. Note: Only integers or 1/integer numbers are supported, everything else will be rounded.
- `c` - swaps left and right channels of the beat. If followed by 0, mutes left channel instead, 1 - right channel.
- `b` + number - bitcrush. The higher the number, the stronger the effect. Barely noticeable at values less then 1
- `d` + number - downsample (8-bit sound). The higher the number, the stronger the effect. Starts being noticeable at 3, good 8-bit sounding values are around 8+.
- `t` + number - saturation
- you can combine stuff like `0:1/3d8v2cr` - that line means 0:1/3 beat will be downsampled, 200% volume, swapped channels, and reversed

there are certain commands you can write in pattern instead of the actual pattern:
- `random` - each beat will be randomly selected from all beats, basically similar to shuffling all beats
- `reverse` - reverses the order of all beats
- `test` - test beat detection by putting cowbells on each beat. The highest pitched cowbell should be on the first beat; next cowbell should be on the snare. If it is not, use scale and shift.

There are also some interesting patterns there: https://github.com/stunlocked1/BeatManipulator/blob/main/presets.json. Those are meant to be used with properly adjusted shift and scale, where 1st beat is 1st kick, 2nd beat is the snare after it.
"""
).launch(share=False)
|
beat_manipulator/__init__.py
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
"""beat_manipulator package: re-export the main API and expose the submodules."""
# star-import brings the public names from main.py (e.g. the song class) to
# the package top level; submodules stay reachable as bm.analyze, bm.effect, …
from .main import *
from . import analyze, effect, generate, image, mix, wrapper
|
beat_manipulator/analyze.py
ADDED
@@ -0,0 +1,62 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy
|
2 |
+
#@njit SLOWER
|
3 |
+
#@njit SLOWER
def detect_bpm(audio, samplerate, bpm_low=40, bpm_high=300, bpm_step=0.1, mode=1, shift_step=10):
    """Brute-force estimate of a constant beat grid. A very slow and inefficient algorithm!

    Every candidate BPM in [bpm_low, bpm_high) (step bpm_step) is scored by
    folding the mono-summed audio into rows of one beat each and summing the
    4th power of differences between rows two and four beats apart — the more
    periodic the audio is at that tempo, the lower the score. A second pass
    finds the grid phase (shift) for the winning samples-per-beat the same
    way on a shift_step grid.

    Parameters:
        audio: 2-channel array-like, shape (2, samples).
        samplerate: sample rate of `audio` in Hz.
        bpm_low, bpm_high, bpm_step: tempo search range and resolution.
        mode: 1 scores on the transposed fold; 3 rescores after transposing again.
        shift_step: phase search resolution in samples.
    Returns:
        numpy.arange of beat positions in samples (constant spacing).
    """
    audio = numpy.asarray(audio)
    audio = (audio[0] + audio[1]).astype(numpy.float32)  # mono mix of both channels
    length = len(audio)
    # cap the number of compared samples at length minus the longest candidate
    # beat, so the tail that gets cut for some candidates can't skew the score
    mlength = length - int(1 / ((bpm_low / 60) / samplerate))
    candidates = []  # samples-per-beat per candidate tempo
    scores = []      # periodicity score per candidate (lower is better)
    for i in range(int((bpm_high - bpm_low) / bpm_step)):
        spb = int(round(1 / (((bpm_low + i * bpm_step) / 60) / samplerate)))
        # fold audio into a 2d array with one beat per row
        end = -int(length % spb)
        if end == 0: end = length
        image = audio[:end].reshape(-1, spb)
        if mode == 1: image = image.T
        diff2 = numpy.abs((image[:-2] - image[2:]).flatten()[:mlength])
        diff4 = numpy.abs((image[:-4] - image[4:]).flatten()[:mlength])
        difference = numpy.sum(diff2*diff2*diff2*diff2) + numpy.sum(diff4*diff4*diff4*diff4)
        if mode == 3:
            # NOTE(review): this overwrites (not adds to) the score above,
            # unlike the shift pass below which uses '+=' — confirm intended.
            image = image.T
            diff2 = numpy.abs((image[:-2] - image[2:]).flatten()[:mlength])
            diff4 = numpy.abs((image[:-4] - image[4:]).flatten()[:mlength])
            difference = numpy.sum(diff2*diff2*diff2*diff2) + numpy.sum(diff4*diff4*diff4*diff4)
        candidates.append(spb)
        scores.append(difference)
        # (leftover debug printing of each improving candidate was removed here)
    spb = candidates[numpy.argmin(numpy.asarray(scores))]
    # second pass: find the phase of the grid for the chosen samples-per-beat
    candidates = []
    scores = []
    for shift in range(0, spb, shift_step):
        end = -int(length % spb)
        if end == 0: end = length + shift
        image = audio[shift:end].reshape(-1, spb)
        # `length` shrinks in lockstep with `shift`, keeping the folded span
        # a whole multiple of spb
        length -= shift_step
        if mode == 1: image = image.T
        diff = numpy.abs((image[:-1] - image[1:]).flatten()[:mlength])
        difference = numpy.sum(diff*diff)
        if mode == 3:
            image = image.T
            diff = numpy.abs((image[:-1] - image[1:]).flatten()[:mlength])
            difference += numpy.sum(diff*diff)
        candidates.append(shift)
        scores.append(difference)
    shift = candidates[numpy.argmin(numpy.asarray(scores))]
    return numpy.arange(shift, length, spb)
|
beat_manipulator/beatmap.py
ADDED
@@ -0,0 +1,409 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy
|
2 |
+
def _safer_eval(string:str) -> float:
|
3 |
+
#print(string, end=' ')
|
4 |
+
if isinstance(string, str):
|
5 |
+
#print(string, end=' ')
|
6 |
+
#print(''.join([i for i in string if i.isdecimal() or i in '.+-*/']), end = ' ')
|
7 |
+
string = eval(''.join([i for i in string if i.isdecimal() or i in '.+-*/']))
|
8 |
+
#print(string)
|
9 |
+
return string
|
10 |
+
|
11 |
+
class beatmap:
    """Beat positions (in samples) of an audio track, plus the metadata
    needed to detect, cache, scale and shift them."""
    def __init__(self, beatmap:list = None, audio = None, samplerate = None, caching = True, log = True, path=None, artist=None, title=None, filename = None):
        """Store the raw attributes; beat detection happens later in generate()."""
        self.beatmap = beatmap        # beat positions in samples (list or numpy array)
        self.audio = audio            # audio samples; indexed as audio[0] elsewhere, so presumably (channels, samples) — confirm
        self.samplerate = samplerate  # samples per second
        self.caching = caching        # allow reading/writing cached beatmaps under SavedBeatmaps/
        self.log = log                # print progress messages when True
        self.path = path              # path of the source audio file
        self.artist = artist
        self.title = title
        self.filename = filename      # used to derive the cache file name
22 |
+
|
23 |
+
    def __getitem__(self, var):
        """Index/slice straight into the underlying beat-position sequence."""
        return self.beatmap[var]

    def __len__(self):
        """Number of detected beats."""
        return len(self.beatmap)

    def _toarray(self):
        """Normalize self.beatmap from list to an int numpy array (no-op otherwise)."""
        if isinstance(self.beatmap, list): self.beatmap=numpy.asarray(self.beatmap, dtype=int)

    def _add_beat_to_end(self):
        """Append a final beat at the end of the audio so the last slice is closed."""
        # abs() guards against negative positions sneaking in before the cast
        self.beatmap=numpy.abs(numpy.append(self.beatmap, len(self.audio[0])))
        self.beatmap=self.beatmap.astype(int)
35 |
+
|
36 |
+
    def generate(self, lib='madmom.BeatDetectionProcessor', caching=True, split=None):
        """Creates self.beatmap attribute with a list of positions of beats in samples.

        `lib` selects the backend: various madmom processors, 'librosa',
        'split' (an even grid of `split` slices), or 'stunlocked' (the local
        analyze.detect_bpm). When caching is enabled the result is stored in
        SavedBeatmaps/ keyed by filename, lib and audio length.
        Also sets self.bpm — NOTE(review): computed as average(beatmap)/samplerate,
        which is a mean beat *position* in seconds, not beats per minute; confirm.
        """
        if self.log is True: print(f'analyzing beats using {lib}; ', end='')
        #if audio is None and filename is None: (audio, samplerate) = open_audio()
        if caching is True and self.caching is True:
            audio_id=hex(len(self.audio[0]))  # cheap content key: audio length in hex
            import os
            if not os.path.exists('SavedBeatmaps'):
                os.mkdir('SavedBeatmaps')
            cacheDir="SavedBeatmaps/" + ''.join(self.filename.split('/')[-1]) + "_"+lib+"_"+audio_id+'.txt'
            try:
                self.beatmap=numpy.loadtxt(cacheDir, dtype=int)
                self.bpm=numpy.average(self.beatmap)/self.samplerate
                if self.log is True: print('loaded cached beatmap.')
                return
            except OSError:
                if self.log is True:print("beatmap hasn't been generated yet. Generating...")

        if lib.split('.')[0]=='madmom':
            # madmom needs these collections.abc names importable on newer Pythons
            from collections.abc import MutableMapping, MutableSequence
            import madmom
        # dispatch on the exact lib string; madmom processors return beat times
        # (presumably seconds), hence the *samplerate conversion to samples
        if lib=='madmom.BeatTrackingProcessor':
            proc = madmom.features.beats.BeatTrackingProcessor(fps=100)
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        if lib=='madmom.BeatTrackingProcessor.constant':
            proc = madmom.features.beats.BeatTrackingProcessor(fps=100, look_ahead=None)
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        if lib=='madmom.BeatTrackingProcessor.consistent':
            proc = madmom.features.beats.BeatTrackingProcessor(fps=100, look_ahead=None, look_aside=0)
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        elif lib=='madmom.BeatDetectionProcessor':
            proc = madmom.features.beats.BeatDetectionProcessor(fps=100)
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        elif lib=='madmom.BeatDetectionProcessor.consistent':
            proc = madmom.features.beats.BeatDetectionProcessor(fps=100, look_aside=0)
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        elif lib=='madmom.CRFBeatDetectionProcessor':
            proc = madmom.features.beats.CRFBeatDetectionProcessor(fps=100)
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        elif lib=='madmom.CRFBeatDetectionProcessor.constant':
            proc = madmom.features.beats.CRFBeatDetectionProcessor(fps=100, use_factors=True, factors=[0.5, 1, 2])
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        elif lib=='madmom.DBNBeatTrackingProcessor':
            proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100)
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        elif lib=='madmom.DBNBeatTrackingProcessor.1000':
            proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100, transition_lambda=1000)
            act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
        elif lib=='madmom.DBNDownBeatTrackingProcessor':
            proc = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[4], fps=100)
            act = madmom.features.downbeats.RNNDownBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
            self.beatmap=self.beatmap[:,0]  # keep beat times, drop the beat-in-bar column
        elif lib=='madmom.PatternTrackingProcessor': #broken
            from madmom.models import PATTERNS_BALLROOM
            proc = madmom.features.downbeats.PatternTrackingProcessor(PATTERNS_BALLROOM, fps=50)
            from madmom.audio.spectrogram import LogarithmicSpectrogramProcessor, SpectrogramDifferenceProcessor, MultiBandSpectrogramProcessor
            from madmom.processors import SequentialProcessor
            log = LogarithmicSpectrogramProcessor()
            diff = SpectrogramDifferenceProcessor(positive_diffs=True)
            mb = MultiBandSpectrogramProcessor(crossover_frequencies=[270])
            pre_proc = SequentialProcessor([log, diff, mb])
            act = pre_proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            self.beatmap= proc(act)*self.samplerate
            self.beatmap=self.beatmap[:,0]
        elif lib=='madmom.DBNBarTrackingProcessor': #broken
            # NOTE(review): generate() has no audio=/samplerate=/filename=
            # parameters — this recursive call raises TypeError as written
            beats = self.generate(audio=self.audio, samplerate=self.samplerate, filename=self.filename, lib='madmom.DBNBeatTrackingProcessor', caching = caching)
            proc = madmom.features.downbeats.DBNBarTrackingProcessor(beats_per_bar=[4], fps=100)
            act = madmom.features.downbeats.RNNBarProcessor()(((madmom.audio.signal.Signal(self.audio.T, self.samplerate)), beats))
            self.beatmap= proc(act)*self.samplerate
        elif lib=='librosa': #broken in 3.9, works in 3.8
            import librosa
            beat_frames = librosa.beat.beat_track(y=self.audio[0], sr=self.samplerate, hop_length=512)
            self.beatmap = librosa.frames_to_samples(beat_frames[1])
        # elif lib=='BeatNet':
        #     from BeatNet.BeatNet import BeatNet # doesn't seem to work well for some reason
        #     estimator = BeatNet(1, mode='offline', inference_model='DBN', plot=[], thread=False)
        #     beatmap = estimator.process(filename)
        #     beatmap=beatmap[:,0]*self.samplerate
        # elif lib=='jump-reward-inference': # doesn't seem to work well for some reason
        #     from jump_reward_inference.joint_tracker import joint_inference
        #     estimator = joint_inference(1, plot=False)
        #     beatmap = estimator.process(filename)
        #     beatmap=beatmap[:,0]*self.samplerate

        elif lib=='split':
            # even grid: `split` slices over the whole track (returns a list)
            self.beatmap= list(range(0, len(self.audio[0]), len(self.audio[0])//split))
        elif lib=='stunlocked':
            from . import analyze
            self.beatmap = analyze.detect_bpm(self.audio, self.samplerate)
        if lib.split('.')[0]=='madmom':
            # nudge madmom positions 500 samples earlier (abs guards negatives);
            # NOTE(review): fixed 500 samples regardless of samplerate — confirm
            self.beatmap=numpy.absolute(self.beatmap-500)
        # NOTE(review): 'split' yields a plain list, which has no .astype —
        # caching with lib='split' would raise here; confirm.
        if caching is True and self.caching is True: numpy.savetxt(cacheDir, self.beatmap.astype(int), fmt='%d')
        self.bpm=numpy.average(self.beatmap)/self.samplerate
        if isinstance(self.beatmap, list): self.beatmap=numpy.asarray(self.beatmap, dtype=int)
        self.beatmap=self.beatmap.astype(int)
|
141 |
+
|
142 |
+
def scale(self, scale:float):
|
143 |
+
if isinstance(scale, str): scale = _safer_eval(scale)
|
144 |
+
#print(scale)
|
145 |
+
assert scale>0, f"scale should be > 0, your scale is {scale}"
|
146 |
+
#print(self.beatmap)
|
147 |
+
import math
|
148 |
+
if scale!=1:
|
149 |
+
if self.log is True: print(f'scale={scale}; ')
|
150 |
+
a=0
|
151 |
+
b=numpy.array([], dtype=int)
|
152 |
+
if scale%1==0:
|
153 |
+
while a <len( self.beatmap):
|
154 |
+
#print(a, self.beatmap[int(a)], end=', ')
|
155 |
+
b=numpy.append(b,self.beatmap[int(a)])
|
156 |
+
a+=scale
|
157 |
+
#print(self.beatmap[int(a)])
|
158 |
+
else:
|
159 |
+
while a+1 <len( self.beatmap):
|
160 |
+
#print(a,b)
|
161 |
+
b=numpy.append(b, int((1-(a%1))*self.beatmap[math.floor(a)]+(a%1)*self.beatmap[math.ceil(a)]))
|
162 |
+
a+=scale
|
163 |
+
self.beatmap=b
|
164 |
+
|
165 |
+
def autoscale(self):
|
166 |
+
if self.log is True: print(f'autoscaling; ')
|
167 |
+
bpm=(self.beatmap[-1]-self.beatmap[0])/(len(self.beatmap)-1)
|
168 |
+
#print('BPM =', (bpm/self.samplerate) * 240, bpm)
|
169 |
+
if bpm>=160000: scale=1/8
|
170 |
+
elif (bpm)>=80000: scale=1/4
|
171 |
+
elif (bpm)>=40000: scale=1/2
|
172 |
+
elif (bpm)<=20000: scale=2
|
173 |
+
elif (bpm)<=10000: scale=4
|
174 |
+
elif (bpm)<=5000: scale=8
|
175 |
+
self.scale(scale)
|
176 |
+
|
177 |
+
    def autoinsert(self):
        """Prepend extrapolated beats so the beatmap starts near sample 0.

        Repeatedly inserts a beat one interval (beatmap[1]-beatmap[0])
        before the current first beat while the gap before the first beat is
        still larger than one interval; `a` caps it at 100 insertions as a
        safety net against a non-shrinking first beat.
        """
        if self.log is True: print(f'autoinserting; ')
        diff=(self.beatmap[1]-self.beatmap[0])
        a=0
        while diff<self.beatmap[0] and a<100:
            self.beatmap=numpy.insert(self.beatmap, 0, self.beatmap[0]-diff)
            a+=1
|
184 |
+
|
185 |
+
    def shift(self, shift: float):
        """Shift the beatmap by `shift` beats (fractional values allowed).

        Positive shift drops whole beats from the start and moves each beat
        forward by the fractional remainder of the following interval;
        negative shift prepends beats and moves each beat backwards within
        the preceding interval. Afterwards duplicate positions are bumped by
        one sample until all positions are unique, keeping the map sorted.
        String input is evaluated arithmetically ('1/2' works).
        """
        if isinstance(shift, str): shift = _safer_eval(shift)
        if shift!=0 and self.log is True: print(f'shift={shift}; ')
        elif shift==0: return
        if shift<0:
            shift=-shift # so that floor division works correctly
            # add integer number of beats to the start
            # NOTE(review): inserts positions 1..n (samples), not extrapolated
            # beat times — confirm intended
            if shift >= 1: self.beatmap=numpy.insert(self.beatmap, 0, list(i+1 for i in range(int(shift//1))))
            if shift%1!=0:
                # shift by modulus from the end
                shift=shift%1
                for i in reversed(range(len(self.beatmap))):
                    if i==0: continue
                    # move beat i backwards by `shift` of its preceding interval
                    self.beatmap[i] = int(self.beatmap[i] - shift * (self.beatmap[i] - self.beatmap[i-1]))

        elif shift>0:
            # remove integer number of beats from the start
            if shift >= 1: self.beatmap=self.beatmap[int(shift//1):]
            if shift%1!=0:
                # shift by modulus
                shift=shift%1
                for i in range(len(self.beatmap)-int(shift)-1):
                    # move beat i forwards by `shift` of its following interval
                    self.beatmap[i] = int(self.beatmap[i] + shift * (self.beatmap[i+1] - self.beatmap[i]))

        # deduplicate: bump every repeated position by one sample per pass
        # until no duplicates remain (list.count makes each pass O(n^2) —
        # acceptable for typical beatmap sizes)
        self.beatmap=sorted(list(self.beatmap))
        while True:
            n,done=0,[]
            for i in range(len(self.beatmap)):
                if self.beatmap.count(self.beatmap[i])>1 and i not in done:
                    self.beatmap[i]+=1
                    n+=1
                    done.append(i)
            if n==0: break
            self.beatmap=sorted(list(self.beatmap))
|
221 |
+
|
222 |
+
def cut(self, start=0, end=None):
|
223 |
+
if start!=0 or end is not None and self.log is True: print(f'start={start}; end={end}; ')
|
224 |
+
start*=self.samplerate
|
225 |
+
self.beatmap=self.beatmap[self.beatmap>=start].astype(int)
|
226 |
+
if end is not None: self.beatmap=self.beatmap[self.beatmap<=end].astype(int)
|
227 |
+
|
228 |
+
class hitmap(beatmap):
    """beatmap variant whose self.beatmap holds a per-frame onset/beat
    activation curve (presumably 100 fps, given the /100 in osu()) rather
    than discrete beat positions."""
    def generate(self, lib='madmom.madmom.RNNBeatProcessor', caching=True):
        # NOTE(review): the default lib 'madmom.madmom.RNNBeatProcessor'
        # matches neither branch below ('madmom.RNNBeatProcessor' /
        # 'madmom.MultiModelSelectionProcessor'), so with the default and no
        # cache hit self.beatmap is never assigned — confirm and fix default.
        if self.log is True: print(f'analyzing hits using {lib}; ')
        self.hitlib=lib
        """Finds positions of actual instrument/drum hits."""
        if caching is True and self.caching is True:
            audio_id=hex(len(self.audio[0]))  # cheap content key: audio length in hex
            import os
            if not os.path.exists('SavedBeatmaps'):
                os.mkdir('SavedBeatmaps')
            cacheDir="SavedBeatmaps/" + ''.join(self.filename.split('/')[-1]) + "_"+lib+"_"+audio_id+'.txt'
            try:
                cached=False
                self.beatmap=numpy.loadtxt(cacheDir)
                cached=True
            except OSError: cached=False
        # NOTE(review): `cached` is only assigned inside the caching branch;
        # calling with caching disabled raises NameError here — confirm.
        if cached is False:
            if lib=='madmom.RNNBeatProcessor':
                import madmom
                proc = madmom.features.beats.RNNBeatProcessor()
                self.beatmap = proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
            elif lib=='madmom.MultiModelSelectionProcessor':
                import madmom
                proc = madmom.features.beats.RNNBeatProcessor(post_processor=None)
                predictions = proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
                mm_proc = madmom.features.beats.MultiModelSelectionProcessor(num_ref_predictions=None)
                self.beatmap= mm_proc(predictions)*self.samplerate
                # normalize the activation curve to a 0..1 range
                self.beatmap/= numpy.max(self.beatmap)
        if caching is True and self.caching is True: numpy.savetxt(cacheDir, self.beatmap)
|
257 |
+
|
258 |
+
def osu(self, difficulties = [0.2, 0.1, 0.08, 0.06, 0.04, 0.02, 0.01, 0.005]):
|
259 |
+
if self.log is True: print(f'generating osu file')
|
260 |
+
def _process(self, threshold):
|
261 |
+
hitmap=[]
|
262 |
+
actual_samplerate=int(self.samplerate/100)
|
263 |
+
beat_middle=int(actual_samplerate/2)
|
264 |
+
for i in range(len(self.beatmap)):
|
265 |
+
if self.beatmap[i]>threshold: hitmap.append(i*actual_samplerate + beat_middle)
|
266 |
+
hitmap=numpy.asarray(hitmap)
|
267 |
+
clump=[]
|
268 |
+
for i in range(len(hitmap)-1):
|
269 |
+
#print(i, abs(self.beatmap[i]-self.beatmap[i+1]), clump)
|
270 |
+
if abs(hitmap[i] - hitmap[i+1]) < self.samplerate/16: clump.append(i)
|
271 |
+
elif clump!=[]:
|
272 |
+
clump.append(i)
|
273 |
+
actual_time=hitmap[clump[0]]
|
274 |
+
hitmap[numpy.array(clump)]=0
|
275 |
+
#print(self.beatmap)
|
276 |
+
hitmap[clump[0]]=actual_time
|
277 |
+
clump=[]
|
278 |
+
|
279 |
+
hitmap=hitmap[hitmap!=0]
|
280 |
+
return hitmap
|
281 |
+
|
282 |
+
osufile=lambda title,artist,version: ("osu file format v14\n"
|
283 |
+
"\n"
|
284 |
+
"[General]\n"
|
285 |
+
f"AudioFilename: {self.path.split('/')[-1]}\n"
|
286 |
+
"AudioLeadIn: 0\n"
|
287 |
+
"PreviewTime: -1\n"
|
288 |
+
"Countdown: 0\n"
|
289 |
+
"SampleSet: Normal\n"
|
290 |
+
"StackLeniency: 0.5\n"
|
291 |
+
"Mode: 0\n"
|
292 |
+
"LetterboxInBreaks: 0\n"
|
293 |
+
"WidescreenStoryboard: 0\n"
|
294 |
+
"\n"
|
295 |
+
"[Editor]\n"
|
296 |
+
"DistanceSpacing: 1.1\n"
|
297 |
+
"BeatDivisor: 4\n"
|
298 |
+
"GridSize: 8\n"
|
299 |
+
"TimelineZoom: 1.6\n"
|
300 |
+
"\n"
|
301 |
+
"[Metadata]\n"
|
302 |
+
f"Title:{title}\n"
|
303 |
+
f"TitleUnicode:{title}\n"
|
304 |
+
f"Artist:{artist}\n"
|
305 |
+
f"ArtistUnicode:{artist}\n"
|
306 |
+
f'Creator:{self.hitlib} + BeatManipulator\n'
|
307 |
+
f'Version:{version} {self.hitlib}\n'
|
308 |
+
'Source:\n'
|
309 |
+
'Tags:BeatManipulator\n'
|
310 |
+
'BeatmapID:0\n'
|
311 |
+
'BeatmapSetID:-1\n'
|
312 |
+
'\n'
|
313 |
+
'[Difficulty]\n'
|
314 |
+
'HPDrainRate:4\n'
|
315 |
+
'CircleSize:4\n'
|
316 |
+
'OverallDifficulty:7.5\n'
|
317 |
+
'ApproachRate:10\n'
|
318 |
+
'SliderMultiplier:3.3\n'
|
319 |
+
'SliderTickRate:1\n'
|
320 |
+
'\n'
|
321 |
+
'[Events]\n'
|
322 |
+
'//Background and Video events\n'
|
323 |
+
'//Break Periods\n'
|
324 |
+
'//Storyboard Layer 0 (Background)\n'
|
325 |
+
'//Storyboard Layer 1 (Fail)\n'
|
326 |
+
'//Storyboard Layer 2 (Pass)\n'
|
327 |
+
'//Storyboard Layer 3 (Foreground)\n'
|
328 |
+
'//Storyboard Layer 4 (Overlay)\n'
|
329 |
+
'//Storyboard Sound Samples\n'
|
330 |
+
'\n'
|
331 |
+
'[TimingPoints]\n'
|
332 |
+
'0,140.0,4,1,0,100,1,0\n'
|
333 |
+
'\n'
|
334 |
+
'\n'
|
335 |
+
'[HitObjects]\n')
|
336 |
+
# remove the clumps
|
337 |
+
#print(self.beatmap)
|
338 |
+
|
339 |
+
#print(self.beatmap)
|
340 |
+
|
341 |
+
|
342 |
+
#print(len(osumap))
|
343 |
+
#input('banana')
|
344 |
+
import shutil, os
|
345 |
+
if os.path.exists('BeatManipulator_TEMP'): shutil.rmtree('BeatManipulator_TEMP')
|
346 |
+
os.mkdir('BeatManipulator_TEMP')
|
347 |
+
hitmap=[]
|
348 |
+
import random
|
349 |
+
for difficulty in difficulties:
|
350 |
+
for i in range(4):
|
351 |
+
#print(i)
|
352 |
+
this_difficulty=_process(self, difficulty)
|
353 |
+
hitmap.append(this_difficulty)
|
354 |
+
for k in range(len(hitmap)):
|
355 |
+
osumap=numpy.vstack((hitmap[k],numpy.zeros(len(hitmap[k])),numpy.zeros(len(hitmap[k])))).T
|
356 |
+
difficulty= difficulties[k]
|
357 |
+
for i in range(len(osumap)-1):
|
358 |
+
if i==0:continue
|
359 |
+
dist=(osumap[i,0]-osumap[i-1,0])*(1-(difficulty**0.3))
|
360 |
+
if dist<1000: dist=0.005
|
361 |
+
elif dist<2000: dist=0.01
|
362 |
+
elif dist<3000: dist=0.015
|
363 |
+
elif dist<4000: dist=0.02
|
364 |
+
elif dist<5000: dist=0.25
|
365 |
+
elif dist<6000: dist=0.35
|
366 |
+
elif dist<7000: dist=0.45
|
367 |
+
elif dist<8000: dist=0.55
|
368 |
+
elif dist<9000: dist=0.65
|
369 |
+
elif dist<10000: dist=0.75
|
370 |
+
elif dist<12500: dist=0.85
|
371 |
+
elif dist<15000: dist=0.95
|
372 |
+
elif dist<20000: dist=1
|
373 |
+
#elif dist<30000: dist=0.8
|
374 |
+
prev_x=osumap[i-1,1]
|
375 |
+
prev_y=osumap[i-1,2]
|
376 |
+
if prev_x>0: prev_x=prev_x-dist*0.1
|
377 |
+
elif prev_x<0: prev_x=prev_x+dist*0.1
|
378 |
+
if prev_y>0: prev_y=prev_y-dist*0.1
|
379 |
+
elif prev_y<0: prev_y=prev_y+dist*0.1
|
380 |
+
dirx=random.uniform(-dist,dist)
|
381 |
+
diry=dist-abs(dirx)*random.choice([-1, 1])
|
382 |
+
if abs(prev_x+dirx)>1: dirx=-dirx
|
383 |
+
if abs(prev_y+diry)>1: diry=-diry
|
384 |
+
x=prev_x+dirx
|
385 |
+
y=prev_y+diry
|
386 |
+
#print(dirx,diry,x,y)
|
387 |
+
#print(x>1, x<1, y>1, y<1)
|
388 |
+
if x>1: x=0.8
|
389 |
+
if x<-1: x=-0.8
|
390 |
+
if y>1: y=0.8
|
391 |
+
if y<-1: y=-0.8
|
392 |
+
#print(dirx,diry,x,y)
|
393 |
+
osumap[i,1]=x
|
394 |
+
osumap[i,2]=y
|
395 |
+
|
396 |
+
osumap[:,1]*=300
|
397 |
+
osumap[:,1]+=300
|
398 |
+
osumap[:,2]*=180
|
399 |
+
osumap[:,2]+=220
|
400 |
+
|
401 |
+
file=osufile(self.artist, self.title, difficulty)
|
402 |
+
for j in osumap:
|
403 |
+
#print('285,70,'+str(int(int(i)*1000/self.samplerate))+',1,0')
|
404 |
+
file+=f'{int(j[1])},{int(j[2])},{str(int(int(j[0])*1000/self.samplerate))},1,0\n'
|
405 |
+
with open(f'BeatManipulator_TEMP/{self.artist} - {self.title} [BeatManipulator {difficulty} {self.hitlib}].osu', 'x', encoding="utf-8") as f:
|
406 |
+
f.write(file)
|
407 |
+
|
408 |
+
def autoinsert(): raise NotImplementedError("autoinserting won't work on hitmaps")
|
409 |
+
def autoscale(): raise NotImplementedError("autoscale won't work on hitmaps")
|
beat_manipulator/effect.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy
|
2 |
+
|
3 |
+
def normalize(audio):
    """Center `audio` around zero, then scale it by (1 - peak).

    NOTE(review): the gain is (1 - peak), not 1/peak, so a centered peak p
    ends up at p*(1-p) (and the signal inverts when p > 1) — confirm this is
    the intended "normalization".
    """
    midpoint = (numpy.min(audio) + numpy.max(audio)) / 2
    centered = audio - midpoint
    peak = max(numpy.max(centered), abs(numpy.min(centered)))
    return centered * (1 - peak)
|
6 |
+
|
7 |
+
def pitch(audio, pitch, grain):
    """Crude granular pitch shifter operating in-place on sample lists.

    audio: channels-first list/array of samples; a flat sequence longer than
        10 elements is treated as one channel and wrapped.
    pitch: shift factor. <1 stretches grains (lower pitch), >1 decimates and
        tiles grains (higher pitch). Exactly 1 is a no-op.
    grain: grain size in samples (coerced to int).

    Returns the processed audio as a (possibly nested) list.

    NOTE(review): in the pitch<1 branch the recomputed ``pitch=int(1//pitch)``
    is never used -- every grain is stretched by a fixed factor of 2
    regardless of the requested amount; confirm whether that is intended.
    """
    grain=int(grain)
    # heuristic: more than 10 top-level elements means "flat mono", so wrap
    if len(audio)>10: audio=[audio]
    if type(audio) is not list: audio=audio.tolist()
    length=len(audio[0])
    if pitch<1:
        pitch=int(1//pitch)
        # grain must be even so the half-grain repeat fills it exactly
        if grain%2!=0: grain+=1
        for i in range(len(audio)):
            n=0
            while n+grain<length:
                #print(len(audio[i]))
                #print(n)
                # stretch: repeat the first half of the grain twice
                audio[i][n:n+grain]=numpy.repeat(audio[i][n:n+int(grain/2)], 2)
                #print(len(audio[i]))
                n+=grain
    elif pitch>1:
        pitch=int(pitch)
        for i in range(len(audio)):
            n=0
            while n+grain<length:
                # decimate by `pitch`, then tile the shorter list back up
                # (list * int repeats the list, keeping the grain length)
                audio[i][n:n+grain]=audio[i][n:n+grain:pitch]*pitch
                n+=grain
    return audio
|
31 |
+
|
32 |
+
def pitchB(audio, pitch, grain):
    """Variant of pitch(): the upward branch mirrors grains back and forth
    instead of tiling them, reducing clicks at grain boundaries.

    audio: channels-first list/array of samples; a flat sequence longer than
        10 elements is treated as one channel and wrapped.
    pitch: shift factor; <1 stretches grains, >1 decimates and mirror-tiles.
    grain: grain size in samples (coerced to int).

    Returns the processed audio as a (possibly nested) list.

    NOTE(review): in the pitch>1 branch ``audio2`` doubles on every loop
    iteration, so for pitch>2 the slice assignment inserts more samples than
    it replaces and the channel grows; confirm whether that is intended.
    """
    grain=int(grain)
    # heuristic: more than 10 top-level elements means "flat mono", so wrap
    if len(audio)>10: audio=[audio]
    if type(audio) is not list: audio=audio.tolist()
    length=len(audio[0])
    if pitch<1:
        pitch=int(1//pitch)
        # grain must be even so the half-grain repeat fills it exactly
        if grain%2!=0: grain+=1
        for i in range(len(audio)):
            n=0
            while n+grain<length:
                #print(len(audio[i]))
                #print(n)
                # stretch: repeat the first half of the grain twice
                audio[i][n:n+grain]=numpy.repeat(audio[i][n:n+int(grain/2)], 2)
                #print(len(audio[i]))
                n+=grain

    elif pitch>1:
        pitch=int(pitch)
        for i in range(len(audio)):
            n=0
            while n+grain<length:
                # decimate the grain by `pitch`
                audio2=audio[i][n:n+grain:pitch]
                for j in range(pitch-1):
                    #print(j)
                    # alternate forward/reversed copies for smoother joins
                    audio2.extend(audio2[::1] if j%2==1 else audio2[::-1])
                audio[i][n:n+grain]=audio2
                n+=grain
    return audio
|
61 |
+
|
62 |
+
def grain(audio, grain):
    """Granular repeat effect: every chunk of ``grain`` samples is copied
    over the chunk that follows it, halving the effective time resolution.

    audio: channels-first list/array of samples; a flat sequence longer than
        10 elements is treated as one channel and wrapped.
    grain: chunk size in samples (coerced to int).

    Returns the processed audio as a (possibly nested) list.

    Bug fix: the write position ``n`` is now reset for every channel.
    Previously it was initialized once before the channel loop, so only the
    first channel was processed and later channels came back untouched.
    """
    grain=int(grain)
    # heuristic: more than 10 top-level elements means "flat mono", so wrap
    if len(audio)>10: audio=[audio]
    if type(audio) is not list: audio=audio.tolist()
    length=len(audio[0])
    for i in range(len(audio)):
        n=0  # restart at the beginning of each channel
        while n+2*grain<length:
            # overwrite the next chunk with a copy of the current one
            audio[i][n+grain:n+2*grain]=audio[i][n:n+grain]
            n+=grain*2
    return audio
|
73 |
+
|
74 |
+
def ftt(audio, inverse=True):
    """headphone warning: cursed effect"""
    # Applies scipy's FFT (or inverse FFT, the default) to each channel of a
    # time-domain signal, then rescales and normalizes.
    # NOTE(review): writing the complex FFT result back into a float array
    # mangles/discards the imaginary part (numpy warns or raises on the
    # cast) -- presumably part of the intentionally "cursed" sound; confirm.
    import scipy.fft
    audio=numpy.asarray(audio).copy()
    for i in range(len(audio)):
        if inverse is False:
            audio[i]= scipy.fft.fft(audio[i], axis=0)
        else:
            audio[i]= scipy.fft.ifft(audio[i], axis=0)
    # rescale so the global maximum maps to 1 before final normalization
    audio=(audio*(2/numpy.max(audio)))-1
    return normalize(audio)
|
85 |
+
|
86 |
+
def fourier_shift(audio, value=5):
    """modulates volume for some reason"""
    # Runs scipy.ndimage.fourier_shift over each channel, casts the (complex)
    # result back to float, and renormalizes.
    # NOTE(review): fourier_shift expects frequency-domain input; feeding it
    # raw time-domain samples is what produces the volume-modulation artifact
    # described in the docstring -- kept as a deliberate effect.
    import scipy.ndimage
    audio=numpy.asarray(audio).copy()
    audio= numpy.asarray(list(scipy.ndimage.fourier_shift(i, value, axis=-1) for i in audio)).astype(float)
    return normalize(audio)
|
92 |
+
|
93 |
+
def gradient(audio):
    """acts as an interesting high pass filter that removes drums"""
    # Differentiate along axis 0; operate on a copy so the caller's
    # array is left untouched.
    buffer = numpy.asarray(audio).copy()
    return numpy.gradient(buffer, axis=0)
|
97 |
+
|
98 |
+
def gradient_inverse(audio):
    """supposed to be inverse of a gradient, but it just completely destroys the audio into a distorted mess"""
    audio=numpy.asarray(audio).copy()
    for i in range(len(audio)):
        a = audio[i]
        # cumulative-sum reconstruction of an antiderivative: odd- and
        # even-indexed running sums are interleaved back into one sequence,
        # anchored at the first sample
        audio[i] = a[0] + 2 * numpy.c_[numpy.r_[0, a[1:-1:2].cumsum()], a[::2].cumsum() - a[0] / 2].ravel()[:len(a)]
    audio=normalize(audio)
    # NOTE(review): differentiating again after normalizing means this does
    # not invert gradient(); kept as-is since the docstring admits as much.
    return numpy.gradient(audio, axis=0)
|
106 |
+
|
beat_manipulator/generate.py
ADDED
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy
|
2 |
+
def sidechain(samplerate:int = 44100,
              length: float = 0.5,
              curve: float = 2,
              vol0: float = 0,
              vol1: float = 1,
              smoothing: int = 40,
              channels:int = 2) -> tuple:
    """Generate a sidechain (ducking) volume envelope.

    samplerate: samples per second used to size the envelope.
    length: envelope duration in seconds.
    curve: exponent applied to the linear ramp (2 = quadratic recovery).
    vol0, vol1: start and end volume of the ramp.
    smoothing: number of samples for a 1->0 attack ramp prepended to the
        envelope; None disables it.
    channels: number of channels to return.

    Returns a tuple of per-channel envelope arrays when channels > 1,
    otherwise a single numpy array (despite the `tuple` annotation).

    Bug fix: the attack ramp was passed to numpy.concatenate as two
    positional arguments, making the envelope the `axis` argument and
    raising a TypeError whenever smoothing was enabled (the default).
    The arrays are now wrapped in a tuple.
    """
    x = numpy.linspace(vol0, vol1, int(length*samplerate))**curve
    if smoothing is not None:
        # prepend a short 1->0 ramp so the duck starts from full volume
        x = numpy.concatenate((numpy.linspace(1, 0, smoothing), x))
    return tuple(x for _ in range(channels)) if channels>1 else x
|
14 |
+
|
15 |
+
def sine(len, freq, samplerate, volume=1):
    """Generate a sine wave lasting `len` seconds at `freq` Hz."""
    # phase ramp covering freq*len full cycles (original's pi literal kept)
    phase = numpy.linspace(0, freq * 3.1415926 * 2 * len, int(len * samplerate))
    return volume * numpy.sin(phase)
|
17 |
+
|
18 |
+
def saw(len, freq, samplerate, volume=1):
    """Generate a sawtooth wave ramping from -1 toward 1 at `freq` Hz."""
    ramp = numpy.linspace(0, freq * 2 * len, int(len * samplerate))
    return volume * (ramp % 2 - 1)
|
20 |
+
|
21 |
+
def square(len, freq, samplerate, volume=1):
    """Generate a square wave alternating between -1 and 1 at `freq` Hz."""
    steps = numpy.linspace(0, freq * 2 * len, int(len * samplerate)) // 1
    return volume * (steps % 2 * 2 - 1)
|
beat_manipulator/image.py
ADDED
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy
|
2 |
+
|
3 |
+
class image:
    """Container for an image representation of audio, with cv2/scipy-based
    effects applied per channel.

    Attributes (all optional; subclasses fill in what they need):
      image      -- pixel data: a 2-D array, an H x W x 3 RGB array, or a
                    channels-first stack of such arrays
      audio      -- source audio, channels-first
      samplerate -- audio sample rate in Hz
      beatmap    -- beat positions in samples (indexable)
      log        -- when True, _printlog prints progress messages
    """

    def __init__(self, image=None, audio=None, samplerate=None, beatmap=None, log=None):
        self.image = image
        self.audio = audio
        self.samplerate = samplerate
        self.beatmap = beatmap
        self.log = log

    def __getitem__(self, var):
        # Indexing the object indexes its beatmap, not the image.
        return self.beatmap[var]

    def _printlog(self, string, end=None, force = False, forcei = False):
        """Print `string` when logging is enabled (or `force`), unless
        `forcei` suppresses it."""
        if (self.log is True or force is True) and forcei is False:
            if end is None: print(string)
            else: print(string, end=end)

    def _toshape(self):
        """Wrap a single grayscale (2-D) or RGB (H x W x 3) image into a
        channels-first list.

        Bug fix: the second check is now an `elif`. Previously a 2-D image
        was wrapped into a plain list and then `.ndim` was read off that
        list on the next line, raising AttributeError.
        """
        if self.image.ndim == 2:
            self.image = [self.image]
        elif self.image.ndim == 3:
            if len(self.image[0][0]) == 3:
                self.image = [self.image]

    def _channel(self):
        """Yield each channel of the image.

        A single 2-D image or a single RGB image is yielded whole; a stack
        of channels is yielded one channel at a time.

        Bug fix: the stacked-image test previously computed
        `len(self.image[0][0] != 3)` -- the length of an element-wise
        comparison -- which crashes on 2-D input; it now checks the channel
        count itself, guarded by ndim.
        """
        if self.image.ndim == 2: yield self.image
        if self.image.ndim == 3:
            if len(self.image[0][0]) == 3:
                yield self.image
        if self.image.ndim > 3 or (self.image.ndim == 3 and len(self.image[0][0]) != 3):
            for i in self.image:
                yield i

    @property
    def combined(self):
        """All channels summed into one image (copy; channels unmodified)."""
        for i, channel in enumerate(self._channel()):
            if i == 0: combined = channel.copy()
            else: combined += channel
        return combined

    def open(self, path):
        """Load an image from `path` with cv2."""
        import cv2
        self.image = cv2.imread(path)

    def display(self):
        """Show the combined image in a cv2 window."""
        import cv2
        cv2.imshow('image', self.combined)

    def write(self, output, rotate = True, mode = 'square', maximum = 4096):
        """Resize the combined image and save it to `output` via cv2.

        mode: 'square' | 'tosmallest' | 'maximum' resize strategy.
        maximum: side-length cap in pixels.

        NOTE(review): in 'square' mode the min() result is immediately
        overridden by max(y, maximum), so the output is always
        maximum x maximum -- looks unintended but kept as-is; confirm.
        Bug fix: 'tosmallest' mode referenced an undefined name `x`; it now
        uses the computed smallest side `y`.
        """
        import cv2
        mode = mode.lower()
        image = self.combined
        if mode == 'square':
            y = min(len(image), len(image[0]), maximum)
            y = max(y, maximum)
            image = cv2.resize(image, (y, y), interpolation=cv2.INTER_NEAREST)
        elif mode == 'tosmallest':
            y = min(len(image), len(image[0]))
            image = cv2.resize(image, (y, y), interpolation=cv2.INTER_NEAREST)
        elif mode == 'maximum':
            x = min(len(image), maximum)
            y = min(len(image[0]), maximum)
            image = cv2.resize(image, (x, y), interpolation=cv2.INTER_NEAREST)
        if rotate is True: image = image.T
        cv2.imwrite(output, image)

    def effect_blur(self, value=(5,5)):
        """similar to echo"""
        import cv2
        # accept a scalar kernel size and expand it to (k, k)
        if isinstance(value, int) or isinstance(value, float): value = (value, value)
        for i in range(len(self.image)):
            self.image[i] = cv2.blur(self.image[i], value)

    def effect_median(self, value=5):
        """similar to echo"""
        import scipy.signal
        for i in range(len(self.image)):
            self.image[i] = scipy.signal.medfilt2d(self.image[i], value)

    def effect_uniform(self, value=5):
        """similar to echo"""
        import scipy.ndimage
        for i in range(len(self.image)):
            self.image[i] = scipy.ndimage.uniform_filter(self.image[i], value)

    def effect_shift2d(self, value=5):
        """very weird effect, mostly produces silence"""
        # NOTE(review): despite the name this applies fourier_gaussian, not a
        # shift; kept as-is since the audible result is the documented effect.
        import scipy.ndimage
        self.image = scipy.ndimage.fourier_gaussian(self.image, value)
        # rescale back into the 0..255 pixel range
        self.image = self.image*(255/numpy.max(self.image))

    def effect_spline(self, value=3):
        """barely noticeable echo"""
        import scipy.ndimage
        for i in range(len(self.image)):
            self.image[i] = scipy.ndimage.spline_filter(self.image[i], value)

    def effect_rotate(self, value=0.1):
        """rotates self.image in degrees"""
        import scipy.ndimage
        # rotate each channel separately, then restack (rotation changes the
        # channel shape, so it cannot be written back in place)
        image = [0 for _ in range(len(self.image))]
        for i in range(len(image)):
            image[i] = scipy.ndimage.rotate(self.image[i], value)
        self.image = numpy.asarray(image)

    def effect_gradient(self):
        """Replace the image with its gradient along the first axis."""
        self.image = numpy.asarray(numpy.gradient(self.image)[0])
|
110 |
+
|
111 |
+
class spectogram(image):
    # NOTE(review): class name is misspelled ("spectrogram") but it is part
    # of the public API, so it is kept as-is.
    """Image subclass whose pixels are a mel spectrogram of self.audio."""

    def generate(self, hop_length:int = 512):
        """Compute a mel spectrogram of self.audio into self.image (librosa).

        hop_length: STFT hop size in samples; stored on the instance so
        toaudio() can run the inverse transform with the same setting.
        """
        self.hop_length=hop_length
        import librosa
        self.image=librosa.feature.melspectrogram(y=self.audio, sr=self.samplerate, hop_length=hop_length)
        # every pixel starts out valid
        self.mask = numpy.full(self.image.shape, True)
        self._toshape()

    def toaudio(self):
        """Invert the mel spectrogram back into audio (librosa).

        The dstack/swapaxes chain rebuilds a (2, n_mels, frames) array from
        the two channel planes before calling the inverse transform.
        NOTE(review): this indexes self.image[0] and self.image[1], so it
        assumes exactly two channel planes -- confirm for mono input.
        """
        import librosa
        self.audio=librosa.feature.inverse.mel_to_audio(M=numpy.swapaxes(numpy.swapaxes(numpy.dstack(( self.image[0,:,:], self.image[1,:,:])), 0, 2), 1,2), sr=self.samplerate, hop_length=self.hop_length)
        return self.audio
|
123 |
+
|
124 |
+
|
125 |
+
class beat_image(image):
    """Image subclass whose rows are the song's beats: one row (per channel)
    per beat, padded/trimmed to a common length."""

    def generate(self, mode='median'):
        """Turns song into an image based on beat positions."""
        # mode picks the common row length: substring match on
        # 'max' / 'med' / 'av' selects maximum, median, or average beat length.
        assert self.beatmap is not None, 'Please run song.beatmap.generate() first. beat_image.generate needs beatmap to work.'
        self._printlog('generating beat-image; ')
        mode=mode.lower()
        if isinstance(self.audio,numpy.ndarray): self.audio=numpy.ndarray.tolist(self.audio)
        # add the bits before first beat
        self.image=([self.audio[0][0:self.beatmap[0]],], [self.audio[1][0:self.beatmap[0]],])
        # maximum is needed to make the array homogeneous
        maximum=self.beatmap[0]
        values=[]
        values.append(self.beatmap[0])
        for i in range(len(self.beatmap)-1):
            # one slice of audio per beat, for each of the two channels
            self.image[0].append(self.audio[0][self.beatmap[i]:self.beatmap[i+1]])
            self.image[1].append(self.audio[1][self.beatmap[i]:self.beatmap[i+1]])
            maximum = max(self.beatmap[i+1]-self.beatmap[i], maximum)
            values.append(self.beatmap[i+1]-self.beatmap[i])
        if 'max' in mode: norm=maximum
        elif 'med' in mode: norm=numpy.median(values)
        elif 'av' in mode: norm=numpy.average(values)
        # pad short rows with NaN / trim long rows so the array is rectangular
        for i in range(len(self.image[0])):
            beat_diff=int(norm-len(self.image[0][i]))
            if beat_diff>0:
                self.image[0][i].extend([numpy.nan]*beat_diff)
                self.image[1][i].extend([numpy.nan]*beat_diff)
            elif beat_diff<0:
                self.image[0][i]=self.image[0][i][:beat_diff]
                self.image[1][i]=self.image[1][i][:beat_diff]
        # scale samples (-1..1) into a pixel-ish range
        self.image=numpy.asarray(self.image)*255
        # NOTE(review): `x == numpy.nan` is always False (NaN never compares
        # equal), so this mask is all-False; numpy.isnan() was likely
        # intended. Left unchanged because toaudio() silently tolerates the
        # mask failing to apply -- confirm before fixing.
        self.mask = self.image == numpy.nan
        self.image=numpy.nan_to_num(self.image)
        self._toshape()

    def toaudio(self):
        """Flatten the beat-image rows back into channels-first audio,
        dropping the NaN padding that generate() added."""
        self._printlog('converting beat-image to audio; ')
        image=numpy.asarray(self.image)/255
        # re-mark padded samples as NaN so they can be filtered out below;
        # shape-mismatched masks are silently ignored
        try: image[self.mask]=numpy.nan
        except IndexError: pass
        audio=list([] for _ in range(len(image)))
        #print(audio)
        #print(len(image), len(image[0]), len(image[1]), len(image[0][0]), len(image[1][0]), len(image[0][1]), len(image[1][1]))
        for j in range(len(image)):
            for i in range(len(image[j])):
                beat=image[j][i]
                #print(i,j, len(image[0][j]), len(image[1][j]), len(beat), end=' ')
                # strip the NaN padding before concatenating
                beat=beat[~numpy.isnan(beat)]
                #print(len(beat), end=' ')
                audio[j].extend(beat)
        #print(len(audio[0]), len(audio[1]))
        self.audio=numpy.asarray(audio)
        return self.audio
|
BeatManipulator.py → beat_manipulator/main.py
RENAMED
@@ -1,14 +1,33 @@
|
|
1 |
import numpy
|
2 |
numpy.set_printoptions(suppress=True)
|
|
|
|
|
3 |
|
4 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
5 |
if filename is None:
|
6 |
from tkinter.filedialog import askopenfilename
|
7 |
filename = askopenfilename(title='select song', filetypes=[("mp3", ".mp3"),("wav", ".wav"),("flac", ".flac"),("ogg", ".ogg"),("wma", ".wma")])
|
8 |
filename=filename.replace('\\', '/')
|
9 |
if lib=='pedalboard.io':
|
10 |
-
|
11 |
-
with AudioFile(filename) as f:
|
12 |
audio = f.read(f.frames)
|
13 |
samplerate = f.samplerate
|
14 |
elif lib=='librosa':
|
@@ -35,64 +54,162 @@ def open_audio(filename=None, lib='auto'):
|
|
35 |
audio,samplerate=open_audio(filename, i)
|
36 |
break
|
37 |
except Exception as e:
|
38 |
-
print(e)
|
39 |
-
if len(audio)<2: audio=[audio
|
40 |
return audio,samplerate
|
41 |
|
42 |
|
43 |
-
def
|
44 |
-
x=numpy.concatenate((numpy.linspace(1,0,smoothing),numpy.linspace(vol0,vol1,int(length*samplerate))**curve))
|
45 |
-
return(x,x)
|
46 |
-
|
47 |
-
def outputfilename(output, filename, suffix='_beatswap'):
|
48 |
if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
|
49 |
output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
|
50 |
-
return output+''.join(''.join(filename.split('/')[-1]).split('.')[:-1])+suffix+'.
|
51 |
|
|
|
|
|
|
|
|
|
|
|
52 |
|
53 |
-
|
54 |
-
|
|
|
|
|
|
|
55 |
|
56 |
-
|
57 |
-
return (numpy.linspace(0, freq*2*len, int(len*samplerate))%2 - 1)*volume
|
58 |
|
59 |
-
|
60 |
-
return ((numpy.linspace(0, freq*2*len, int(len*samplerate)))//1%2 * 2 - 1)*volume
|
61 |
|
62 |
-
|
63 |
-
|
64 |
-
|
65 |
-
|
66 |
-
|
67 |
-
|
68 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
69 |
else:
|
70 |
-
self.
|
71 |
-
|
72 |
-
|
73 |
-
|
74 |
-
self.
|
75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
76 |
self.samplerate=int(self.samplerate)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
77 |
|
78 |
-
def
|
79 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
80 |
if lib=='pedalboard.io':
|
81 |
-
if not isinstance(self.audio,numpy.ndarray): self.audio=numpy.asarray(self.audio)
|
82 |
#print(audio)
|
83 |
-
|
84 |
-
with AudioFile(output, 'w', self.samplerate, self.audio.shape[0]) as f:
|
85 |
f.write(self.audio)
|
86 |
elif lib=='soundfile':
|
87 |
-
if not isinstance(self.audio,numpy.ndarray): self.audio=numpy.asarray(self.audio)
|
88 |
audio=self.audio.T
|
89 |
import soundfile
|
90 |
soundfile.write(output, audio, self.samplerate)
|
91 |
del audio
|
92 |
elif lib=='auto':
|
93 |
-
for i in
|
94 |
try:
|
95 |
-
|
96 |
break
|
97 |
except Exception as e:
|
98 |
print(e)
|
@@ -106,163 +223,38 @@ class song:
|
|
106 |
# output = output + '.' + format
|
107 |
# song.export(output, format=format)
|
108 |
|
109 |
-
def
|
110 |
-
|
111 |
-
|
112 |
-
|
113 |
-
|
114 |
-
|
115 |
-
|
116 |
-
|
117 |
-
|
118 |
-
|
119 |
-
|
120 |
-
|
121 |
-
|
122 |
-
|
123 |
-
|
124 |
-
|
125 |
-
|
126 |
-
|
127 |
-
|
128 |
-
|
129 |
-
self.bpm=numpy.average(self.beatmap)/self.samplerate
|
130 |
-
return
|
131 |
-
except OSError: pass
|
132 |
-
|
133 |
-
if lib.split('.')[0]=='madmom':
|
134 |
-
from collections.abc import MutableMapping, MutableSequence
|
135 |
-
import madmom
|
136 |
-
if lib=='madmom.BeatTrackingProcessor':
|
137 |
-
proc = madmom.features.beats.BeatTrackingProcessor(fps=100)
|
138 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
139 |
-
self.beatmap= proc(act)*self.samplerate
|
140 |
-
if lib=='madmom.BeatTrackingProcessor.constant':
|
141 |
-
proc = madmom.features.beats.BeatTrackingProcessor(fps=100, look_ahead=None)
|
142 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
143 |
-
self.beatmap= proc(act)*self.samplerate
|
144 |
-
if lib=='madmom.BeatTrackingProcessor.consistent':
|
145 |
-
proc = madmom.features.beats.BeatTrackingProcessor(fps=100, look_ahead=None, look_aside=0)
|
146 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
147 |
-
self.beatmap= proc(act)*self.samplerate
|
148 |
-
elif lib=='madmom.BeatDetectionProcessor':
|
149 |
-
proc = madmom.features.beats.BeatDetectionProcessor(fps=100)
|
150 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
151 |
-
self.beatmap= proc(act)*self.samplerate
|
152 |
-
elif lib=='madmom.BeatDetectionProcessor.consistent':
|
153 |
-
proc = madmom.features.beats.BeatDetectionProcessor(fps=100, look_aside=0)
|
154 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
155 |
-
self.beatmap= proc(act)*self.samplerate
|
156 |
-
elif lib=='madmom.CRFBeatDetectionProcessor':
|
157 |
-
proc = madmom.features.beats.CRFBeatDetectionProcessor(fps=100)
|
158 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
159 |
-
self.beatmap= proc(act)*self.samplerate
|
160 |
-
elif lib=='madmom.CRFBeatDetectionProcessor.constant':
|
161 |
-
proc = madmom.features.beats.CRFBeatDetectionProcessor(fps=100, use_factors=True, factors=[0.5, 1, 2])
|
162 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
163 |
-
self.beatmap= proc(act)*self.samplerate
|
164 |
-
elif lib=='madmom.DBNBeatTrackingProcessor':
|
165 |
-
proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100)
|
166 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
167 |
-
self.beatmap= proc(act)*self.samplerate
|
168 |
-
elif lib=='madmom.DBNBeatTrackingProcessor.1000':
|
169 |
-
proc = madmom.features.beats.DBNBeatTrackingProcessor(fps=100, transition_lambda=1000)
|
170 |
-
act = madmom.features.beats.RNNBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
171 |
-
self.beatmap= proc(act)*self.samplerate
|
172 |
-
elif lib=='madmom.MultiModelSelectionProcessor': #broken
|
173 |
-
proc = madmom.features.beats.RNNBeatProcessor(post_processor=None)
|
174 |
-
predictions = proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
175 |
-
mm_proc = madmom.features.beats.MultiModelSelectionProcessor(num_ref_predictions=None)
|
176 |
-
self.beatmap= numpy.sort(mm_proc(predictions)*self.samplerate)
|
177 |
-
elif lib=='madmom.DBNDownBeatTrackingProcessor':
|
178 |
-
proc = madmom.features.downbeats.DBNDownBeatTrackingProcessor(beats_per_bar=[4], fps=100)
|
179 |
-
act = madmom.features.downbeats.RNNDownBeatProcessor()(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
180 |
-
self.beatmap= proc(act)*self.samplerate
|
181 |
-
self.beatmap=self.beatmap[:,0]
|
182 |
-
elif lib=='madmom.PatternTrackingProcessor': #broken
|
183 |
-
from madmom.models import PATTERNS_BALLROOM
|
184 |
-
proc = madmom.features.downbeats.PatternTrackingProcessor(PATTERNS_BALLROOM, fps=50)
|
185 |
-
from madmom.audio.spectrogram import LogarithmicSpectrogramProcessor, SpectrogramDifferenceProcessor, MultiBandSpectrogramProcessor
|
186 |
-
from madmom.processors import SequentialProcessor
|
187 |
-
log = LogarithmicSpectrogramProcessor()
|
188 |
-
diff = SpectrogramDifferenceProcessor(positive_diffs=True)
|
189 |
-
mb = MultiBandSpectrogramProcessor(crossover_frequencies=[270])
|
190 |
-
pre_proc = SequentialProcessor([log, diff, mb])
|
191 |
-
act = pre_proc(madmom.audio.signal.Signal(self.audio.T, self.samplerate))
|
192 |
-
self.beatmap= proc(act)*self.samplerate
|
193 |
-
self.beatmap=self.beatmap[:,0]
|
194 |
-
elif lib=='madmom.DBNBarTrackingProcessor': #broken
|
195 |
-
beats = song.analyze_beats(self,lib='madmom.DBNBeatTrackingProcessor', caching = caching)
|
196 |
-
proc = madmom.features.downbeats.DBNBarTrackingProcessor(beats_per_bar=[4], fps=100)
|
197 |
-
act = madmom.features.downbeats.RNNBarProcessor()(((madmom.audio.signal.Signal(self.audio.T, self.samplerate)), beats))
|
198 |
-
self.beatmap= proc(act)*self.samplerate
|
199 |
-
elif lib=='librosa': #broken in 3.9, works in 3.8
|
200 |
-
import librosa
|
201 |
-
beat_frames = librosa.beat.beat_track(y=self.audio[0], sr=self.samplerate,hop_length=512)
|
202 |
-
self.beatmap = librosa.frames_to_samples(beat_frames[1])
|
203 |
-
# elif lib=='BeatNet':
|
204 |
-
# from BeatNet.BeatNet import BeatNet # doesn't seem to work well for some reason
|
205 |
-
# estimator = BeatNet(1, mode='offline', inference_model='DBN', plot=[], thread=False)
|
206 |
-
# beatmap = estimator.process(filename)
|
207 |
-
# beatmap=beatmap[:,0]*samplerate
|
208 |
-
# elif lib=='jump-reward-inference': # doesn't seem to work well for some reason
|
209 |
-
# from jump_reward_inference.joint_tracker import joint_inference
|
210 |
-
# estimator = joint_inference(1, plot=False)
|
211 |
-
# beatmap = estimator.process(filename)
|
212 |
-
# beatmap=beatmap[:,0]*samplerate
|
213 |
-
|
214 |
-
elif lib=='split':
|
215 |
-
self.beatmap= list(range(0, len(self.audio), len(self.audio)//split))
|
216 |
-
if lib.split('.')[0]=='madmom':
|
217 |
-
self.beatmap=numpy.absolute(self.beatmap-500)
|
218 |
-
|
219 |
-
if caching is True: numpy.savetxt(cacheDir, self.beatmap.astype(int), fmt='%d')
|
220 |
-
self.bpm=numpy.average(self.beatmap)/self.samplerate
|
221 |
-
self.beatmap=self.beatmap.astype(int)
|
222 |
-
|
223 |
-
def audio_autotrim(self):
|
224 |
n=0
|
225 |
for i in self.audio[0]:
|
226 |
if i>=0.0001:break
|
227 |
n+=1
|
|
|
228 |
self.audio = numpy.asarray([self.audio[0,n:], self.audio[1,n:]])
|
229 |
-
|
230 |
-
|
231 |
-
|
232 |
-
|
233 |
-
|
234 |
-
|
235 |
-
def beatmap_autoscale(self):
|
236 |
-
bpm=(self.beatmap[-1]-self.beatmap[0])/(len(self.beatmap)-1)
|
237 |
-
#print('BPM =', (bpm/samplerate) * 240, bpm)
|
238 |
-
if bpm>=160000: scale=1/8
|
239 |
-
elif (bpm)>=80000: scale=1/4
|
240 |
-
elif (bpm)>=40000: scale=1/2
|
241 |
-
elif (bpm)<=20000: scale=2
|
242 |
-
elif (bpm)<=10000: scale=4
|
243 |
-
elif (bpm)<=5000: scale=8
|
244 |
-
song.beatmap_scale(self,scale)
|
245 |
-
|
246 |
-
def beatmap_autoinsert(self):
|
247 |
-
diff=(self.beatmap[1]-self.beatmap[0])
|
248 |
-
a=0
|
249 |
-
while diff<self.beatmap[0] and a<100:
|
250 |
-
self.beatmap=numpy.insert(self.beatmap, 0, self.beatmap[0]-diff)
|
251 |
-
a+=1
|
252 |
-
|
253 |
-
def beatmap_shift(self, shift: float):
|
254 |
-
if shift>0:
|
255 |
-
for i in range(len(self.beatmap)-1):
|
256 |
-
self.beatmap[i] = self.beatmap[i] + shift * (self.beatmap[i+1] - self.beatmap[i])
|
257 |
-
elif shift<0:
|
258 |
-
for i in reversed(range(len(self.beatmap)-1)):
|
259 |
-
self.beatmap[i+1] = self.beatmap[i+1] - shift * (self.beatmap[i] - self.beatmap[i+1])
|
260 |
-
|
261 |
-
def beatmap_trim(self, start=0, end=None):
|
262 |
-
start*=self.samplerate
|
263 |
-
self.beatmap=self.beatmap[self.beatmap>=start].astype(int)
|
264 |
-
if end is not None: self.beatmap=self.beatmap[self.beatmap<=end].astype(int)
|
265 |
-
|
266 |
|
267 |
def beatswap(self, pattern: str, sep=',', smoothing=40, smoothing_mode='replace'):
|
268 |
import math, numpy
|
@@ -270,6 +262,7 @@ class song:
|
|
270 |
size=0
|
271 |
#cut processing??? not worth it, it is really fast anyways
|
272 |
pattern=pattern.replace(' ', '').split(sep)
|
|
|
273 |
for j in pattern:
|
274 |
s=''
|
275 |
if '?' not in j:
|
@@ -277,66 +270,59 @@ class song:
|
|
277 |
if i.isdigit() or i=='.' or i=='-' or i=='/' or i=='+' or i=='%': s=str(s)+str(i)
|
278 |
elif i==':':
|
279 |
if s=='': s='0'
|
280 |
-
#print(s,
|
281 |
-
size=max(math.ceil(float(
|
282 |
s=''
|
283 |
elif s!='': break
|
284 |
if s=='': s='0'
|
285 |
if s=='': s='0'
|
286 |
-
size=max(math.ceil(float(
|
287 |
-
|
288 |
-
if isinstance(self.audio,numpy.ndarray): self.audio=numpy.ndarray.tolist(self.audio)
|
289 |
-
if self.beatmap.dtype!='int32': self.beatmap=self.beatmap.astype(int)
|
290 |
-
|
291 |
-
#beat=[]
|
292 |
-
#start=audio[:beatmap[0]]
|
293 |
-
#end=audio[beatmap[-1]:audio[-1]]
|
294 |
-
#for i in range(len(beatmap)-1):
|
295 |
-
# beat[i]=audio[beatmap[i]:beatmap[i+1]]
|
296 |
-
|
297 |
-
# audio is a tuple with l and r channels
|
298 |
-
#print(len(audio))
|
299 |
|
|
|
|
|
|
|
300 |
self.audio=(self.audio[0], self.audio[1])
|
301 |
-
|
|
|
302 |
result=(self.audio[0][:self.beatmap[0]],self.audio[1][:self.beatmap[0]])
|
303 |
beat=numpy.asarray([[],[]])
|
304 |
|
305 |
# size, iterations are integers
|
306 |
size=int(max(size//1, 1))
|
307 |
|
308 |
-
|
309 |
-
# add beat to the end
|
310 |
-
self.beatmap=numpy.unique(numpy.abs(numpy.append(self.beatmap, len(self.audio[0]))))
|
311 |
|
312 |
iterations=int(len(self.beatmap)//size)
|
313 |
|
314 |
if 'random' in pattern[0].lower():
|
315 |
import random
|
316 |
for i in range(len(self.beatmap)):
|
|
|
317 |
choice=random.randint(1,len(self.beatmap)-1)
|
318 |
for a in range(len(self.audio)):
|
319 |
-
|
320 |
-
|
321 |
-
|
|
|
|
|
322 |
self.audio = result
|
323 |
return
|
324 |
|
325 |
if 'reverse' in pattern[0].lower():
|
326 |
for a in range(len(self.audio)):
|
327 |
for i in list(reversed(range(len(self.beatmap))))[:-1]:
|
328 |
-
|
329 |
-
|
330 |
-
|
331 |
-
|
332 |
-
|
|
|
|
|
333 |
|
334 |
self.audio = result
|
335 |
return
|
336 |
|
337 |
#print(len(result[0]))
|
338 |
-
|
339 |
-
|
340 |
def beatswap_getnum(i: str, c: str):
|
341 |
if c in i:
|
342 |
try:
|
@@ -367,8 +353,8 @@ class song:
|
|
367 |
|
368 |
# If character is : - get start
|
369 |
elif s!='' and c==':':
|
370 |
-
#print ('Beat start:',s,'=',
|
371 |
-
try: st=self.beatmap[int(
|
372 |
except IndexError: break
|
373 |
s=''
|
374 |
|
@@ -377,18 +363,18 @@ class song:
|
|
377 |
|
378 |
# start already exists
|
379 |
if st is not None:
|
380 |
-
#print ('Beat end: ',s,'=',
|
381 |
try:
|
382 |
-
s=self.beatmap[int(
|
383 |
#print(s)
|
384 |
except IndexError: break
|
385 |
else:
|
386 |
# start doesn't exist
|
387 |
-
#print ('Beat start:',s,'=',
|
388 |
-
#print ('Beat end: ',s,'=',
|
389 |
try:
|
390 |
-
st=self.beatmap[int(
|
391 |
-
s=self.beatmap[int(
|
392 |
except IndexError: break
|
393 |
|
394 |
if st>s:
|
@@ -408,25 +394,25 @@ class song:
|
|
408 |
z=beatswap_getnum(i,'c')
|
409 |
if z is not None:
|
410 |
if z=='': beat[0],beat[1]=beat[1],beat[0]
|
411 |
-
elif
|
412 |
else:beat[1]*=0
|
413 |
|
414 |
# volume
|
415 |
z=beatswap_getnum(i,'v')
|
416 |
if z is not None:
|
417 |
if z=='': z='0'
|
418 |
-
beat*=
|
419 |
|
420 |
z=beatswap_getnum(i,'t')
|
421 |
if z is not None:
|
422 |
if z=='': z='2'
|
423 |
-
beat**=1/
|
424 |
|
425 |
# speed
|
426 |
z=beatswap_getnum(i,'s')
|
427 |
if z is not None:
|
428 |
if z=='': z='2'
|
429 |
-
z=
|
430 |
if z<1:
|
431 |
beat=numpy.asarray((numpy.repeat(beat[0],int(1//z)),numpy.repeat(beat[1],int(1//z))))
|
432 |
else:
|
@@ -436,7 +422,7 @@ class song:
|
|
436 |
z=beatswap_getnum(i,'b')
|
437 |
if z is not None:
|
438 |
if z=='': z='3'
|
439 |
-
z=1/
|
440 |
if z<1: beat=beat*z
|
441 |
beat=numpy.around(beat, max(int(z), 1))
|
442 |
if z<1: beat=beat/z
|
@@ -445,7 +431,7 @@ class song:
|
|
445 |
z=beatswap_getnum(i,'d')
|
446 |
if z is not None:
|
447 |
if z=='': z='3'
|
448 |
-
z=int(
|
449 |
beat=numpy.asarray((numpy.repeat(beat[0,::z],z),numpy.repeat(beat[1,::z],z)))
|
450 |
|
451 |
# convert to list
|
@@ -461,26 +447,52 @@ class song:
|
|
461 |
#print('Adding beat... a, s, st:', a, s, st, sep=', ')
|
462 |
#print(result[a][-1])
|
463 |
#print(beat[a][0])
|
464 |
-
|
465 |
-
|
|
|
|
|
466 |
#print(len(result[0]))
|
467 |
|
468 |
#
|
469 |
break
|
470 |
-
#print(time.process_time() - benchmark)
|
471 |
|
472 |
self.audio = result
|
|
|
473 |
|
474 |
def beatsample(self, audio2, shift=0):
|
|
|
475 |
try: l=len(audio2[0])
|
476 |
except (TypeError, IndexError):
|
477 |
l=len(audio2)
|
478 |
audio2=numpy.vstack((audio2,audio2))
|
479 |
for i in range(len(self.beatmap)):
|
|
|
480 |
try: self.audio[:,int(self.beatmap[i]) + int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i]))) : int(self.beatmap[i])+int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i])))+int(l)]+=audio2
|
481 |
except (IndexError, ValueError): pass
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
482 |
|
483 |
def sidechain(self, audio2, shift=0, smoothing=40):
|
|
|
484 |
try: l=len(audio2[0])
|
485 |
except (TypeError, IndexError):
|
486 |
l=len(audio2)
|
@@ -488,8 +500,9 @@ class song:
|
|
488 |
for i in range(len(self.beatmap)):
|
489 |
try: self.audio[:,int(self.beatmap[i])-smoothing + int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i]))) : int(self.beatmap[i])-smoothing+int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i])))+int(l)]*=audio2
|
490 |
except (IndexError, ValueError): break
|
|
|
491 |
|
492 |
-
def quick_beatswap(self, output:str='', pattern:str=None, scale:float=1, shift:float=0, start:float=0, end:float=None, autotrim:bool=True, autoscale:bool=False, autoinsert:bool=False, suffix:str='
|
493 |
"""Generates beatmap if it isn't generated, applies beatswapping to the song and writes the processed song it next to the .py file. If you don't want to write the file, set output=None
|
494 |
|
495 |
output: can be a relative or an absolute path to a folder or to a file. Filename will be created from the original filename + a suffix to avoid overwriting. If path already contains a filename which ends with audio file extension, such as .mp3, that filename will be used.
|
@@ -513,26 +526,53 @@ class song:
|
|
513 |
suffix: suffix that will be appended to the filename
|
514 |
|
515 |
lib: beat detection library"""
|
516 |
-
if self.
|
517 |
-
|
518 |
-
|
519 |
-
|
520 |
-
|
521 |
-
|
522 |
-
|
523 |
-
|
524 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
525 |
|
526 |
if output is not None:
|
527 |
if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
|
528 |
output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
|
529 |
-
output=output+''.join(''.join(self.
|
530 |
-
|
531 |
|
532 |
-
self.beatmap=save
|
|
|
|
|
|
|
533 |
|
534 |
|
535 |
-
def quick_sidechain(self, output:str='', audio2:numpy.array=None, scale:float=1, shift:float=0, start:float=0, end:float=None, autotrim:bool=True, autoscale:bool=False, autoinsert:bool=False, filename2:str=None, suffix:str='
|
536 |
"""Generates beatmap if it isn't generated, applies fake sidechain on each beat to the song and writes the processed song it next to the .py file. If you don't want to write the file, set output=None
|
537 |
|
538 |
output: can be a relative or an absolute path to a folder or to a file. Filename will be created from the original filename + a suffix to avoid overwriting. If path already contains a filename which ends with audio file extension, such as .mp3, that filename will be used.
|
@@ -558,31 +598,40 @@ class song:
|
|
558 |
suffix: suffix that will be appended to the filename
|
559 |
|
560 |
lib: beat detection library"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
561 |
if filename2 is None and audio2 is None:
|
562 |
-
|
|
|
563 |
|
564 |
if audio2 is None:
|
565 |
audio2, samplerate2=open_audio(filename2)
|
566 |
|
567 |
-
if self.
|
568 |
-
if autotrim is True:
|
569 |
-
save=self.beatmap
|
570 |
-
if autoscale is True:
|
571 |
-
if shift!=0:
|
572 |
-
if scale!=1:
|
573 |
-
if autoinsert is True:
|
574 |
-
if start!=0 or end is not None:
|
575 |
-
|
576 |
|
577 |
if output is not None:
|
578 |
if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
|
579 |
output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
|
580 |
-
output=output+''.join(''.join(self.
|
581 |
-
|
582 |
|
583 |
-
self.beatmap=save
|
|
|
584 |
|
585 |
-
def quick_beatsample(self, output:str='', filename2:str=None, scale:float=1, shift:float=0, start:float=0, end:float=None, autotrim:bool=True, autoscale:bool=False, autoinsert:bool=False, audio2:numpy.array=None, suffix:str='
|
586 |
"""Generates beatmap if it isn't generated, adds chosen sample to each beat of the song and writes the processed song it next to the .py file. If you don't want to write the file, set output=None
|
587 |
|
588 |
output: can be a relative or an absolute path to a folder or to a file. Filename will be created from the original filename + a suffix to avoid overwriting. If path already contains a filename which ends with audio file extension, such as .mp3, that filename will be used.
|
@@ -606,6 +655,13 @@ class song:
|
|
606 |
suffix: suffix that will be appended to the filename
|
607 |
|
608 |
lib: beat detection library"""
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
609 |
if filename2 is None and audio2 is None:
|
610 |
from tkinter.filedialog import askopenfilename
|
611 |
filename2 = askopenfilename(title='select sidechain impulse', filetypes=[("mp3", ".mp3"),("wav", ".wav"),("flac", ".flac"),("ogg", ".ogg"),("wma", ".wma")])
|
@@ -613,96 +669,97 @@ class song:
|
|
613 |
if audio2 is None:
|
614 |
audio2, samplerate2=open_audio(filename2)
|
615 |
|
616 |
-
if self.
|
617 |
-
if autotrim is True:
|
618 |
-
save=
|
619 |
-
if autoscale is True:
|
620 |
-
if shift!=0:
|
621 |
-
if scale!=1:
|
622 |
-
if autoinsert is True:
|
623 |
-
if start!=0 or end is not None:
|
624 |
-
|
625 |
|
626 |
if output is not None:
|
627 |
if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
|
628 |
output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
|
629 |
-
output=output+''.join(''.join(self.
|
630 |
-
|
631 |
-
self.beatmap=save
|
632 |
-
|
633 |
-
|
634 |
-
|
635 |
-
|
636 |
-
|
|
|
|
|
637 |
|
638 |
-
def spectogram_audio(self):
|
639 |
-
import librosa
|
640 |
-
self.audio=librosa.feature.inverse.mel_to_audio(M=numpy.swapaxes(numpy.swapaxes(numpy.dstack(( self.spectogram[0,:,:], self.spectogram[1,:,:])), 0, 2), 1,2), sr=self.samplerate, hop_length=self.hop_length)
|
641 |
-
|
642 |
-
def write_image(self):
|
643 |
-
"""Turns song into an image based on beat positions. Currently semi-broken"""
|
644 |
-
import cv2
|
645 |
-
audio=self.audio[0].tolist()
|
646 |
-
height=len(audio)/len(self.beatmap)
|
647 |
-
width=len(self.beatmap)
|
648 |
-
height*=3
|
649 |
-
if height>width:
|
650 |
-
increase_length=int(height/width)
|
651 |
-
reduce_width=1
|
652 |
-
else:
|
653 |
-
reduce_width=int(width/height)
|
654 |
-
increase_length=1
|
655 |
-
increase_length/=10
|
656 |
-
reduce_width*=10
|
657 |
-
image=[audio[0:self.beatmap[0]]]
|
658 |
-
maximum=len(image)
|
659 |
-
for i in range(len(self.beatmap)-1):
|
660 |
-
image.append(audio[self.beatmap[i]:self.beatmap[i+1]])
|
661 |
-
maximum=max(maximum,len(image[i]))
|
662 |
-
for i in range(len(image)):
|
663 |
-
image[i].extend((maximum-len(image[i]))*[0])
|
664 |
-
image[i]=image[i][::reduce_width]
|
665 |
-
|
666 |
-
audio=self.audio[1].tolist()
|
667 |
-
image2=[audio[0:self.beatmap[0]]]
|
668 |
-
for i in range(len(self.beatmap)-1):
|
669 |
-
image2.append(audio[self.beatmap[i]:self.beatmap[i+1]])
|
670 |
-
for i in range(len(image2)):
|
671 |
-
image2[i].extend((maximum-len(image2[i]))*[0])
|
672 |
-
image2[i]=image2[i][::reduce_width]
|
673 |
-
print(len(image[i]), len(image2[i]))
|
674 |
-
|
675 |
-
image=numpy.asarray(image)*255
|
676 |
-
image2=numpy.asarray(image2)*255
|
677 |
-
image3=numpy.add(image, image2)/2
|
678 |
-
image,image2,image3=numpy.repeat(image,increase_length,axis=0),numpy.repeat(image2,increase_length,axis=0),numpy.repeat(image3,increase_length,axis=0)
|
679 |
-
image=cv2.merge([image.T,image2.T, image3.T])
|
680 |
-
|
681 |
-
#image=image.astype('uint8')
|
682 |
-
#image=cv2.resize(image, (0,0), fx=len(image))
|
683 |
-
cv2.imwrite('cv2_output.png', image)
|
684 |
|
685 |
def fix_beatmap(filename, lib='madmom.BeatDetectionProcessor', scale=1, shift=0):
|
|
|
|
|
|
|
686 |
track=song(filename)
|
687 |
-
track.
|
688 |
-
track.
|
689 |
-
track.beatmap_scale(scale)
|
690 |
-
id=hex(len(track.audio[0]))
|
691 |
import os
|
|
|
|
|
|
|
|
|
|
|
|
|
692 |
if not os.path.exists('SavedBeatmaps'):
|
693 |
os.mkdir('SavedBeatmaps')
|
694 |
-
cacheDir="
|
695 |
-
|
696 |
-
|
697 |
-
|
|
|
|
|
|
|
|
|
698 |
|
699 |
def delete_beatmap(filename, lib='madmom.BeatDetectionProcessor'):
|
700 |
-
track=
|
701 |
-
|
702 |
import os
|
703 |
if not os.path.exists('SavedBeatmaps'):
|
704 |
os.mkdir('SavedBeatmaps')
|
705 |
-
cacheDir="SavedBeatmaps/" + ''.join(track.filename.split('/')[-1]) + "_"+lib+"_"+
|
706 |
-
|
707 |
-
|
708 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
import numpy
|
2 |
numpy.set_printoptions(suppress=True)
|
3 |
+
from .beatmap import beatmap, hitmap
|
4 |
+
from .image import spectogram, beat_image
|
5 |
|
6 |
+
def _safer_eval(string:str) -> float:
|
7 |
+
if isinstance(string, str):
|
8 |
+
#print(''.join([i for i in string if i.isdecimal() or i in '.+-*/']))
|
9 |
+
string = eval(''.join([i for i in string if i.isdecimal() or i in '.+-*/']))
|
10 |
+
return string
|
11 |
+
|
12 |
+
def open_audio(filename=None, lib='auto') -> numpy.ndarray:
|
13 |
+
"""Opens audio from path, returns (audio, samplerate) tuple.
|
14 |
+
|
15 |
+
Audio is returned as an array with normal volume range between -1, 1.
|
16 |
+
|
17 |
+
Example of returned audio:
|
18 |
+
|
19 |
+
[
|
20 |
+
[0.35, -0.25, ... -0.15, -0.15],
|
21 |
+
|
22 |
+
[0.31, -0.21, ... -0.11, -0.07]
|
23 |
+
]"""
|
24 |
if filename is None:
|
25 |
from tkinter.filedialog import askopenfilename
|
26 |
filename = askopenfilename(title='select song', filetypes=[("mp3", ".mp3"),("wav", ".wav"),("flac", ".flac"),("ogg", ".ogg"),("wma", ".wma")])
|
27 |
filename=filename.replace('\\', '/')
|
28 |
if lib=='pedalboard.io':
|
29 |
+
import pedalboard.io
|
30 |
+
with pedalboard.io.AudioFile(filename) as f:
|
31 |
audio = f.read(f.frames)
|
32 |
samplerate = f.samplerate
|
33 |
elif lib=='librosa':
|
|
|
54 |
audio,samplerate=open_audio(filename, i)
|
55 |
break
|
56 |
except Exception as e:
|
57 |
+
print(f'open_audio with {i}: {e}')
|
58 |
+
if len(audio)<2: audio=[audio]
|
59 |
return audio,samplerate
|
60 |
|
61 |
|
62 |
+
def _outputfilename(output, filename, suffix=' (beatswap)', ext='mp3'):
|
|
|
|
|
|
|
|
|
63 |
if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
|
64 |
output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
|
65 |
+
return output+'.'.join(''.join(filename.split('/')[-1]).split('.')[:-1])+suffix+'.'+ext
|
66 |
|
67 |
+
class song:
|
68 |
+
def __init__(self, path:str=None, audio:numpy.array=None, samplerate:int=None, bmap:list=None, caching=True, filename=None, copied=False, log=True):
|
69 |
+
"""song can be loaded from path to an audio file, or from a list/numpy array and samplerate. Audio array should have values from -1 to 1, multiple channels should be stacked vertically. Optionally you can provide your own beat map.
|
70 |
+
|
71 |
+
Song object has the following attributes:
|
72 |
|
73 |
+
path - file system path to load the audio file from. Can be absolute or relative.
|
74 |
+
|
75 |
+
audio - either a numpy array with shape=(channels, values) or a list with two lists. Audio is converted to list for certain operations to improve performance.
|
76 |
+
|
77 |
+
samplerate - integer, for example 44100. Determined automatically when audio is loaded from a path.
|
78 |
|
79 |
+
bmap - list of integers, with positions of each beat in samples.
|
|
|
80 |
|
81 |
+
caching = True - if True, generated beatmaps will be saved to SavedBeatmaps folder and loaded when the same audio file is opened again, instead of generating beatmap each time.
|
|
|
82 |
|
83 |
+
log = True - if True, minimal info about all operations will be printed.
|
84 |
+
"""
|
85 |
+
assert not (audio is not None and samplerate is None), 'If audio is provided, samplerate should be provided as well, for example samplerate=44100'
|
86 |
+
|
87 |
+
self.audio=audio
|
88 |
+
self.samplerate=samplerate
|
89 |
+
|
90 |
+
# ask for a path if audio isn't specified
|
91 |
+
if path is None and filename is None:
|
92 |
+
if audio is None:
|
93 |
+
from tkinter.filedialog import askopenfilename
|
94 |
+
self.path = askopenfilename(title='select song')
|
95 |
+
else:
|
96 |
+
# generate unique identifier for storing beatmap cache
|
97 |
+
audio_id = numpy.sum(audio[0][1000:2000]) if len(audio<2000) else numpy.sum(audio[1000:2000])
|
98 |
+
self.filename = 'unknown ' + str(hex(int(audio_id))) + ' ' + str(hex(int((audio_id%1)*(10**18))))
|
99 |
+
self.path = self.filename
|
100 |
+
print(self.filename)
|
101 |
else:
|
102 |
+
if path is None: self.path=filename
|
103 |
+
else: self.path=path
|
104 |
+
|
105 |
+
# load from zip
|
106 |
+
if self.path.lower().endswith('.zip'):
|
107 |
+
import shutil,os
|
108 |
+
if os.path.exists('BeatManipulator_TEMP'): shutil.rmtree('BeatManipulator_TEMP')
|
109 |
+
os.mkdir('BeatManipulator_TEMP')
|
110 |
+
shutil.unpack_archive(self.path, 'BeatManipulator_TEMP')
|
111 |
+
for root,dirs,files in os.walk('BeatManipulator_TEMP'):
|
112 |
+
for fname in files:
|
113 |
+
if fname.lower().endswith('.mp3') or fname.lower().endswith('.wav') or fname.lower().endswith('.ogg') or fname.lower().endswith('.flac'):
|
114 |
+
self.audio, self.samplerate=open_audio(root.replace('\\','/')+'/'+fname)
|
115 |
+
stop=True
|
116 |
+
break
|
117 |
+
if stop is True: break
|
118 |
+
shutil.rmtree('BeatManipulator_TEMP')
|
119 |
+
|
120 |
+
# open audio from path
|
121 |
+
if self.audio is None or self.samplerate is None:
|
122 |
+
self.audio, self.samplerate=open_audio(self.path)
|
123 |
+
|
124 |
+
# mono to stereo
|
125 |
+
if len(self.audio)>16:
|
126 |
+
self.audio=numpy.asarray((self.audio,self.audio))
|
127 |
+
|
128 |
+
# stuff
|
129 |
+
self.path=self.path.replace('\\', '/')
|
130 |
+
if filename is None: self.filename=self.path.split('/')[-1]
|
131 |
+
else: self.filename=filename.replace('\\', '/').split('/')[-1]
|
132 |
self.samplerate=int(self.samplerate)
|
133 |
+
|
134 |
+
# artist, title
|
135 |
+
if ' - ' in self.path.split('/')[-1]:
|
136 |
+
self.artist = self.path.split('/')[-1].split(' - ')[0]
|
137 |
+
self.title= '.'.join(self.path.split('/')[-1].split(' - ')[1].split('.')[:-1])
|
138 |
+
elif path is not None or filename is not None:
|
139 |
+
self.title=''.join(self.path.split('/')[-1].split('.')[:-1])
|
140 |
+
self.artist=None
|
141 |
+
else:
|
142 |
+
self.title = None
|
143 |
+
self.artist = None
|
144 |
+
self.caching=caching
|
145 |
+
self.log=log
|
146 |
+
if copied is False and self.log is True:
|
147 |
+
if self.artist is not None or self.title is not None: print(f'Loaded {self.artist} - {self.title}; ')
|
148 |
+
elif filename is not None: print(f'Loaded {self.filename}; ')
|
149 |
+
elif path is not None: print(f'Loaded {self.path}; ')
|
150 |
+
else: print(f'Loaded audio file; ')
|
151 |
+
self.audio_isarray = True
|
152 |
+
|
153 |
+
if isinstance(bmap, beatmap): self.beatmap=bmap
|
154 |
+
else: self.beatmap = beatmap(beatmap = bmap, audio= self.audio, samplerate=self.samplerate, filename=self.filename, caching = caching, log=log, path=self.path, artist=self.artist, title=self.title)
|
155 |
+
self.hitmap = hitmap(audio= self.audio, samplerate=self.samplerate, filename=self.filename, caching = caching, log=log, path=self.path, artist=self.artist, title=self.title)
|
156 |
+
self.spectogram = spectogram(audio=self.audio, samplerate=self.samplerate, beatmap=self.beatmap, log=self.log)
|
157 |
+
self.beat_image = beat_image(audio=self.audio, samplerate=self.samplerate, beatmap=self.beatmap, log=self.log)
|
158 |
+
|
159 |
+
@property
|
160 |
+
def bm(self):
|
161 |
+
return self.beatmap.beatmap
|
162 |
+
|
163 |
+
@property
|
164 |
+
def hm(self):
|
165 |
+
return self.hitmap.beatmap
|
166 |
+
|
167 |
+
def _printlog(self, string, end=None, force = False, forcei = False):
|
168 |
+
if (self.log is True or force is True) and forcei is False:
|
169 |
+
if end is None: print(string)
|
170 |
+
else:print(string,end=end)
|
171 |
|
172 |
+
def _audio_tolist(self, force = True):
|
173 |
+
if self.audio_isarray:
|
174 |
+
self.audio = self.audio.tolist()
|
175 |
+
self.audio_isarray = False
|
176 |
+
elif force is True:
|
177 |
+
self.audio = self.audio.tolist()
|
178 |
+
self.audio_isarray = False
|
179 |
+
|
180 |
+
def _audio_toarray(self, force = True):
|
181 |
+
if not self.audio_isarray:
|
182 |
+
self.audio = numpy.asarray(self.audio)
|
183 |
+
self.audio_isarray = True
|
184 |
+
elif force is True:
|
185 |
+
self.audio = numpy.asarray(self.audio)
|
186 |
+
self.audio_isarray = True
|
187 |
+
|
188 |
+
def _update(self):
|
189 |
+
self.beatmap.audio = self.audio
|
190 |
+
self.hitmap.audio = self.audio
|
191 |
+
self.spectogram.audio = self.audio
|
192 |
+
self.beat_image.audio = self.audio
|
193 |
+
self.beat_image.beatmap = self.bm
|
194 |
+
|
195 |
+
def write(self, output:str, lib:str='auto', libs=('pedalboard.io', 'soundfile')):
|
196 |
+
""""writes audio to path specified by output. Path should end with file extension, for example `folder/audio.mp3`"""
|
197 |
+
self._audio_toarray()
|
198 |
+
if lib!='auto': self._printlog(f'writing {output} with {lib}')
|
199 |
if lib=='pedalboard.io':
|
|
|
200 |
#print(audio)
|
201 |
+
import pedalboard.io
|
202 |
+
with pedalboard.io.AudioFile(output, 'w', self.samplerate, self.audio.shape[0]) as f:
|
203 |
f.write(self.audio)
|
204 |
elif lib=='soundfile':
|
|
|
205 |
audio=self.audio.T
|
206 |
import soundfile
|
207 |
soundfile.write(output, audio, self.samplerate)
|
208 |
del audio
|
209 |
elif lib=='auto':
|
210 |
+
for i in libs:
|
211 |
try:
|
212 |
+
self.write(output, i)
|
213 |
break
|
214 |
except Exception as e:
|
215 |
print(e)
|
|
|
223 |
# output = output + '.' + format
|
224 |
# song.export(output, format=format)
|
225 |
|
226 |
+
# def generate_beatmap(self, lib='madmom.BeatDetectionProcessor', split=None):
|
227 |
+
# self.beatmap = beatmap(beatmap=None, samplerate=self.samplerate, length=len(self.audio[0]),caching=self.caching,log=self.log)
|
228 |
+
# self.beatmap.generate(audio=self.audio, samplerate=self.samplerate, lib=lib, caching=self.caching, split=split, filename=self.filename)
|
229 |
+
|
230 |
+
# def generate_hitmap(self, lib='madmom.BeatDetectionProcessor'):
|
231 |
+
# self.hitmap=hitmap(beatmap=None, samplerate=self.samplerate, length = len(self.audio), caching=self.caching, log=self.log)
|
232 |
+
# self.hitmap.generate(audio=self.audio, samplerate=self.samplerate, lib=lib, caching=self.caching, filename=self.filename)
|
233 |
+
|
234 |
+
def generate_osu_beatmap(self, difficulties = [0.2, 0.1, 0.08, 0.06, 0.04, 0.02, 0.01, 0.005]):
|
235 |
+
self.hitmap.osu(self, difficulties = difficulties)
|
236 |
+
import shutil, os
|
237 |
+
if self.path is not None:
|
238 |
+
shutil.copyfile(self.path, 'BeatManipulator_TEMP/'+self.path.split('/')[-1])
|
239 |
+
else: self.write('BeatManipulator_TEMP/audio.mp3')
|
240 |
+
shutil.make_archive('BeatManipulator_TEMP', 'zip', 'BeatManipulator_TEMP')
|
241 |
+
os.rename('BeatManipulator_TEMP.zip', _outputfilename('', self.path, '_'+self.hm, 'osz'))
|
242 |
+
shutil.rmtree('BeatManipulator_TEMP')
|
243 |
+
|
244 |
+
def autotrim(self):
|
245 |
+
self._printlog(f'autotrimming; ')
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
246 |
n=0
|
247 |
for i in self.audio[0]:
|
248 |
if i>=0.0001:break
|
249 |
n+=1
|
250 |
+
if type(self.audio) is tuple or list: self.audio = numpy.asarray(self.audio)
|
251 |
self.audio = numpy.asarray([self.audio[0,n:], self.audio[1,n:]])
|
252 |
+
if self.bm is not None:
|
253 |
+
self.beatmap.beatmap=numpy.absolute(self.beatmap.beatmap-n)
|
254 |
+
if self.hm is not None:
|
255 |
+
print(self.hm)
|
256 |
+
self.hitmap.beatmap=numpy.absolute(self.hitmap.beatmap-n)
|
257 |
+
self._update()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
258 |
|
259 |
def beatswap(self, pattern: str, sep=',', smoothing=40, smoothing_mode='replace'):
|
260 |
import math, numpy
|
|
|
262 |
size=0
|
263 |
#cut processing??? not worth it, it is really fast anyways
|
264 |
pattern=pattern.replace(' ', '').split(sep)
|
265 |
+
self._printlog(f"beatswapping with {' '.join(pattern)}; ")
|
266 |
for j in pattern:
|
267 |
s=''
|
268 |
if '?' not in j:
|
|
|
270 |
if i.isdigit() or i=='.' or i=='-' or i=='/' or i=='+' or i=='%': s=str(s)+str(i)
|
271 |
elif i==':':
|
272 |
if s=='': s='0'
|
273 |
+
#print(s, _safer_eval(s))
|
274 |
+
size=max(math.ceil(float(_safer_eval(s))), size)
|
275 |
s=''
|
276 |
elif s!='': break
|
277 |
if s=='': s='0'
|
278 |
if s=='': s='0'
|
279 |
+
size=max(math.ceil(float(_safer_eval(s))), size)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
280 |
|
281 |
+
self._audio_tolist()
|
282 |
+
self.beatmap._toarray()
|
283 |
+
# turns audio into a tuple with L and R channels
|
284 |
self.audio=(self.audio[0], self.audio[1])
|
285 |
+
|
286 |
+
# adds the part before the first beat
|
287 |
result=(self.audio[0][:self.beatmap[0]],self.audio[1][:self.beatmap[0]])
|
288 |
beat=numpy.asarray([[],[]])
|
289 |
|
290 |
# size, iterations are integers
|
291 |
size=int(max(size//1, 1))
|
292 |
|
293 |
+
self.beatmap._add_beat_to_end()
|
|
|
|
|
294 |
|
295 |
iterations=int(len(self.beatmap)//size)
|
296 |
|
297 |
if 'random' in pattern[0].lower():
|
298 |
import random
|
299 |
for i in range(len(self.beatmap)):
|
300 |
+
|
301 |
choice=random.randint(1,len(self.beatmap)-1)
|
302 |
for a in range(len(self.audio)):
|
303 |
+
try:
|
304 |
+
beat=self.audio[a][self.beatmap[choice-1]:self.beatmap[choice]-smoothing]
|
305 |
+
if smoothing>0: result[a].extend(numpy.linspace(result[a][-1],beat[0],smoothing))
|
306 |
+
result[a].extend(beat)
|
307 |
+
except IndexError: pass
|
308 |
self.audio = result
|
309 |
return
|
310 |
|
311 |
if 'reverse' in pattern[0].lower():
|
312 |
for a in range(len(self.audio)):
|
313 |
for i in list(reversed(range(len(self.beatmap))))[:-1]:
|
314 |
+
try:
|
315 |
+
beat=self.audio[a][self.beatmap[i-1]:self.beatmap[i]-smoothing]
|
316 |
+
#print(self.beatmap[i-1],self.beatmap[i])
|
317 |
+
#print(result[a][-1], beat[0])
|
318 |
+
if smoothing>0: result[a].extend(numpy.linspace(result[a][-1],beat[0],smoothing))
|
319 |
+
result[a].extend(beat)
|
320 |
+
except IndexError: pass
|
321 |
|
322 |
self.audio = result
|
323 |
return
|
324 |
|
325 |
#print(len(result[0]))
|
|
|
|
|
326 |
def beatswap_getnum(i: str, c: str):
|
327 |
if c in i:
|
328 |
try:
|
|
|
353 |
|
354 |
# If character is : - get start
|
355 |
elif s!='' and c==':':
|
356 |
+
#print ('Beat start:',s,'=', _safer_eval(s),'=',int(_safer_eval(s)//1), '+',j,'*',size,' =',int(_safer_eval(s)//1)+j*size, ', mod=',_safer_eval(s)%1)
|
357 |
+
try: st=self.beatmap[int(_safer_eval(s)//1)+j*size ] + _safer_eval(s)%1* (self.beatmap[int(_safer_eval(s)//1)+j*size +1] - self.beatmap[int(_safer_eval(s)//1)+j*size])
|
358 |
except IndexError: break
|
359 |
s=''
|
360 |
|
|
|
363 |
|
364 |
# start already exists
|
365 |
if st is not None:
|
366 |
+
#print ('Beat end: ',s,'=', _safer_eval(s),'=',int(_safer_eval(s)//1), '+',j,'*',size,' =',int(_safer_eval(s)//1)+j*size, ', mod=',_safer_eval(s)%1)
|
367 |
try:
|
368 |
+
s=self.beatmap[int(_safer_eval(s)//1)+j*size ] + _safer_eval(s)%1* (self.beatmap[int(_safer_eval(s)//1)+j*size +1] - self.beatmap[int(_safer_eval(s)//1)+j*size])
|
369 |
#print(s)
|
370 |
except IndexError: break
|
371 |
else:
|
372 |
# start doesn't exist
|
373 |
+
#print ('Beat start:',s,'=', _safer_eval(s),'=',int(_safer_eval(s)//1), '+',j,'*',size,'- 1 =',int(_safer_eval(s)//1)+j*size, ', mod=',_safer_eval(s)%1)
|
374 |
+
#print ('Beat end: ',s,'=', _safer_eval(s),'=',int(_safer_eval(s)//1), '+',j,'*',size,' =',int(_safer_eval(s)//1)+j*size+1, ', mod=',_safer_eval(s)%1)
|
375 |
try:
|
376 |
+
st=self.beatmap[int(_safer_eval(s)//1)+j*size-1 ] + _safer_eval(s)%1* (self.beatmap[int(_safer_eval(s)//1)+j*size +1] - self.beatmap[int(_safer_eval(s)//1)+j*size])
|
377 |
+
s=self.beatmap[int(_safer_eval(s)//1)+j*size ] + _safer_eval(s)%1* (self.beatmap[int(_safer_eval(s)//1)+j*size +1] - self.beatmap[int(_safer_eval(s)//1)+j*size])
|
378 |
except IndexError: break
|
379 |
|
380 |
if st>s:
|
|
|
394 |
z=beatswap_getnum(i,'c')
|
395 |
if z is not None:
|
396 |
if z=='': beat[0],beat[1]=beat[1],beat[0]
|
397 |
+
elif _safer_eval(z)==0:beat[0]*=0
|
398 |
else:beat[1]*=0
|
399 |
|
400 |
# volume
|
401 |
z=beatswap_getnum(i,'v')
|
402 |
if z is not None:
|
403 |
if z=='': z='0'
|
404 |
+
beat*=_safer_eval(z)
|
405 |
|
406 |
z=beatswap_getnum(i,'t')
|
407 |
if z is not None:
|
408 |
if z=='': z='2'
|
409 |
+
beat**=1/_safer_eval(z)
|
410 |
|
411 |
# speed
|
412 |
z=beatswap_getnum(i,'s')
|
413 |
if z is not None:
|
414 |
if z=='': z='2'
|
415 |
+
z=_safer_eval(z)
|
416 |
if z<1:
|
417 |
beat=numpy.asarray((numpy.repeat(beat[0],int(1//z)),numpy.repeat(beat[1],int(1//z))))
|
418 |
else:
|
|
|
422 |
z=beatswap_getnum(i,'b')
|
423 |
if z is not None:
|
424 |
if z=='': z='3'
|
425 |
+
z=1/_safer_eval(z)
|
426 |
if z<1: beat=beat*z
|
427 |
beat=numpy.around(beat, max(int(z), 1))
|
428 |
if z<1: beat=beat/z
|
|
|
431 |
z=beatswap_getnum(i,'d')
|
432 |
if z is not None:
|
433 |
if z=='': z='3'
|
434 |
+
z=int(_safer_eval(z))
|
435 |
beat=numpy.asarray((numpy.repeat(beat[0,::z],z),numpy.repeat(beat[1,::z],z)))
|
436 |
|
437 |
# convert to list
|
|
|
447 |
#print('Adding beat... a, s, st:', a, s, st, sep=', ')
|
448 |
#print(result[a][-1])
|
449 |
#print(beat[a][0])
|
450 |
+
try:
|
451 |
+
if smoothing>0: result[a].extend(numpy.linspace(result[a][-1],beat[a][0],smoothing))
|
452 |
+
result[a].extend(beat[a])
|
453 |
+
except IndexError: pass
|
454 |
#print(len(result[0]))
|
455 |
|
456 |
#
|
457 |
break
|
|
|
458 |
|
459 |
self.audio = result
|
460 |
+
self._update()
|
461 |
|
462 |
def beatsample(self, audio2, shift=0):
|
463 |
+
self._printlog(f'beatsample; ')
|
464 |
try: l=len(audio2[0])
|
465 |
except (TypeError, IndexError):
|
466 |
l=len(audio2)
|
467 |
audio2=numpy.vstack((audio2,audio2))
|
468 |
for i in range(len(self.beatmap)):
|
469 |
+
#print(self.beatmap[i])
|
470 |
try: self.audio[:,int(self.beatmap[i]) + int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i]))) : int(self.beatmap[i])+int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i])))+int(l)]+=audio2
|
471 |
except (IndexError, ValueError): pass
|
472 |
+
self._update()
|
473 |
+
|
474 |
+
def hitsample(self, audio2=None):
|
475 |
+
self._printlog(f'hitsample; ')
|
476 |
+
from . import generate
|
477 |
+
if audio2 is None:audio2=generate.saw(0.05, 1000, self.samplerate)
|
478 |
+
try: l=len(audio2[0])
|
479 |
+
except (TypeError, IndexError):
|
480 |
+
l=len(audio2)
|
481 |
+
audio2=numpy.vstack((audio2,audio2))
|
482 |
+
#print(self.audio)
|
483 |
+
self.audio=numpy.array(self.audio).copy()
|
484 |
+
#print(self.audio)
|
485 |
+
for i in range(len(self.hitmap)):
|
486 |
+
try:
|
487 |
+
#print('before', self.audio[:,int(self.hitmap[i])])
|
488 |
+
self.audio[:,int(self.hitmap[i]) : int(self.hitmap[i]+l)]+=audio2
|
489 |
+
#print('after ', self.audio[:,int(self.hitmap[i])])
|
490 |
+
#print(self.hitmap[i])
|
491 |
+
except (IndexError, ValueError): pass
|
492 |
+
self._update()
|
493 |
|
494 |
def sidechain(self, audio2, shift=0, smoothing=40):
|
495 |
+
self._printlog(f'sidechain; ')
|
496 |
try: l=len(audio2[0])
|
497 |
except (TypeError, IndexError):
|
498 |
l=len(audio2)
|
|
|
500 |
for i in range(len(self.beatmap)):
|
501 |
try: self.audio[:,int(self.beatmap[i])-smoothing + int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i]))) : int(self.beatmap[i])-smoothing+int(float(shift) * (int(self.beatmap[i+1])-int(self.beatmap[i])))+int(l)]*=audio2
|
502 |
except (IndexError, ValueError): break
|
503 |
+
self._update()
|
504 |
|
505 |
+
def quick_beatswap(self, output:str='', pattern:str=None, scale:float=1, shift:float=0, start:float=0, end:float=None, autotrim:bool=True, autoscale:bool=False, autoinsert:bool=False, suffix:str=' (beatswap)', lib:str='madmom.BeatDetectionProcessor', log = True):
|
506 |
"""Generates beatmap if it isn't generated, applies beatswapping to the song and writes the processed song it next to the .py file. If you don't want to write the file, set output=None
|
507 |
|
508 |
output: can be a relative or an absolute path to a folder or to a file. Filename will be created from the original filename + a suffix to avoid overwriting. If path already contains a filename which ends with audio file extension, such as .mp3, that filename will be used.
|
|
|
526 |
suffix: suffix that will be appended to the filename
|
527 |
|
528 |
lib: beat detection library"""
|
529 |
+
if log is False and self.log is True:
|
530 |
+
self.log = False
|
531 |
+
self.beatmap.log=False
|
532 |
+
log_disabled = True
|
533 |
+
else: log_disabled = False
|
534 |
+
self._printlog('___')
|
535 |
+
scale = _safer_eval(scale)
|
536 |
+
shift = _safer_eval(shift)
|
537 |
+
if self.bm is None: self.beatmap.generate(lib=lib)
|
538 |
+
if autotrim is True: self.autotrim()
|
539 |
+
save=self.beatmap.beatmap.copy()
|
540 |
+
if autoscale is True: self.beatmap.autoscale()
|
541 |
+
if shift!=0: self.beatmap.shift(shift)
|
542 |
+
if scale!=1: self.beatmap.scale(scale)
|
543 |
+
if autoinsert is True: self.beatmap.autoinsert()
|
544 |
+
if start!=0 or end is not None: self.beatmap.cut(start, end)
|
545 |
+
self._printlog(f'pattern = {pattern}')
|
546 |
+
if 'test' in pattern.lower():
|
547 |
+
self.audio*=0.7
|
548 |
+
self.beatmap.beatmap=save.copy()
|
549 |
+
if autoinsert is True: self.beatmap.autoinsert()
|
550 |
+
if start!=0 or end is not None: self.beatmap.cut(start, end)
|
551 |
+
audio2, samplerate2=open_audio('samples/cowbell.flac')
|
552 |
+
song.quick_beatsample(self, output=None, audio2=list(i[::3] for i in audio2), scale=8*scale, shift=0+shift, log=log)
|
553 |
+
song.quick_beatsample(self, output=None, audio2=list(i[::2] for i in audio2), scale=8*scale, shift=1*scale+shift, log=log)
|
554 |
+
song.quick_beatsample(self, output=None, audio2=audio2, scale=8*scale, shift=2*scale+shift, log=log)
|
555 |
+
song.quick_beatsample(self, output=None, audio2=numpy.repeat(audio2,2,axis=1), scale=8*scale, shift=3*scale+shift, log=log)
|
556 |
+
song.quick_beatsample(self, output=None, audio2=numpy.repeat(audio2,3,axis=1), scale=8*scale, shift=4*scale+shift, log=log)
|
557 |
+
song.quick_beatsample(self, output=None, audio2=numpy.repeat(audio2,2,axis=1), scale=8*scale, shift=5*scale+shift, log=log)
|
558 |
+
song.quick_beatsample(self, output=None, audio2=audio2, scale=8*scale, shift=6*scale+shift, log=log)
|
559 |
+
song.quick_beatsample(self, output=None, audio2=list(i[::2] for i in audio2), scale=8*scale, shift=7*scale+shift, log=log)
|
560 |
+
|
561 |
+
else: self.beatswap(pattern)
|
562 |
|
563 |
if output is not None:
|
564 |
if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
|
565 |
output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
|
566 |
+
output=output+'.'.join(''.join(self.path.split('/')[-1]).split('.')[:-1])+suffix+'.mp3'
|
567 |
+
self.write(output)
|
568 |
|
569 |
+
self.beatmap.beatmap=save.copy()
|
570 |
+
if log_disabled is True:
|
571 |
+
self.log = True
|
572 |
+
self.beatmap.log=True
|
573 |
|
574 |
|
575 |
+
def quick_sidechain(self, output:str='', audio2:numpy.array=None, scale:float=1, shift:float=0, start:float=0, end:float=None, autotrim:bool=True, autoscale:bool=False, autoinsert:bool=False, filename2:str=None, suffix:str=' (sidechain)', lib:str='madmom.BeatDetectionProcessor', log=True):
|
576 |
"""Generates beatmap if it isn't generated, applies fake sidechain on each beat to the song and writes the processed song it next to the .py file. If you don't want to write the file, set output=None
|
577 |
|
578 |
output: can be a relative or an absolute path to a folder or to a file. Filename will be created from the original filename + a suffix to avoid overwriting. If path already contains a filename which ends with audio file extension, such as .mp3, that filename will be used.
|
|
|
598 |
suffix: suffix that will be appended to the filename
|
599 |
|
600 |
lib: beat detection library"""
|
601 |
+
if log is False and self.log is True:
|
602 |
+
self.log = False
|
603 |
+
log_disabled = True
|
604 |
+
else: log_disabled = False
|
605 |
+
self._printlog('___')
|
606 |
+
scale = _safer_eval(scale)
|
607 |
+
shift = _safer_eval(shift)
|
608 |
if filename2 is None and audio2 is None:
|
609 |
+
from . import generate
|
610 |
+
audio2=generate.sidechain()
|
611 |
|
612 |
if audio2 is None:
|
613 |
audio2, samplerate2=open_audio(filename2)
|
614 |
|
615 |
+
if self.bm is None: self.beatmap.generate(lib=lib)
|
616 |
+
if autotrim is True: self.autotrim()
|
617 |
+
save=self.beatmap.beatmap.copy()
|
618 |
+
if autoscale is True: self.beatmap.autoscale()
|
619 |
+
if shift!=0: self.beatmap.shift(shift)
|
620 |
+
if scale!=1: self.beatmap.scale(scale)
|
621 |
+
if autoinsert is True: self.beatmap.autoinsert()
|
622 |
+
if start!=0 or end is not None: self.beatmap.cut(start, end)
|
623 |
+
self.sidechain(audio2)
|
624 |
|
625 |
if output is not None:
|
626 |
if not (output.lower().endswith('.mp3') or output.lower().endswith('.wav') or output.lower().endswith('.flac') or output.lower().endswith('.ogg') or
|
627 |
output.lower().endswith('.aac') or output.lower().endswith('.ac3') or output.lower().endswith('.aiff') or output.lower().endswith('.wma')):
|
628 |
+
output=output+'.'.join(''.join(self.path.split('/')[-1]).split('.')[:-1])+suffix+'.mp3'
|
629 |
+
self.write(output)
|
630 |
|
631 |
+
self.beatmap.beatmap=save.copy()
|
632 |
+
if log_disabled is True: self.log = True
|
633 |
|
634 |
+
def quick_beatsample(self, output:str='', filename2:str=None, scale:float=1, shift:float=0, start:float=0, end:float=None, autotrim:bool=True, autoscale:bool=False, autoinsert:bool=False, audio2:numpy.array=None, suffix:str=' (BeatSample)', lib:str='madmom.BeatDetectionProcessor', log=True):
    """Generates the beatmap if it isn't generated, adds the chosen sample to each beat of the song and writes the processed song next to the .py file. If you don't want to write the file, set output=None.

    output: relative or absolute path to a folder or a file. If the path already ends with an audio extension such as .mp3, that filename is used; otherwise a filename is built from the original filename plus `suffix`.

    filename2 / audio2: the sample added on each beat. If both are None, a tkinter file dialog is opened.

    scale / shift, start / end, autotrim / autoscale / autoinsert: beatmap transformations applied before sampling (the original beatmap is restored afterwards).

    suffix: suffix that will be appended to the filename.

    lib: beat detection library."""
    # Temporarily silence logging for this call only, restoring it at the end.
    if log is False and self.log is True:
        self.log = False
        log_disabled = True
    else: log_disabled = False
    self._printlog('___')
    # scale/shift may arrive as strings (e.g. "1/2"); evaluate them to numbers.
    scale = _safer_eval(scale)
    shift = _safer_eval(shift)
    if filename2 is None and audio2 is None:
        from tkinter.filedialog import askopenfilename
        filename2 = askopenfilename(title='select sidechain impulse', filetypes=[("mp3", ".mp3"),("wav", ".wav"),("flac", ".flac"),("ogg", ".ogg"),("wma", ".wma")])

    if audio2 is None:
        audio2, samplerate2 = open_audio(filename2)

    if self.bm is None: self.beatmap.generate(lib=lib)
    if autotrim is True: self.autotrim()
    # Keep a copy of the untouched beatmap so this call has no lasting side effects.
    save = self.beatmap.beatmap.copy()
    if autoscale is True: self.beatmap.autoscale()
    if shift != 0: self.beatmap.shift(shift)
    if scale != 1: self.beatmap.scale(scale)
    if autoinsert is True: self.beatmap.autoinsert()
    if start != 0 or end is not None: self.beatmap.cut(start, end)
    self.beatsample(audio2)

    if output is not None:
        # str.endswith accepts a tuple of suffixes: one call instead of a chain of `or`s.
        if not output.lower().endswith(('.mp3', '.wav', '.flac', '.ogg', '.aac', '.ac3', '.aiff', '.wma')):
            output = output + '.'.join(''.join(self.path.split('/')[-1]).split('.')[:-1]) + suffix + '.mp3'
        self.write(output)
    # Restore the pre-transformation beatmap and the logging flag.
    self.beatmap.beatmap = save.copy()
    if log_disabled is True: self.log = True
|
689 |
+
|
690 |
+
def spectogram_to_audio(self):
    """Rebuild self.audio from the current spectogram representation."""
    reconstructed = self.spectogram.toaudio()
    self.audio = reconstructed
|
692 |
+
|
693 |
+
def beat_image_to_audio(self):
    """Rebuild self.audio from the current beat image representation."""
    reconstructed = self.beat_image.toaudio()
    self.audio = reconstructed
|
695 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
696 |
|
697 |
def fix_beatmap(filename, lib='madmom.BeatDetectionProcessor', scale=1, shift=0):
    """Rescale/shift the cached beatmap for `filename` and overwrite the cache file.

    Asks for interactive confirmation before overwriting. Does nothing when
    scale == 1 and shift == 0.
    """
    import os  # hoisted: previously imported mid-function, after cacheDir was built
    if scale == 1 and shift == 0:
        print('scale = 1, shift = 0: no changes have been made.')
        return
    track = song(filename)
    # Audio length serves as a cheap content id so different versions of a song
    # get distinct cache entries.
    audio_id = hex(len(track.audio[0]))
    cacheDir = "SavedBeatmaps/" + ''.join(track.filename.split('/')[-1]) + "_" + lib + "_" + audio_id + '.txt'
    if not os.path.exists(cacheDir):
        # NOTE(review): original placeholder was garbled in the source; presumably
        # the cache path was interpolated here — confirm against upstream.
        print(f"beatmap isn't generated: {cacheDir}")
        return
    track.beatmap.generate(lib=lib)
    track.beatmap.shift(shift)
    track.beatmap.scale(scale)
    if not os.path.exists('SavedBeatmaps'):
        os.mkdir('SavedBeatmaps')
    a = input(f'Are you sure you want to overwrite {cacheDir} using scale = {scale}; shift = {shift}? ("y" to continue): ')
    if 'n' in a.lower() or not 'y' in a.lower():
        print('Operation canceled.')
        return
    else:
        track.beatmap._toarray()
        numpy.savetxt(cacheDir, track.bm.astype(int), fmt='%d')
        print('Beatmap overwritten.')
|
721 |
|
722 |
def delete_beatmap(filename, lib='madmom.BeatDetectionProcessor'):
    """Delete the cached beatmap file for `filename` after interactive confirmation."""
    import os
    track = song(filename)
    # Audio length serves as a cheap content id matching the cache naming scheme.
    audio_id = hex(len(track.audio[0]))
    if not os.path.exists('SavedBeatmaps'):
        os.mkdir('SavedBeatmaps')
    cacheDir = "SavedBeatmaps/" + ''.join(track.filename.split('/')[-1]) + "_" + lib + "_" + audio_id + '.txt'
    if not os.path.exists(cacheDir):
        # NOTE(review): original placeholder was garbled in the source; presumably
        # the cache path was interpolated here — confirm against upstream.
        print(f"beatmap doesn't exist: {cacheDir}")
        return
    a = input(f'Are you sure you want to delete {cacheDir}? ("y" to continue): ')
    if 'n' in a.lower() or not 'y' in a.lower():
        print('Operation canceled.')
        return
    else:
        os.remove(cacheDir)
        print('Beatmap deleted.')
|
739 |
+
|
740 |
+
|
741 |
+
def _tosong(audio, bmap, samplerate, log):
    """Normalize `audio` (path string, array-like, song object, or None) into a song.

    - str or None: loaded via song() (None triggers its file-pick dialog).
    - list/tuple/ndarray: wrapped into a song; samplerate is required.
    - song: copied, with logging flags set from `log`.
    Raises AssertionError for any other type.
    """
    from .wrapper import _song_copy
    if isinstance(audio, str) or audio is None:
        audio = song(audio, bmap=bmap, log=log)
    elif isinstance(audio, (list, numpy.ndarray, tuple)):
        assert samplerate is not None, "If audio is an array, samplerate must be provided"
        # A long (>16) sequence whose first element is itself a sequence is assumed
        # to be (samples, channels) and is transposed to (channels, samples).
        # Fixed precedence: `a and b or c or d` grouped as `(a and b) or c or d`,
        # so any nested ndarray/tuple was transposed regardless of length.
        if len(audio) > 16 and isinstance(audio[0], (list, numpy.ndarray, tuple)):
            audio = numpy.asarray(audio).T
        audio = song(audio=audio, samplerate=samplerate, bmap=bmap, log=log)
    elif isinstance(audio, song):
        audio = _song_copy(audio)
        audio.log, audio.beatmap.log, audio.beat_image.log = log, log, log
    else:
        assert False, f"Audio should be either a path to a file, a list/array/tuple, a beat_manipulator.song object, or None for a pick file dialogue, but it is {type(audio)}"
    return audio
|
754 |
+
|
755 |
+
def beatswap(pattern: str, audio = None, scale: float = 1, shift: float = 0, output='', samplerate = None, bmap = None, log = True, suffix=' (beatswap)'):
    """Module-level convenience wrapper: coerce `audio` into a song, beat-swap it
    with `pattern` (optionally scaled/shifted), and write the result to `output`."""
    track = _tosong(audio=audio, bmap=bmap, samplerate=samplerate, log=log)
    destination = _outputfilename(output=output, filename=track.path, suffix=suffix)
    track.quick_beatswap(pattern=pattern, scale=scale, shift=shift, output=destination)
|
759 |
+
|
760 |
+
def generate_beat_image(audio = None, output='', samplerate = None, bmap = None, log = True, ext='png', maximum=4096):
    """Module-level convenience wrapper: coerce `audio` into a song, generate its
    beatmap and beat image, and write the image to `output` (capped at `maximum`)."""
    track = _tosong(audio=audio, bmap=bmap, samplerate=samplerate, log=log)
    destination = _outputfilename(output=output, filename=track.path, ext=ext, suffix='')
    track.beatmap.generate()
    track.beat_image.generate()
    track.beat_image.write(output=destination, maximum=maximum)
|
beat_manipulator/mix.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import numpy
|
2 |
+
from . import main as bm
|
3 |
+
def mix_shuffle_approx_random(audio1, audio2, iterations, minlength=0, maxlength=None, bias=0):
    """Approximate audio2 by randomly pasting segments of audio1 onto a blank canvas.

    Each iteration pastes a random segment of audio1's gradient envelope onto a
    candidate; the paste is kept only when it reduces the L1 distance to audio2's
    envelope by more than `bias`. Returns a (channels, len(audio2)) array.

    minlength/maxlength are in seconds and converted to samples using the song's
    samplerate (or 44100 when a bare array is given — TODO confirm that default).
    """
    import random
    # Convert segment-length bounds from seconds to samples.
    if isinstance(audio1, bm.song):
        minlength *= audio1.samplerate
        if maxlength is not None: maxlength *= audio1.samplerate
        audio1 = audio1.audio
    else:
        minlength *= 44100
        if maxlength is not None: maxlength *= 44100
    if isinstance(audio2, bm.song): audio2 = audio2.audio
    # Duplicate mono inputs into two identical channels.
    if len(audio1) > 16: audio1 = numpy.asarray([audio1, audio1])
    # Fixed: the original assigned this to audio1, clobbering the source material
    # instead of stereo-izing audio2.
    if len(audio2) > 16: audio2 = numpy.asarray([audio2, audio2])
    shape2 = len(audio2)
    # Absolute gradient serves as a rough loudness/transient envelope.
    mono1 = numpy.abs(numpy.gradient(audio1[0]))
    mono2 = numpy.abs(numpy.gradient(audio2[0]))
    length1 = len(mono1)
    length2 = len(mono2)
    result = numpy.zeros(shape=(shape2, length2))
    result_diff = numpy.zeros(shape=length2)
    old_difference = numpy.sum(mono2)
    random_result = result_diff.copy()
    for i in range(iterations):
        rstart = random.randint(0, length1)
        if maxlength is not None:
            rlength = random.randint(minlength, min(length1 - rstart, maxlength))
        else: rlength = random.randint(minlength, minlength + length1 - rstart)
        rplace = random.randint(0, length2 - rlength)
        random_result = numpy.array(result_diff, copy=True)
        random_result[rplace:rplace + rlength] = mono1[rstart:rstart + rlength]
        difference = numpy.sum(numpy.abs(mono2 - random_result))
        # Greedy accept: keep the paste only when it measurably improves the match.
        if difference < old_difference - bias:
            print(i, difference)
            result[:, rplace:rplace + rlength] = audio1[:, rstart:rstart + rlength]
            result_diff = random_result
            old_difference = difference
    return result
|
39 |
+
# 10 5 4 1
|
40 |
+
# 10 0 0 0
|
41 |
+
# 0 5 4 1 10
|
42 |
+
# 10 5 4 1
|
43 |
+
# 10 5 4 1
|
44 |
+
|
beat_manipulator/tests.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from . import main as bm
|
2 |
+
audio=list(i/100 for i in range(-100,100,1))
|
3 |
+
beatmap=list(range(0,100,10))+list(range(100,201,20))
|
4 |
+
|
5 |
+
def printb(text):
    """Print `text` wrapped in ANSI bold escape codes."""
    print('\033[1m{}\033[0m'.format(text))
|
7 |
+
def printe(text):
    """Print `text` in red-on-black using ANSI escape codes (error style)."""
    print('\x1b[0;31;40m{}\x1b[0m'.format(text))
|
9 |
+
|
10 |
+
# audio:
|
11 |
+
# [-1.0, -0.99, -0.98, -0.97, -0.96, -0.95, -0.94, -0.93, -0.92, -0.91, -0.9, -0.89, -0.88, -0.87, -0.86, -0.85, -0.84, -0.83, -0.82, -0.81, -0.8, -0.79, -0.78, -0.77, -0.76, -0.75, -0.74, -0.73, -0.72, -0.71, -0.7, -0.69, -0.68, -0.67, -0.66, -0.65, -0.64, -0.63, -0.62, -0.61, -0.6, -0.59, -0.58, -0.57, -0.56, -0.55, -0.54, -0.53, -0.52, -0.51, -0.5, -0.49, -0.48, -0.47, -0.46, -0.45, -0.44, -0.43, -0.42, -0.41, -0.4, -0.39, -0.38, -0.37, -0.36, -0.35, -0.34, -0.33, -0.32, -0.31, -0.3, -0.29, -0.28, -0.27, -0.26, -0.25, -0.24, -0.23, -0.22, -0.21, -0.2, -0.19, -0.18, -0.17, -0.16, -0.15, -0.14, -0.13, -0.12, -0.11, -0.1, -0.09, -0.08, -0.07, -0.06, -0.05, -0.04, -0.03, -0.02, -0.01, 0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99]
|
12 |
+
# audio at beatmap:
|
13 |
+
# [-1.0, -0.9, -0.8, -0.7, -0.6, -0.5, -0.4, -0.3, -0.2, -0.1, 0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
|
14 |
+
|
15 |
+
test=bm.song(audio=audio, samplerate=2, bmap=beatmap, filename='test.mp3', log=False)
|
16 |
+
if list(test.bm) == beatmap: printb('beatmap assignment passed')
|
17 |
+
else: printe(f'''beatmap assignment error.
|
18 |
+
{beatmap}
|
19 |
+
{test.beatmap}''')
|
20 |
+
|
21 |
+
test.beatmap.shift(2)
|
22 |
+
if list(test.bm) == beatmap[2:]: printb('beatmap_shift(2) passed')
|
23 |
+
else: printe(f'''beatmap_shift(2) error, 1st line is the expected result:
|
24 |
+
{[0, 1, 2] + beatmap[1:]}
|
25 |
+
{test.beatmap}''')
|
26 |
+
|
27 |
+
test.beatmap.beatmap = beatmap.copy()
|
28 |
+
test.beatmap.shift(-2)
|
29 |
+
if list(test.bm) == list([0, 1, 2] + beatmap[1:]): printb ('beatmap_shift(-2) passed')
|
30 |
+
else:printe(f'''beatmap_shift(-2) error, 1st line is the expected result:
|
31 |
+
{beatmap[2:]}
|
32 |
+
{test.beatmap}''')
|
33 |
+
|
34 |
+
test.beatmap.beatmap = beatmap.copy()
|
35 |
+
should=[5, 15, 25, 35, 45, 55, 65, 75, 85, 95, 110, 130, 150, 170, 190, 200]
|
36 |
+
test.beatmap.shift(0.5)
|
37 |
+
if list(test.bm) == should: printb('beatmap_shift(0.5) passed')
|
38 |
+
else:printe(f'''beatmap_shift(0.5) error, 1st line is the expected result:
|
39 |
+
{should}
|
40 |
+
{test.beatmap}''')
|
41 |
+
|
42 |
+
test.beatmap.beatmap = beatmap.copy()
|
43 |
+
should=[0, 5, 15, 25, 35, 45, 55, 65, 75, 85, 95, 110, 130, 150, 170, 190]
|
44 |
+
test.beatmap.shift(-0.5)
|
45 |
+
if list(test.bm) == should: printb('beatmap_shift(-0.5) passed')
|
46 |
+
else:printe(f'''beatmap_shift(-0.5) error, 1st line is the expected result:
|
47 |
+
{should}
|
48 |
+
{test.beatmap}''')
|
49 |
+
|
50 |
+
test.beatmap.beatmap = beatmap.copy()
|
51 |
+
should=[25, 35, 45, 55, 65, 75, 85, 95, 110, 130, 150, 170, 190, 200]
|
52 |
+
test.beatmap.shift(2.5)
|
53 |
+
if list(test.bm) == should: printb('beatmap_shift(2.5) passed')
|
54 |
+
else:printe(f'''beatmap_shift(2.5) error, 1st line is the expected result:
|
55 |
+
{should}
|
56 |
+
{list(test.beatmap)}''')
|
57 |
+
|
58 |
+
test.beatmap.beatmap = beatmap.copy()
|
59 |
+
should=[1, 2, 3, 5, 15, 25, 35, 45, 55, 65, 75, 85, 95, 110, 130, 150, 170, 190]
|
60 |
+
test.beatmap.shift(-2.5)
|
61 |
+
if list(test.bm) == should: printb('beatmap_shift(-2.5) passed')
|
62 |
+
else:printe(f'''beatmap_shift(-2.5) error, 1st line is the expected result:
|
63 |
+
{should}
|
64 |
+
{list(test.beatmap)}''')
|
65 |
+
|
66 |
+
test.beatmap.beatmap = beatmap.copy()
|
67 |
+
should=[0, 20, 40, 60, 80, 100, 140, 180]
|
68 |
+
test.beatmap.scale(2)
|
69 |
+
if list(test.bm) == should: printb('beatmap_scale(2) passed')
|
70 |
+
else:printe(f'''beatmap_scale(2) error, 1st line is the expected result:
|
71 |
+
{should}
|
72 |
+
{list(test.beatmap)}''')
|
73 |
+
|
74 |
+
test.beatmap.beatmap = beatmap.copy()
|
75 |
+
should=[0, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 55, 60, 65, 70, 75, 80, 85, 90, 95, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190]
|
76 |
+
test.beatmap.scale(0.5)
|
77 |
+
if list(test.bm) == should: printb('beatmap_scale(2) passed')
|
78 |
+
else:printe(f'''beatmap_scale(2) error, 1st line is the expected result:
|
79 |
+
{should}
|
80 |
+
{list(test.beatmap)}''')
|
81 |
+
|
82 |
+
test.beatmap.beatmap = beatmap.copy()
|
83 |
+
test.beatswap("1, 3, 2, 4", smoothing=0)
|
84 |
+
should=[[-1.0, -0.99, -0.98, -0.97, -0.96, -0.95, -0.94, -0.93, -0.92, -0.91, -0.8, -0.79, -0.78, -0.77, -0.76, -0.75, -0.74, -0.73, -0.72, -0.71, -0.9, -0.89, -0.88, -0.87, -0.86, -0.85, -0.84, -0.83, -0.82, -0.81, -0.7, -0.69, -0.68, -0.67, -0.66, -0.65, -0.64, -0.63, -0.62, -0.61, -0.6, -0.59, -0.58, -0.57, -0.56, -0.55, -0.54, -0.53, -0.52, -0.51, -0.4, -0.39, -0.38, -0.37, -0.36, -0.35, -0.34, -0.33, -0.32, -0.31, -0.5, -0.49, -0.48, -0.47, -0.46, -0.45, -0.44, -0.43, -0.42, -0.41, -0.3, -0.29, -0.28, -0.27, -0.26, -0.25, -0.24, -0.23, -0.22, -0.21, -0.2, -0.19, -0.18, -0.17, -0.16, -0.15, -0.14, -0.13, -0.12, -0.11, 0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 0.15, 0.16, 0.17, 0.18, 0.19, -0.1, -0.09, -0.08, -0.07, -0.06, -0.05, -0.04, -0.03, -0.02, -0.01, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79], [-1.0, -0.99, -0.98, -0.97, -0.96, -0.95, -0.94, -0.93, -0.92, -0.91, -0.8, -0.79, -0.78, -0.77, -0.76, -0.75, -0.74, -0.73, -0.72, -0.71, -0.9, -0.89, -0.88, -0.87, -0.86, -0.85, -0.84, -0.83, -0.82, -0.81, -0.7, -0.69, -0.68, -0.67, -0.66, -0.65, -0.64, -0.63, -0.62, -0.61, -0.6, -0.59, -0.58, -0.57, -0.56, -0.55, -0.54, -0.53, -0.52, -0.51, -0.4, -0.39, -0.38, -0.37, -0.36, -0.35, -0.34, -0.33, -0.32, -0.31, -0.5, -0.49, -0.48, -0.47, -0.46, -0.45, -0.44, -0.43, -0.42, -0.41, -0.3, -0.29, -0.28, -0.27, -0.26, -0.25, -0.24, -0.23, -0.22, -0.21, -0.2, -0.19, -0.18, -0.17, -0.16, -0.15, -0.14, -0.13, -0.12, -0.11, 0.0, 0.01, 0.02, 0.03, 0.04, 0.05, 0.06, 0.07, 0.08, 0.09, 0.1, 0.11, 0.12, 0.13, 0.14, 
0.15, 0.16, 0.17, 0.18, 0.19, -0.1, -0.09, -0.08, -0.07, -0.06, -0.05, -0.04, -0.03, -0.02, -0.01, 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.3, 0.31, 0.32, 0.33, 0.34, 0.35, 0.36, 0.37, 0.38, 0.39, 0.4, 0.41, 0.42, 0.43, 0.44, 0.45, 0.46, 0.47, 0.48, 0.49, 0.5, 0.51, 0.52, 0.53, 0.54, 0.55, 0.56, 0.57, 0.58, 0.59, 0.8, 0.81, 0.82, 0.83, 0.84, 0.85, 0.86, 0.87, 0.88, 0.89, 0.9, 0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 0.6, 0.61, 0.62, 0.63, 0.64, 0.65, 0.66, 0.67, 0.68, 0.69, 0.7, 0.71, 0.72, 0.73, 0.74, 0.75, 0.76, 0.77, 0.78, 0.79]]
|
85 |
+
if list(test.audio) == should: printb('beatswap("1, 3, 2, 4") passed')
|
86 |
+
else:printe(f'''beatswap("1, 3, 2, 4") error, 1st line is the expected result:
|
87 |
+
{should}
|
88 |
+
{list(test.audio)}''')
|
89 |
+
|
90 |
+
# [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 120, 140, 160, 180, 200]
|
91 |
+
|
92 |
+
def import_test():
|
93 |
+
audio2=list(i/100000 for i in range(-100000,100000,1))
|
94 |
+
beatmap2=list(range(0,100000,10))+list(range(100,200001,20))
|
95 |
+
test2=bm.song(audio=audio2, samplerate=2, bmap=beatmap2, filename='test2.mp3', log=False)
|
96 |
+
|
97 |
+
def shift_test(number, shift):
    """Timing helper: build a synthetic song of 2*`number` samples and shift its beatmap."""
    samples = [i / number for i in range(-number, number, 1)]
    beats = list(range(0, number, 10)) + list(range(100, number * 2 + 1, 20))
    synthetic = bm.song(audio=samples, samplerate=2, bmap=beats, filename='test2.mp3', log=False)
    synthetic.beatmap.shift(shift)
103 |
+
def scale_test(number, scale):
    """Timing helper: build a synthetic song of 2*`number` samples and scale its beatmap.

    Fixed: the original hard-coded scale(0.5), silently ignoring the `scale` parameter.
    """
    audio2 = list(i / number for i in range(-number, number, 1))
    beatmap2 = list(range(0, number, 10)) + list(range(100, number * 2 + 1, 20))
    test2 = bm.song(audio=audio2, samplerate=2, bmap=beatmap2, filename='test2.mp3', log=False)
    test2.beatmap.scale(scale)
109 |
+
def beatswap_test(number, pattern):
    """Timing helper: build a synthetic song of 2*`number` samples and beat-swap it."""
    samples = [(i / number) * 100 for i in range(-number, number, 1)]
    beats = list(range(0, number * 100, 1000)) + list(range(10000, number * 200 + 1, 2000))
    synthetic = bm.song(audio=samples, samplerate=2, bmap=beats, filename='test2.mp3', log=False)
    synthetic.beatswap(pattern)
|
114 |
+
|
115 |
+
input('run time tests?')
|
116 |
+
import timeit
|
117 |
+
printb(f'beatmap_shift(-2.5) for 1000 beats takes {timeit.timeit(lambda: shift_test(1000,shift=-2.5), number=1)}') #0.0028216999489814043
|
118 |
+
printb(f'beatmap_shift(-2.5) for 20000 beats takes {timeit.timeit(lambda: shift_test(20000,shift=-2.5), number=1)}') #0.6304191001690924
|
119 |
+
printb(f'beatmap_scale(0.5) for 20000 beats takes {timeit.timeit(lambda: scale_test(20000,scale=0.5), number=1)}') #0.10623739985749125
|
120 |
+
printb(f'test2.beatswap("1,3,2,4") for 20000 beats takes {timeit.timeit(lambda: beatswap_test(20000,pattern="1,3,2,4"), number=1)}') #0.406920799985528
|
121 |
+
printb(f'test2.beatswap("1v2, 0:0.5b5, 1:1.5r, 3c, 4:3") for 20000 beats takes {timeit.timeit(lambda: beatswap_test(20000,pattern="1v2, 0:0.5b5, 1:1.5r, 3c, 4:3"), number=1)}') #0.5667359000071883
|
wrapper.py → beat_manipulator/wrapper.py
RENAMED
@@ -1,54 +1,93 @@
|
|
1 |
-
import
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
2 |
with open("presets.json", "r") as f:
|
3 |
presets=f.read()
|
4 |
|
5 |
presets=json.loads(presets)
|
6 |
|
7 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
8 |
'''basically a way to quickly test scale and offset'''
|
9 |
if type(filename)==str :
|
10 |
-
song=bm.song(filename)
|
11 |
samplerate=song.samplerate
|
12 |
-
else:
|
13 |
song=filename
|
14 |
-
song
|
15 |
-
song.
|
16 |
-
song
|
17 |
-
song.
|
18 |
-
|
19 |
-
|
20 |
-
song.quick_beatsample(output=None, lib=lib, audio2=bm.generate_saw(0.05, 500, samplerate), scale=8*scale, shift=6*scale+shift)
|
21 |
-
song.quick_beatsample(output=output, suffix=' ('+lib+')',lib=lib, audio2=bm.generate_saw(0.05, 1000, samplerate), scale=8*scale, shift=7*scale+shift)
|
22 |
-
del song
|
23 |
-
|
24 |
-
def lib_test_full(filename,samplerate):
|
25 |
'''A way to test all beat detection modules to see which one performs better.'''
|
26 |
print(filename)
|
27 |
-
lib_test(filename, samplerate,'madmom.BeatDetectionProcessor')
|
28 |
-
lib_test(filename, samplerate,'madmom.BeatDetectionProcessor.consistent')
|
29 |
#lib_test(filename, samplerate,'madmom.BeatTrackingProcessor') # better for live performances with variable BPM
|
30 |
#lib_test(filename, samplerate,'madmom.BeatTrackingProcessor.constant') # results identical to madmom.BeatDetectionProcessor
|
31 |
-
lib_test(filename, samplerate,'madmom.BeatTrackingProcessor.consistent')
|
32 |
-
lib_test(filename, samplerate,'madmom.CRFBeatDetectionProcessor')
|
33 |
-
lib_test(filename, samplerate,'madmom.CRFBeatDetectionProcessor.constant')
|
34 |
#lib_test(filename, samplerate,'madmom.DBNBeatTrackingProcessor') # better for live performances with variable BPM
|
35 |
-
lib_test(filename, samplerate,'madmom.DBNBeatTrackingProcessor.1000')
|
36 |
-
lib_test(filename, samplerate,'madmom.DBNDownBeatTrackingProcessor')
|
37 |
import gc
|
38 |
gc.collect()
|
39 |
|
40 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
41 |
#print(preset)
|
42 |
-
if 'pattern' in preset:
|
43 |
-
scale=scale*(preset['scale'] if 'scale' in preset else 1)
|
44 |
shift=shift+(preset['shift'] if 'shift' in preset else 0)
|
45 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
46 |
elif preset['type'] =='sidechain':
|
47 |
length=preset['sc length'] if 'sc length' in preset else 0.5
|
48 |
curve=preset['sc curve'] if 'sc curve' in preset else 2
|
49 |
vol0=preset['sc vol0'] if 'sc vol0' in preset else 0
|
50 |
vol1=preset['sc vol1'] if 'sc vol1' in preset else 1
|
51 |
-
|
|
|
52 |
scale=scale*(preset['scale'] if 'scale' in preset else 1)
|
53 |
shift=shift+(preset['shift'] if 'shift' in preset else 0)
|
54 |
song.quick_sidechain(output=None, audio2=sidechain, scale=scale, shift=shift)
|
@@ -60,9 +99,10 @@ def process(song:bm.song, preset: str, scale:float, shift:float)->bm.song:
|
|
60 |
return song
|
61 |
|
62 |
|
63 |
-
def use_preset(output:str,
|
64 |
-
song
|
65 |
-
|
|
|
66 |
#print(song.samplerate)
|
67 |
if preset is None:
|
68 |
weights=[]
|
@@ -71,46 +111,99 @@ def use_preset(output:str,filename: str, preset: str, presets=presets, scale=1,
|
|
71 |
import random
|
72 |
preset = random.choices(population=list(presets), weights=weights, k=1)[0]
|
73 |
name=preset
|
74 |
-
preset=presets[preset]
|
75 |
-
if test is True:
|
76 |
-
testsong=
|
77 |
-
lib_test(testsong, output, samplerate=testsong.samplerate)
|
78 |
del testsong
|
79 |
#print(name, preset)
|
|
|
|
|
|
|
|
|
80 |
if '1' in preset:
|
81 |
for i in preset:
|
82 |
-
if type(preset[i])==dict:song=
|
83 |
-
else: song=
|
84 |
-
song
|
85 |
-
|
86 |
-
|
87 |
-
|
88 |
-
|
89 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
90 |
del testsong
|
91 |
-
for i in presets:
|
92 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
93 |
|
94 |
|
95 |
|
96 |
# ___ my stuff ___
|
97 |
|
98 |
-
|
99 |
-
filename='F:/Stuff/Music/Tracks/
|
100 |
#filename = 'F:/Stuff/Music/Tracks/'+random.choice(os.listdir("F:\Stuff\Music\Tracks"))
|
|
|
101 |
|
102 |
-
|
103 |
-
shift=0
|
104 |
-
|
|
|
105 |
|
106 |
-
#
|
107 |
-
#
|
108 |
-
|
109 |
-
#use_preset ('', filename, 'jungle B', scale=scale, shift=shift, beat='normal', test=test)
|
110 |
#use_preset ('', filename, None, scale=scale, shift=shift, test=False)
|
111 |
-
all('',filename, scale=1, shift=0, test=
|
112 |
|
|
|
113 |
#song=bm.song(filename)
|
114 |
-
#song.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
115 |
#song.write_image()
|
116 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from . import main as bm
|
2 |
+
from .main import _outputfilename
|
3 |
+
import json
|
4 |
+
def _safer_eval(string: str) -> float:
    """Evaluate a numeric expression string (digits and . + - * / only) into a number.

    Non-string inputs are returned unchanged.
    NOTE(review): this still calls eval(); stripping everything but digits and
    arithmetic operators narrows the attack surface, but eval on external text
    is best avoided entirely (e.g. via ast.literal_eval or a tiny parser).
    """
    if not isinstance(string, str):
        return string
    operators = '.+-*/'
    expression = ''.join(ch for ch in string if ch.isdecimal() or ch in operators)
    return eval(expression)
|
9 |
+
|
10 |
with open("presets.json", "r") as f:
|
11 |
presets=f.read()
|
12 |
|
13 |
presets=json.loads(presets)
|
14 |
|
15 |
+
def _song_copy(audio: bm.song):
    """Return a new song sharing this song's audio, beatmap and metadata (flagged as a copy)."""
    return bm.song(
        path=audio.path,
        audio=audio.audio,
        samplerate=audio.samplerate,
        bmap=audio.beatmap,
        caching=audio.caching,
        filename=audio.filename,
        copied=True,
    )
|
17 |
+
|
18 |
+
def _normalize(song: bm.song, beat, pattern=None, scale=None, shift=None):
    """Apply a beat 'normalization' preset (or an explicit pattern) to `song` in place.

    beat: 'normal'/None (no-op), 'shifted', 'shifted2', or anything else (warns).
    pattern: when given, overrides `beat` and is applied with scale/shift
    (defaulting to 1 and 0). Returns the same song object.
    """
    # Fixed: check for None before lowercasing — the original called beat.lower()
    # first, so the `beat is None` branch could never run without an AttributeError.
    if beat is not None: beat = beat.lower()
    if pattern is not None:
        if scale is None: scale = 1
        if shift is None: shift = 0
        song.quick_beatswap(output=None, pattern=pattern, scale=scale, shift=shift)
    elif beat == 'normal' or beat is None: pass
    elif beat == 'shifted': song.quick_beatswap(output=None, pattern='1,2,3,4,5,7,6,8', scale=0.5)
    elif beat == 'shifted2': song.quick_beatswap(output=None, pattern='1,2,3,4,5,6,8,7', scale=0.5)
    else: print(f"'{beat}' is not a valid beat")
    return song
|
29 |
+
|
30 |
+
def lib_test(filename, output='', samplerate=44100, lib='madmom.BeatDetectionProcessor', scale=1, shift=0, beat='normal', log=False):
    '''basically a way to quickly test scale and offset'''
    if type(filename) == str:
        track = bm.song(filename)
        samplerate = track.samplerate
    else:
        track = filename
    needs_normalize = beat != 'normal' and beat is not None
    if needs_normalize: track = _normalize(song=track, beat=beat, scale=scale, shift=shift)
    # The 'test' pattern overlays audible markers so scale/shift can be judged by ear.
    track.quick_beatswap(output=None, pattern='test', scale=scale, shift=shift, log=log)
    if needs_normalize: track = _normalize(song=track, beat=beat, scale=scale, shift=shift)
    track.write_audio(output=bm.outputfilename('', track.filename, f' ({lib} x{scale} {shift})'))
|
41 |
+
|
42 |
+
def lib_test_full(filename, samplerate, log):
    '''A way to test all beat detection modules to see which one performs better.'''
    print(filename)
    # Fixed: pass samplerate/lib by keyword. Positionally, `samplerate` landed in
    # lib_test's `output` parameter and the library name in `samplerate`, so the
    # `lib` argument was never actually set.
    for lib in (
        'madmom.BeatDetectionProcessor',
        'madmom.BeatDetectionProcessor.consistent',
        # 'madmom.BeatTrackingProcessor',  # better for live performances with variable BPM
        # 'madmom.BeatTrackingProcessor.constant',  # results identical to madmom.BeatDetectionProcessor
        'madmom.BeatTrackingProcessor.consistent',
        'madmom.CRFBeatDetectionProcessor',
        'madmom.CRFBeatDetectionProcessor.constant',
        # 'madmom.DBNBeatTrackingProcessor',  # better for live performances with variable BPM
        'madmom.DBNBeatTrackingProcessor.1000',
        'madmom.DBNDownBeatTrackingProcessor',
    ):
        lib_test(filename, samplerate=samplerate, lib=lib, log=log)
    import gc
    gc.collect()
|
57 |
|
58 |
+
def _process_list(something) -> list:
    """Coerce a scalar, list, or comma-separated string into a sequence of numbers.

    - int/float: wrapped into a 1-tuple.
    - list: returned as-is when it already holds numbers, otherwise each element
      is evaluated with _safer_eval. (Fixed: the original computed this conversion
      in a bare conditional expression and discarded the result.)
    - anything else: split on ',' and each piece evaluated with _safer_eval.
    """
    if isinstance(something, (int, float)):
        something = (something,)
    elif isinstance(something, list):
        if not isinstance(something[0], (int, float)):
            something = [_safer_eval(i) for i in something]
    else:
        something = [_safer_eval(i) for i in something.split(',')]
    return something
|
63 |
+
|
64 |
+
def _process(song:bm.song, preset: str, scale:float, shift:float, random=False, every=False, log=True)->bm.song:
|
65 |
#print(preset)
|
66 |
+
if 'pattern' in preset:
|
|
|
67 |
shift=shift+(preset['shift'] if 'shift' in preset else 0)
|
68 |
+
# Scale can be a list and we either take one value or all of them
|
69 |
+
if 'scale' in preset: pscale=_process_list(preset['scale'])
|
70 |
+
else: pscale=(1,)
|
71 |
+
#input(pscale)
|
72 |
+
if random is True:
|
73 |
+
import random
|
74 |
+
pscale=random.choice(pscale)
|
75 |
+
elif every is True:
|
76 |
+
songs=[]
|
77 |
+
for i in pscale:
|
78 |
+
song2=_song_copy(song)
|
79 |
+
song2.quick_beatswap(output=None, pattern=preset['pattern'], scale=scale*i, shift=shift, log = log)
|
80 |
+
songs.append((song2, i))
|
81 |
+
return songs
|
82 |
+
else: pscale=preset['scale_d'] if 'scale_d' in preset else pscale[0]
|
83 |
+
if every is False: song.quick_beatswap(output=None, pattern=preset['pattern'], scale=scale*pscale, shift=shift, log = log)
|
84 |
elif preset['type'] =='sidechain':
|
85 |
length=preset['sc length'] if 'sc length' in preset else 0.5
|
86 |
curve=preset['sc curve'] if 'sc curve' in preset else 2
|
87 |
vol0=preset['sc vol0'] if 'sc vol0' in preset else 0
|
88 |
vol1=preset['sc vol1'] if 'sc vol1' in preset else 1
|
89 |
+
from . import generate
|
90 |
+
sidechain=bm.open_audio(preset['sc impulse'])[0] if 'sc impulse' in preset else generate.sidechain(samplerate=song.samplerate, length=length, curve=curve, vol0=vol0, vol1=vol1, smoothing=40)
|
91 |
scale=scale*(preset['scale'] if 'scale' in preset else 1)
|
92 |
shift=shift+(preset['shift'] if 'shift' in preset else 0)
|
93 |
song.quick_sidechain(output=None, audio2=sidechain, scale=scale, shift=shift)
|
|
|
99 |
return song
|
100 |
|
101 |
|
102 |
+
def use_preset(output:str,song: str, preset: str, presets=presets, scale=1, shift=0, beat:str='normal', test=False, _normalize=True, random=False, every=False, log = True):
|
103 |
+
if not isinstance(song, bm.song):
|
104 |
+
song=bm.song(song)
|
105 |
+
else: song = _song_copy(song)
|
106 |
#print(song.samplerate)
|
107 |
if preset is None:
|
108 |
weights=[]
|
|
|
111 |
import random
|
112 |
preset = random.choices(population=list(presets), weights=weights, k=1)[0]
|
113 |
name=preset
|
114 |
+
if isinstance(preset, str): preset=presets[preset]
|
115 |
+
if test is True:
|
116 |
+
testsong=_song_copy(song)
|
117 |
+
lib_test(testsong, output, samplerate=testsong.samplerate, log = log)
|
118 |
del testsong
|
119 |
#print(name, preset)
|
120 |
+
if _normalize is True and beat!='normal' and beat is not None:
|
121 |
+
if '_normalize' in preset:
|
122 |
+
if preset['_normalize'] is True:
|
123 |
+
song=_normalize(song, beat)
|
124 |
if '1' in preset:
|
125 |
for i in preset:
|
126 |
+
if type(preset[i])==dict:song=_process(song, preset[i], scale=scale, shift=shift, log=log)
|
127 |
+
else: song=_process(song, preset,scale=scale,shift=shift,random=random, every=every, log=log)
|
128 |
+
if isinstance(song, list):
|
129 |
+
for i in song:
|
130 |
+
i[0].write(output=_outputfilename(output, i[0].filename, suffix=f' ({name}{(" x"+str(round(i[1], 3)))*(len(song)>1)})'))
|
131 |
+
else:
|
132 |
+
out_folder = _outputfilename(output, song.filename, suffix=' ('+name+')')
|
133 |
+
song.write(output=out_folder)
|
134 |
+
return out_folder
|
135 |
+
|
136 |
+
def all(output:str, filename: str, presets:dict=presets, scale=1, shift=0, beat='normal', test=True, boring=False, effects=False, variations=False, log = False):
    """Render `filename` through every preset, writing one output file per preset.

    boring=False drops the plain speed-change presets; effects=False skips presets
    whose name contains 'effect - '. Presets with a numeric scale < 0.01 are skipped.
    NOTE: shadows the builtin all(); kept for interface compatibility.
    """
    # Work on a shallow copy: popping from the module-level default dict would
    # permanently remove presets for every later call (mutable-default pitfall).
    presets = dict(presets)
    if boring is False:
        for name in ['2x faster','3x faster','4x faster','8x faster','1.33x faster','1.5x faster','1.5x slower','reverse','random', 'syncopated effect']:
            if name in presets:
                presets.pop(name)
    if not isinstance(filename, bm.song): song = bm.song(filename)
    else: song = filename
    song__normalized = _normalize(_song_copy(song), beat)
    if test is True:
        testsong = _song_copy(song)
        lib_test(testsong, output, samplerate=testsong.samplerate, log=log)
        del testsong
    for key, i in presets.items():
        # Skip presets whose numeric scale is effectively zero.
        if 'scale' in i:
            if isinstance(i['scale'], (int, float)) and i['scale'] < 0.01:
                continue
        if effects is False and 'effect - ' in key:
            continue
        # Presets that opt in to normalization start from the normalized copy.
        if i.get('_normalize') is True:
            song2 = _song_copy(song__normalized)
        else:
            song2 = _song_copy(song)
        use_preset(output, song2, preset=key, presets=presets, scale=scale, shift=shift, beat=beat, test=False, _normalize=False, every=variations, log=log)
|
164 |
|
165 |
|
166 |
|
167 |
# ___ my stuff ___
|
168 |
|
169 |
+
# ___ get song ___
|
170 |
+
#filename='F:/Stuff/Music/Tracks/Poseidon & Leon Ross - Parallax.mp3'
|
171 |
#filename = 'F:/Stuff/Music/Tracks/'+random.choice(os.listdir("F:\Stuff\Music\Tracks"))
|
172 |
+
# print(filename)
|
173 |
|
174 |
+
# ___ analyze+fix ___
|
175 |
+
#scale, shift = 1,0
|
176 |
+
#lib_test(filename, scale=scale, shift=shift)
|
177 |
+
#bm.fix_beatmap(filename, scale=scale, shift=shift)
|
178 |
|
179 |
+
# ___ presets ___
|
180 |
+
#use_preset ('', filename, 'dotted kicks', scale=1, shift=0, beat='normal', test=False)
|
|
|
|
|
181 |
#use_preset ('', filename, None, scale=scale, shift=shift, test=False)
|
182 |
+
#all('', filename, scale=1, shift=0, beat='normal', test=False)
|
183 |
|
184 |
+
# ___ beat swap __
|
185 |
#song=bm.song(filename)
|
186 |
+
#song.quick_beatswap(output='', pattern='test', scale=1, shift=0)
|
187 |
+
|
188 |
+
# ___ osu ___
|
189 |
+
#song=bm.song()
|
190 |
+
#song.generate_hitmap()
|
191 |
+
#song.osu()
|
192 |
+
#song.hitsample()
|
193 |
+
|
194 |
+
# ___ saber2osu ___
|
195 |
+
#import Saber2Osu as s2o
|
196 |
+
#osu=s2o.osu_map(threshold=0.3, declumping=100)
|
197 |
+
|
198 |
+
# ___ song to image ___
|
199 |
#song.write_image()
|
200 |
+
|
201 |
+
# ___ randoms ___
|
202 |
+
# while True:
|
203 |
+
# filename = 'F:/Stuff/Music/Tracks/'+random.choice(os.listdir("F:\Stuff\Music\Tracks"))
|
204 |
+
# use_preset ('', filename, None, scale=scale, shift=shift, test=False)
|
205 |
+
|
206 |
+
# ___ effects ___
|
207 |
+
#song = bm.song(filename)
|
208 |
+
#song.audio=bm.pitchB(song.audio, 2, 100)
|
209 |
+
#song.write_audio(bm.outputfilename('',filename, ' (pitch)'))
|
requirements.txt
CHANGED
@@ -1,4 +1,12 @@
|
|
1 |
cython
|
|
|
2 |
numpy
|
|
|
|
|
|
|
|
|
3 |
soundfile
|
|
|
|
|
|
|
4 |
git+https://github.com/CPJKU/madmom
|
|
|
1 |
cython
|
2 |
+
mido
|
3 |
numpy
|
4 |
+
scipy
|
5 |
+
pytest
|
6 |
+
pyaudio
|
7 |
+
pyfftw
|
8 |
soundfile
|
9 |
+
ffmpeg-python
|
10 |
+
librosa
|
11 |
+
pedalboard
|
12 |
git+https://github.com/CPJKU/madmom
|
samples/cowbell.flac
ADDED
Binary file (16.4 kB). View file
|
|