import librosa
import numpy as np

def __getBeats(musicPath):
  """Detect beat positions in an audio file and return them in milliseconds.

  Args:
    musicPath: path to an audio file readable by librosa (e.g. .aac, .wav).

  Returns:
    1-D numpy array of beat times in milliseconds. The final detected beat
    is dropped, and the first entry is forced to 0 so playback starts on a
    beat boundary. Empty array if no beats are detected.
  """
  y, sr = librosa.load(musicPath, sr=None)
  # Median aggregation makes the onset envelope more robust to percussive
  # noise than the default mean. NOTE: the audio argument is keyword-only
  # (y=y) in librosa >= 0.10.
  onset_env = librosa.onset.onset_strength(y=y, sr=sr, aggregate=np.median)
  tempo, beats = librosa.beat.beat_track(onset_envelope=onset_env, sr=sr)
  # frames -> seconds -> milliseconds; drop the trailing beat.
  beatsRes = np.array(librosa.frames_to_time(beats[:-1], sr=sr)) * 1000
  if beatsRes.size:
    # Anchor the sequence at t=0 regardless of where the first beat landed.
    beatsRes[0] = 0
  return beatsRes

def genFrames(musicPath, f, delay, fps=24):
  """Convert beat intervals of a music file into per-segment frame counts.

  Every ``f``-th detected beat is kept; for each consecutive pair of kept
  beats, the gap (minus ``delay``) is converted into a whole number of
  video frames at ``fps`` frames per second.

  Args:
    musicPath: path to the audio file, forwarded to ``__getBeats``.
    f: keep every f-th beat (stride over the detected beat list).
    delay: milliseconds subtracted from each beat-to-beat gap, e.g. to
      compensate for encoder latency.
    fps: video frame rate used for the conversion (default 24, matching
      the previous hard-coded value).

  Returns:
    List of ints — frame counts per segment. Empty list if fewer than two
    beats survive the stride filter.
  """
  beats = __getBeats(musicPath)
  # Stride-select every f-th beat (indices 0, f, 2f, ...).
  targetBeats = list(beats[::f])

  msPerFrame = 1000 / fps
  frame = []
  # One segment per consecutive pair of kept beats; truncation (int())
  # matches the original rounding-toward-zero behavior.
  for prev, nxt in zip(targetBeats, targetBeats[1:]):
    duration = nxt - prev - delay
    frame.append(int(duration / msPerFrame))

  return frame

# genFrames('C:/Users/Zhang/Desktop/demo/python_demos/gen_video/pitchtime/origin_music/guitar.aac', 2, 0)
# genFrames('C:/Users/Zhang/Desktop/demo/python_demos/gen_video/pitchtime/origin_music/humming.aac', 2, 250)
# genFrames('C:/Users/Zhang/Desktop/demo/python_demos/gen_video/pitchtime/origin_music/letmego.aac', 2, 250)
