# -*- coding:utf-8 -*-
# Author:凌逆战 | Never
# Date: 2023/1/2
"""
.gitingore
"""
import librosa
import numpy as np
import matplotlib.pyplot as plt
import soundfile
from librosa.filters import get_window

sr = 16000
frame_len = 256   # hop size: half the window -> 50% overlap
window_len = 512  # analysis window length
NFFT = 512
# Periodic ("fftbins=True") hann window: at 50% overlap adjacent windows sum
# to 1 (COLA condition), so plain overlap-add reconstructs the signal exactly.
fft_window = get_window("hann", Nx=window_len, fftbins=True)
wav = librosa.load("./p225_001.wav", sr=sr)[0]
# Truncate to a whole number of hops so framing covers the signal evenly.
wav = wav[:len(wav) - len(wav) % frame_len]
print("wav.shape", wav.shape)

# Pad half a window on each side (equivalent to center=True): without the
# padding, the first and last half-frames cannot be recovered after windowing.
wav_pad = np.pad(wav, (frame_len, frame_len), mode="constant")
print("wav_pad.shape", wav_pad.shape)
# librosa's framing helper returns shape (frame_length, n_frames), e.g. (512, 129).
frame_array = librosa.util.frame(wav_pad, frame_length=window_len, hop_length=frame_len)
frame_num = frame_array.shape[1]
# Apply the analysis window to all frames at once via broadcasting instead of
# a per-frame Python loop; cast back so the dtype matches the framed signal.
win_array = (frame_array * fft_window[:, np.newaxis]).astype(frame_array.dtype)


# ifft、加窗、overlap_add
def overlap_add_1(win_array):
    """Overlap-add reconstruction, streaming one hop at a time.

    Each output hop is the second half of the previous frame plus the first
    half of the current frame. The half-window of padding added before
    framing is trimmed from both ends of the result.

    Args:
        win_array: windowed frames, shape (window_len, n_frames); 50% overlap
            is assumed (hop = window_len // 2).

    Returns:
        1-D reconstructed signal of length hop * (n_frames - 1).
    """
    # Derive the geometry from the input instead of module-level globals so
    # the function works for any frame size / count.
    win_len, n_frames = win_array.shape
    hop = win_len // 2

    pieces = []
    previous_half = np.zeros(hop)  # float64 accumulator, as before
    for idx in range(n_frames):
        frame = win_array[:, idx]
        pieces.append(previous_half + frame[:hop])
        previous_half = frame[hop:]
    # Tail: the second half of the last frame has no successor to pair with.
    pieces.append(previous_half)

    wav_sys = np.concatenate(pieces, axis=0)
    # Remove the half-window padding added before framing.
    wav_sys = wav_sys[hop:-hop]
    print("wav_sys.shape", wav_sys.shape)
    return wav_sys


def overlap_add_2(win_array):
    """Overlap-add reconstruction by accumulating into a preallocated buffer.

    Every frame is added at its hop-aligned offset; the half-window of
    padding added before framing is trimmed from both ends.

    Args:
        win_array: windowed frames, shape (window_len, n_frames); 50% overlap
            is assumed (hop = window_len // 2).

    Returns:
        1-D reconstructed signal of length hop * (n_frames - 1).
    """
    # Derive the geometry from the input instead of module-level globals.
    win_len, n_frames = win_array.shape
    hop = win_len // 2

    wav_sys = np.zeros(win_len + hop * (n_frames - 1))
    for idx in range(n_frames):
        start = idx * hop
        wav_sys[start:start + win_len] += win_array[:, idx]

    # Remove the half-window padding added before framing.
    wav_sys = wav_sys[hop:-hop]
    print("wav_sys.shape", wav_sys.shape)
    return wav_sys


# https://github.com/miralv/Deep-Learning-for-Speech-Enhancement/blob/b2f3d4e33fdc8a1d75b774f009aadf95616efc99/recoverSignal.py
def overlap_add_3(win_array):
    wav_sys = np.zeros(window_len + frame_len * (frame_num - 1))
    wav_sys[0:frame_len] = win_array[0:frame_len, 0]
    start_point = frame_len
    for i in range(0, (frame_num - 1)):
        # Add the elements corresponding to the current half window
        wav_sys[start_point:start_point + frame_len] = np.add(win_array[frame_len:, i], win_array[0:frame_len, i + 1])
        start_point += frame_len

    # Add the last half window manually
    wav_sys[start_point:] = win_array[frame_len:, frame_num - 1]
    return wav_sys


# Reconstruct with the streaming OLA variant, save the result, and compare
# the original and reconstructed waveforms visually.
wav_sys = overlap_add_1(win_array)
soundfile.write("./overlap_add.wav", data=wav_sys, samplerate=sr)

fig, (ax_orig, ax_rec) = plt.subplots(2, 1)
ax_orig.plot(wav)      # original signal
ax_rec.plot(wav_sys)   # overlap-add reconstruction
plt.show()

