import python_speech_features as psf
import numpy as np
# import scipy.signal as signal
import scipy.io.wavfile as wav
import os
from tqdm import tqdm
import json
import random
import librosa
import wave
import soundfile as sf
import PyWave
from audiolazy import lpc as lazy_lpc
import scipy.signal as signal
import scipy.io.wavfile as wavfile
from multiprocessing import Pool
import math


def read_audio(wav_file, target_samplerate, max_duration):
    """Load a mono WAV file, validate it, and return a normalized signal.

    Parameters
    ----------
    wav_file : str
        Path of the WAV file to read.
    target_samplerate : int
        Required sample rate; files at any other rate are rejected.
    max_duration : float
        Maximum accepted duration in seconds.

    Returns
    -------
    tuple
        ``(audio, frequency, duration)`` where ``audio`` is a float32
        numpy array min-max scaled to [-1, 1], or ``(None, None, None)``
        when the file fails the channel / rate / duration checks.
    """
    wf = PyWave.open(wav_file)
    print(f"This WAVE file [{wav_file}] has the following properties:")
    print(wf.channels, "channels")
    print(wf.frequency, "Hz sample rate")
    print(wf.bitrate, "bits per second")
    print(wf.samples, "total samples")
    print(wf.data_length, "data bytes")
    duration = float(wf.samples) / wf.frequency  # duration in seconds
    print(f'duration : {duration}')

    # Accept only mono audio at the target rate, not longer than max_duration.
    if wf.channels != 1 or wf.frequency != target_samplerate:
        return None, None, None
    if duration > max_duration:
        return None, None, None

    # Infer the on-disk sample type from bytes-per-sample.
    itype = wf.data_length / wf.samples
    dtype = np.uint8
    if itype == 2:
        dtype = np.int16
    if itype == 4:
        # NOTE(review): 4-byte samples are assumed to be float32 — a
        # 32-bit integer PCM file would be misread here; confirm inputs.
        dtype = np.float32
    print(itype, "itype")

    audiobytes = wf.read(int(wf.samples * itype))  # raw PCM bytes
    audio = np.frombuffer(audiobytes, dtype=dtype).astype(np.float32)

    vocal_sep = False
    if vocal_sep:
        # Optional vocal separation via median-filtered soft masking
        # (disabled by default; kept for experimentation).
        sr = wf.frequency
        S_full, phase = librosa.magphase(librosa.stft(audio))
        S_filter = librosa.decompose.nn_filter(
            S_full,
            aggregate=np.median,
            metric='cosine',
            width=int(librosa.time_to_frames(2, sr=sr)))
        S_filter = np.minimum(S_full, S_filter)
        margin_i, margin_v = 2, 10
        power = 2

        mask_i = librosa.util.softmask(S_filter,
                                       margin_i * (S_full - S_filter),
                                       power=power)
        mask_v = librosa.util.softmask(S_full - S_filter,
                                       margin_v * S_filter,
                                       power=power)
        S_foreground = mask_v * S_full
        S_background = mask_i * S_full
        audio = librosa.istft(S_foreground * phase)

    # Min-max normalize to [-1, 1]. Renamed from max/min to avoid
    # shadowing the builtins, and guard a constant (e.g. all-silence)
    # signal, which previously divided by zero.
    peak = np.max(audio).astype(np.float32)
    trough = np.min(audio).astype(np.float32)
    print(f"min {trough} max {peak}")
    if peak > trough:
        audio = (audio - trough) / (peak - trough) * 2.0 - 1.0
    else:
        audio = np.zeros_like(audio)
    return audio, wf.frequency, duration

def resample4wavs(frompath, topath, resamplerate):
    """Resample one audio file and write the result to disk.

    :param frompath: path of the source audio file
    :param topath: path the resampled audio is written to; may be the
        same as ``frompath``, in which case the original is overwritten
    :param resamplerate: target sample rate in Hz
    """
    samples, source_rate = librosa.load(frompath)
    resampled = librosa.resample(samples, orig_sr=source_rate, target_sr=resamplerate)
    # librosa.output.write_wav was removed from librosa; soundfile does the write.
    sf.write(topath, resampled, resamplerate)

def audio_mfcc_legacy(wav_file, feature_file=None, target_samplerate=16000, max_duration=120):
    """Extract sliding-window MFCC (+delta, +delta-delta) features.

    Windows are aligned to a 30 fps video track (one analysis window per
    video frame, 50% overlap), then grouped into overlapping chunks of
    64 windows stepped by 2.

    Parameters
    ----------
    wav_file : str
        WAV file to analyze (read with scipy, native sample rate used).
    feature_file : str, optional
        When given, the output array is saved there via ``np.save``.
    target_samplerate, max_duration
        Unused here; kept for signature parity with the newer variants.

    Returns
    -------
    np.ndarray of shape ``(win_count, 64, 39)``.
    """
    # load wav
    (rate, sig) = wav.read(wav_file)
    print(rate)

    # Analysis parameters: one window per video frame, 2x overlap.
    videorate = 30
    winlen = 1. / videorate       # window length in seconds
    winstep = 0.5 / videorate     # hop: 50% overlap
    numcep = 13                   # typical cepstrum count
    winfunc = np.hanning

    mfcc = psf.mfcc(sig, rate, winlen=winlen, winstep=winstep,
                    numcep=numcep, nfilt=numcep*2, nfft=int(rate/videorate), winfunc=winfunc)
    mfcc_delta = psf.base.delta(mfcc, 2)
    mfcc_delta2 = psf.base.delta(mfcc_delta, 2)

    # Stack static + first + second derivatives: 13 * 3 = 39 features.
    mfcc_all = np.concatenate((mfcc, mfcc_delta, mfcc_delta2), axis=1)
    print(type(mfcc_all), mfcc_all.shape)

    win_size = 64
    # Guard clips shorter than one chunk: the unclamped formula yields a
    # negative count and np.zeros would raise ValueError.
    win_count = max(int((len(mfcc_all) - win_size) / 2) + 1, 0)
    output = np.zeros(shape=(win_count, win_size, numcep*3))
    for win in tqdm(range(win_count)):
        output[win] = mfcc_all[2*win: 2*win+win_size]

    if feature_file:
        np.save(feature_file, output)

    print("MPCC extraction finished {}".format(output.shape))

    return output

def audio_mfcc_new(wav_file, feature_file=None, target_samplerate=16000, max_duration=120):
    """Extract sliding-window MFCC (+delta, +delta-delta) features.

    Like ``audio_mfcc_legacy`` but loads audio through ``read_audio``
    (which validates channel count, sample rate and duration) and uses a
    window FFT size derived from the target rate.

    Parameters
    ----------
    wav_file : str
        WAV file to analyze.
    feature_file : str, optional
        When given, the float32 output is saved there via ``np.save``.
    target_samplerate : int
        Required sample rate, forwarded to ``read_audio``.
    max_duration : float
        Maximum accepted duration in seconds.

    Returns
    -------
    np.ndarray of shape ``(win_count, 64, 39)``, or None on rejection
    or any extraction error (best-effort: errors are printed, not raised).
    """
    try:
        audio, frequency, duration = read_audio(wav_file, target_samplerate, max_duration)
        if audio is None:
            return None

        if isinstance(audio, torch.Tensor):
            audio = audio.numpy()
        audio = audio.reshape((1, -1))

        # One analysis window per 30 fps video frame, 50% overlap.
        videorate = 30
        overlap_rate = 0.5
        winlen = 1.0 / videorate
        winstep = (1 - overlap_rate) * winlen

        # 64 windows per output chunk, chunks stepped by 2 windows.
        win_size = 64
        win_offset = 2

        nfft = int(winlen * frequency)
        numcep = 13  # typical cepstrum count
        winfunc = np.hanning

        mfcc = psf.mfcc(audio, frequency, winlen=winlen, winstep=winstep,
                        numcep=numcep, nfilt=numcep*2, nfft=nfft, winfunc=winfunc)
        mfcc_delta = psf.base.delta(mfcc, 2)
        mfcc_delta2 = psf.base.delta(mfcc_delta, 2)

        # Stack static + first + second derivatives: 13 * 3 = 39 features.
        mfcc_all = np.concatenate((mfcc, mfcc_delta, mfcc_delta2), axis=1)
        print(type(mfcc_all), mfcc_all.shape)

        # Clamp at zero so clips shorter than one chunk don't make
        # np.zeros raise on a negative dimension.
        win_count = max(int((len(mfcc_all) - win_size) / win_offset) + 1, 0)
        output = np.zeros(shape=(win_count, win_size, numcep*3))
        for win in tqdm(range(win_count)):
            output[win] = mfcc_all[win_offset*win: win_offset*win+win_size]

        output = output.astype(np.float32)
        if feature_file:
            np.save(feature_file, output)

        print("MPCC extraction finished {}".format(output.shape))

        return output
    except Exception as e:
        # Best-effort: report and return None so batch callers keep going.
        print(e)
        return None

def audio_lpc_lazy(wav_file, feature_file=None, target_samplerate=16000, max_duration=120, num_worker=8):
    """Extract sliding-window LPC features using audiolazy in a process pool.

    The signal is framed at one frame per 30 fps video frame (50%
    overlap, Hann window), 32 LPC coefficients are computed per frame,
    and frames are grouped into overlapping chunks of 64 stepped by 2.

    Parameters
    ----------
    wav_file : str
        WAV file to analyze.
    feature_file : str, optional
        When given, the output array is saved there via ``np.save``.
    target_samplerate : int
        Required sample rate, forwarded to ``read_audio``.
    max_duration : float
        Maximum accepted duration in seconds.
    num_worker : int
        Size of the multiprocessing pool (the original hard-coded 12).

    Returns
    -------
    np.ndarray of shape ``(win_count, 64, 32)``, or None on rejection.
    """
    audio, frequency, duration = read_audio(wav_file, target_samplerate, max_duration)
    if audio is None:
        return None
    # read_audio returns a numpy array; the original unconditionally
    # called torch.squeeze on it, which raises at runtime. Only convert
    # when a tensor actually arrives.
    if isinstance(audio, torch.Tensor):
        audio = torch.squeeze(audio, 0).numpy()

    videorate = 30
    nw = int(frequency / videorate)  # samples per frame
    inc = int(nw / 2)                # hop: 50% overlap
    winfunc = np.hanning(nw)

    def enframe(samples, nw, inc, winfunc):
        """Split `samples` into overlapping frames and apply the window.

        parameters:
        samples: original audio signal (1-D numpy array)
        nw: length of each frame (sample rate * frame interval)
        inc: hop between consecutive frames
        winfunc: precomputed window of length nw
        """
        signal_length = len(samples)
        if signal_length <= nw:
            nf = 1
        else:
            nf = int(np.ceil((1.0*signal_length - nw + inc) / inc))
        pad_length = int((nf-1)*inc + nw)  # length of all frames flattened
        zeros = np.zeros((pad_length - signal_length,))
        pad_signal = np.concatenate((samples, zeros))
        # nf x nw index matrix: row k addresses the samples of frame k.
        indices = np.tile(np.arange(0, nw), (nf, 1)) + np.tile(np.arange(0, nf*inc, inc), (nw, 1)).T
        indices = np.array(indices, dtype=np.int32)
        frames = pad_signal[indices]
        win = np.tile(winfunc, (nf, 1))
        return frames * win

    # Remove the DC offset before framing.
    sig = signal.detrend(audio, type='constant')

    frame = enframe(sig, nw, inc, winfunc)
    assert len(frame) >= 64

    win_size = 64
    K = 32  # number of LPC coefficients
    win_count = int((len(frame) - win_size) / 2) + 1
    lpc_feature = np.zeros(shape=(frame.shape[0], K))
    output = np.zeros(shape=(win_count, win_size, K))

    # Honor num_worker (was hard-coded to 12) and close the pool when
    # done (the original leaked its worker processes).
    with Pool(num_worker) as pool:
        filt_coef = pool.map(lpc_K, tqdm(frame))

    lpc_feature[:] = filt_coef
    print(type(lpc_feature), lpc_feature.shape)

    for win in range(win_count):
        output[win] = lpc_feature[2*win: 2*win+win_size]

    if feature_file:
        np.save(feature_file, output)

    print("LPC extraction finished {}".format(output.shape))

    return output

def lpc_K(frame, order=32):
    """Return the `order` LPC coefficients of one windowed audio frame."""
    predictor = lazy_lpc.nautocor(frame, order=order)
    coefficients = list(predictor.numerator)
    return coefficients[1:]  # drop the leading 1.0 of the polynomial

dll = None  # lazily-loaded handle to LPC.dll, shared across calls
def audio_lpc_dll(wav_file, feature_file=None, target_samplerate=16000, max_duration=120, num_worker=8):
    """Extract LPC features via the native LPC.dll (Windows only).

    The signal is padded with 260 ms of silence on each side, cut into
    520 ms chunks aligned to 30 fps video frames, and each chunk is
    analyzed as 64 Hann-windowed sub-frames of 32 LPC coefficients.

    Parameters
    ----------
    wav_file : str
        WAV file to analyze.
    feature_file : str, optional
        When given, the output array is saved there via ``np.save``.
    target_samplerate : int
        Required sample rate, forwarded to ``read_audio``.
    max_duration : float
        Maximum accepted duration in seconds.
    num_worker : int
        Unused; kept for signature parity with the other extractors.

    Returns
    -------
    np.ndarray of shape ``(audio_frameNum, 64, 32)``, or None on rejection.
    """
    # The original assigned `dll` without declaring it global, so
    # `if dll is None` raised UnboundLocalError on every call; and
    # c_double/pointer were never imported at all.
    global dll
    from ctypes import cdll, c_double, pointer
    if dll is None:
        project_dir = os.path.dirname(__file__)
        dll = cdll.LoadLibrary(os.path.join(project_dir, 'LPC.dll'))  # load LPC.dll once

    sig, rate, duration = read_audio(wav_file, target_samplerate, max_duration)
    if sig is None:
        return None

    # read_audio returns numpy; only convert when a tensor slips in
    # (the original called torch.squeeze unconditionally and crashed).
    if isinstance(sig, torch.Tensor):
        sig = torch.squeeze(sig, 0).numpy()

    frames_per_second = 30   # video fps
    chunks_length = 260      # ms of context on each side (520 ms per chunk)
    audio_frameNum = int(len(sig) / rate * frames_per_second)  # video frames covered
    print('audio_frameNum: ', audio_frameNum)

    # Pad 260 ms of silence before and after the signal.
    a = np.zeros(chunks_length * rate // 1000, dtype=np.int16)
    sig = np.hstack((a, sig, a))

    frames_step = 1000.0 / frames_per_second  # ms per video frame (33.33 ms)
    rate_kHz = int(rate / 1000)               # samples per millisecond

    # Slice one 520 ms chunk per video frame.
    audio_frames = [sig[int(i * frames_step * rate_kHz):
                        int((i * frames_step + chunks_length * 2) * rate_kHz)]
                    for i in range(audio_frameNum)]
    inputData_array = np.zeros(shape=(1, 64, 32))  # dummy first row, removed below

    frameCount = len(audio_frames)
    for i in range(frameCount):
        print(f'{i} / {frameCount}')
        audio_frame = audio_frames[i]

        overlap_frames_apart = 0.008
        overlap = int(rate * overlap_frames_apart)        # hop in samples
        frameSize = int(rate * overlap_frames_apart * 2)  # 2x-overlapping sub-frame
        numberOfFrames = 64

        # 64 overlapping, zero-padded sub-frames of this chunk.
        # (Inner index renamed to j — the original reused the outer `i`.)
        frames = np.ndarray((numberOfFrames, frameSize))
        for k in range(0, numberOfFrames):
            for j in range(0, frameSize):
                if (k * overlap + j) < len(audio_frame):
                    frames[k][j] = audio_frame[k * overlap + j]
                else:
                    frames[k][j] = 0

        frames *= np.hanning(frameSize)
        frames_lpc_features = []

        b = (c_double * 32)()

        # linear predictive coding, one call per sub-frame
        for k in range(0, numberOfFrames):
            in_buf = (c_double * frameSize)(*frames[k])
            # LPC(float *in, int size, int order, float *out)
            dll.LPC(pointer(in_buf), frameSize, 32, pointer(b))
            frames_lpc_features.append(list(b))

        image_temp1 = np.array(frames_lpc_features)
        image_temp3 = np.expand_dims(image_temp1, axis=0)
        inputData_array = np.concatenate((inputData_array, image_temp3), axis=0)

    # Drop the dummy first row.
    inputData_array = inputData_array[1:]

    if feature_file:
        np.save(feature_file, inputData_array)

    print("LPC extraction finished {}".format(inputData_array.shape))
    return inputData_array

import torch
from lpc import LPCCoefficients
# Let cuDNN auto-tune and use its fastest algorithms; beneficial when
# input sizes are fixed across calls.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.enabled   = True
from librosa.core import lpc
import torchaudio

import torchlpc
def create_test_inputs(batch_size, samples):
    start_coeffs = [-0.9, 0.0]
    end_coeffs = [0.0, 1]

    A = (
        torch.stack(
            [torch.linspace(start_coeffs[i], end_coeffs[i], samples) for i in range(2)]
        )
        .T.unsqueeze(0)
        .double()
        .repeat(batch_size, 1, 1)
    )
    x = torch.randn(batch_size, samples).double()
    zi = torch.randn(batch_size, 2).double()
    return x, A, zi

def audio_lpc_torch(wav_file, feature_file=None, target_samplerate = 16000, max_duration = 120,num_worker = 8):
    """Extract per-video-frame LPC coefficients on the GPU via LPCCoefficients.

    The signal is cut into 520 ms chunks aligned to 30 fps video frames,
    Hann-windowed, and each chunk is analyzed as 64 overlapping 256-sample
    sub-frames yielding 32 LPC alphas each.

    NOTE(review): requires a CUDA device (`.cuda()` calls below) — confirm
    the deployment environment. NaN/Inf alphas are zeroed rather than
    treated as errors.

    Returns a float numpy array of LPC alphas, or None when read_audio
    rejects the file.
    """
    data,sr,duration = read_audio(wav_file,target_samplerate,max_duration)
    if data is None:
        return None

    # read_audio normally yields a numpy array; wrap into a tensor.
    if not isinstance(data,torch.Tensor):
        data = torch.Tensor(data)
    else:
        data = torch.squeeze(data,0)

    frames_per_second = 30  # video fps
    audio_frameNum_all = int(len(data) / sr * frames_per_second)  # video frames covering the whole signal (unused below)
    frame_duration = 16 / 30 #frame 520ms
    chunks_length = frame_duration / 2
    frame_sample = int(frame_duration * sr)    # samples per 520 ms chunk
    chunks_sample = int(chunks_length * sr)    # samples per 260 ms half-chunk
    # Number of full chunks that fit when the last 520 ms is excluded.
    audio_frameNum = (int)((len(data) - frame_sample) / sr * frames_per_second)
    
    # a = torch.zeros((int)(chunks_length * sr), dtype=torch.float32)
    signal = data
    # signal = torch.hstack((a,data,a))
    # data = torch.unsqueeze(data,0)

    # frame_samples = (int)(frame_duration * sr)
    # frame_offset  = (int)(1 / 30 * sr)
    # data = data.unfold( -1, frame_samples, frame_offset)
    
    frames_step = 1000.0 / frames_per_second  # duration of one video frame in ms (33.33 ms)
    rate_kHz = int(sr / 1000)  # samples per millisecond
    
    # Slice one 520 ms chunk per video frame.
    data = [signal[int(i * frames_step * rate_kHz): 
                            int((i * frames_step * rate_kHz + chunks_sample * 2))]
                                for i in range(audio_frameNum)]
    # coffs = torch.zeros((data.shape[0],256,32))
    # for i in range(data.shape[0]): 
    #     f = data[i]
    #     f = f.unfold( -1, 256, 128)
    #     f *= torch.autograd.Variable( torch.hann_window( 256 ), requires_grad = False )
        
    #     x = torch.randn(f.shape[0], f.shape[1]).double()
    #     zi = torch.randn(f.shape[0], 2).double()
    #     coff = torchlpc.LPC.apply(A,x, zi)
    #     print(coff)
        
    
    # Load audio file
    # sr             = target_samplerate # 16 kHz
    # data_, sr_      = torchaudio.load( wav_file,normalize=False)
    # data_           = torchaudio.transforms.Resample( sr_, sr )( data )
    # duration_       = data.size( 1 ) / sr

   # Get audio sample worth of 512 ms
    # worth_duration = duration # 512 ms ( 256 ms before and 256 ms after )
    # worth_size     = int( np.floor( worth_duration * sr ) )
    # Stack chunks into a (audio_frameNum, frame_sample) batch and window it.
    X              = torch.stack(data)#[ :, :worth_size ]
    # X_duration     = X.size( 1 ) / sr
    # X              = torch.cat( [ X for i in range( 4 ) ] )
    hann_window = torch.hann_window(X.shape[1])
    X = X.clone() * hann_window
  
    # ====================== ME ====================================================
    # Divide in 64 2x overlapping frames
    # frame_duration = 1.0 / 30 # 16 ms
    # frame_overlap  = 0.5
    
    overlap_frames_apart = 0.008
    # frame_duration = 0.008 * 2
    # frame_overlap  = 0.5
    frame_len = 256
    frame_duration = frame_len / sr
    # Overlap chosen so exactly 64 sub-frames of 256 samples tile a chunk.
    frame_overlap =  (X.shape[1] - frame_len)/ 63 / frame_len
    K              = 32  # number of LPC coefficients per sub-frame
    lpc_prep       = LPCCoefficients(
        sr,
        frame_duration,
        frame_overlap,
        padded=False
    ).eval( ).cuda( )
    alphas         = lpc_prep( X.cuda( ) ).detach( ).cpu( )
    
    # Zero out numerically-unstable coefficients instead of failing.
    if torch.any(torch.isnan(alphas)) or torch.any(torch.isinf(alphas)):
        indices = torch.isinf(alphas)
        alphas[indices] = 0.0
        indices = torch.isnan(alphas)
        alphas[indices] = 0.0
        # indices = indices.nonzero()
        # print(indices)
        # print('LPC LPC extraction Error')
        # return None

    # Print details
    print( f'[Init]   [Audio]  sr: { sr }, duration: { duration }' )
    # print( f'[Init]   [Sample] size: { X.shape }, duration: { X_duration }' )
    print( f'[Me]     [Alphas] size: { alphas.shape }' )
     
    output = alphas.numpy()
     
    # win_size = 64
    # win_offset = 2
    # win_count = int((alphas.shape[1]-win_size)/win_offset)+1 # len(frame) or frame.shape[0]
    # output = np.zeros(shape=(win_count, win_size, K))

    # for win in range(win_count):
    #     output[win] = alphas[0,win_offset*win : win_offset*win+win_size]


    # # ====================== NOT ME ================================================
    # def librosa_lpc( X, order ):
    #     try:
    #         return lpc( X, order )
    #     except:
    #         res      = np.zeros( ( order + 1, ) )
    #         res[ 0 ] = 1.
    #         return res

    # frames  = lpc_prep.frames( X.cuda( ) )
    # frames  = frames[ 0 ].detach( ).cpu( ).numpy( )
    # _alphas = np.array( [ librosa_lpc( frames[ i ], K - 1 ) for i in range( frames.shape[ 0 ] ) ] )

    if feature_file:
        np.save(feature_file, output)
        
    print("LPC extraction finished {}".format(output.shape))
    return output


def audio_stft_torch(wav_file, feature_file=None, target_samplerate=16000, max_duration=120, num_worker=8):
    """Extract complex STFT features reshaped to (count, 64, 32) chunks.

    The hop length is chosen so the STFT yields roughly one column per
    30 fps video frame; the Nyquist bin is dropped so each column's
    2048 frequency bins fold exactly into a 64x32 tile.

    Returns a complex numpy array of shape (count, 64, 32), or None when
    read_audio rejects the file. num_worker is unused (signature parity).
    """
    samples, sr, duration = read_audio(wav_file, target_samplerate, max_duration)
    if samples is None:
        return None

    if isinstance(samples, torch.Tensor):
        samples = torch.squeeze(samples, 0)
    else:
        samples = torch.Tensor(samples)

    frames_per_second = 30  # video fps
    audio_frameNum_all = int(len(samples) / sr * frames_per_second)
    frame_duration = 16 / 30  # 520 ms per output chunk
    chunks_length = frame_duration / 2
    frame_sample = int(frame_duration * sr)
    chunks_sample = int(chunks_length * sr)
    frame_total = (int)((len(samples) - frame_sample) / sr * frames_per_second)

    n_fft = 4096
    hop = math.floor(len(samples) / frame_total)
    spectrum = torch.stft(samples, n_fft=n_fft, hop_length=hop,
                          win_length=256, return_complex=True, onesided=True)

    # (freq, time) -> (time, freq); drop the Nyquist bin (2049 -> 2048)
    # so each time step reshapes cleanly into a 64x32 tile.
    features = torch.transpose(spectrum, 0, 1)[:, :-1]
    features = torch.reshape(features, (-1, 64, 32))

    if feature_file:
        np.save(feature_file, features)

    print("stft extraction finished {}".format(features.shape))
    return features.numpy()
