import python_speech_features as psf
import numpy as np
# import scipy.signal as signal
import scipy.io.wavfile as wav
import os
from tqdm import tqdm
import json
import random
import librosa
import wave
import soundfile as sf
import PyWave
from audiolazy import lpc as lazy_lpc
import scipy.signal as signal
import scipy.io.wavfile as wavfile
from multiprocessing import Pool
import math
import audio_encode
import torch
import time

from audio_encode import read_audio
from etmodel import EmoTalk
import os

# Module-level EmoTalk model instance; populated by init_et_model() and
# read by audio_wav2vec_torch().  None until init_et_model() is called.
gEtmodel = None 

def init_et_model(lang, device):
    """Build the global EmoTalk model, load its checkpoint if present, and
    put it in eval mode on the requested device.

    Args:
        lang: language identifier forwarded to the EmoTalk constructor.
        device: torch device string (e.g. "cpu", "cuda:0").
    """
    global gEtmodel

    target_device = torch.device(device)
    gEtmodel = EmoTalk(lang, device)

    # Checkpoint is optional: when the file is absent the model keeps its
    # freshly-initialized weights.  strict=False tolerates key mismatches.
    model_path = "./checkpoint/et/EmoTalk.pth"
    if os.path.exists(model_path):
        state_dict = torch.load(model_path, map_location=target_device)
        gEtmodel.load_state_dict(state_dict, strict=False)

    gEtmodel = gEtmodel.to(target_device)
    gEtmodel.eval()

def audio_wav2vec_torch(wav_file, feature_file=None, target_samplerate = 16000, max_duration = 120,num_worker = 8):
    """Extract wav2vec hidden-state features for a wav file via the global EmoTalk model.

    The audio is split into 30-second windows; each window is run through the
    model's processor and continuous audio encoder, and the per-frame hidden
    states (30 frames per second of audio) are reshaped to (frames, 32, 32)
    and concatenated over all windows.

    Args:
        wav_file: path to the input wav file.
        feature_file: optional path; when given, the result is saved via np.save.
        target_samplerate: sample rate requested from read_audio (Hz).
        max_duration: maximum duration (seconds) accepted by read_audio.
        num_worker: unused; kept for backward compatibility with callers.

    Returns:
        np.ndarray of shape (total_frames, 32, 32), or None when the audio
        could not be read.

    Requires init_et_model() to have been called first (uses global gEtmodel).
    """
    global gEtmodel

    data, sr, duration = read_audio(wav_file, target_samplerate, max_duration)
    if data is None:
        return None

    # Normalize to a 2-D (batch, samples) tensor.  NOTE(review): the tensor
    # branch squeezes dim 0, implying read_audio returns tensors with an extra
    # leading dim — confirm against read_audio.
    if not isinstance(data, torch.Tensor):
        data = torch.Tensor(data)
        data = torch.unsqueeze(data, 0)
    else:
        data = torch.squeeze(data, 0)

    totalSampleCount = data.shape[1]
    stepSampleCount = 30 * sr  # 30-second processing windows
    stepCount = math.ceil(totalSampleCount / stepSampleCount)

    output = None
    for step in range(stepCount):
        start_time = time.time()

        startPos = step * stepSampleCount
        endPos = min((step + 1) * stepSampleCount, totalSampleCount)

        stepData = data[:, startPos:endPos]
        # 30 output feature frames per second of audio in this window.
        frameLen = int(np.ceil((endPos - startPos) / sr * 30))

        # Inference only: disable autograd bookkeeping to cut memory use.
        with torch.no_grad():
            inputs12 = gEtmodel.processor(torch.squeeze(stepData), sampling_rate=sr, return_tensors="pt",
                                        padding="longest").input_values.to(gEtmodel.device)
            hidden_states_cont1 = gEtmodel.audio_encoder_cont(inputs12, frame_num=frameLen).last_hidden_state
            hidden_states_cont1 = torch.squeeze(hidden_states_cont1, 0)

            # don't use param in etmodel
            use_et_map = False
            if use_et_map:
                hidden_states_cont1_mapped = gEtmodel.audio_feature_map_cont(hidden_states_cont1)
                hidden_states_cont1 = torch.cat([hidden_states_cont1, hidden_states_cont1_mapped], dim=1)
                # reshape replaces the deprecated Tensor.resize(); element
                # count is unchanged (per-frame dim 1536 -> 48*32).
                stepData = hidden_states_cont1.reshape(hidden_states_cont1.shape[0], 48, 32)
            else:
                # per-frame hidden dim 1024 -> 32*32
                stepData = hidden_states_cont1.reshape(hidden_states_cont1.shape[0], 32, 32)

        stepDataNp = stepData.detach().cpu().numpy()

        if output is None:
            output = stepDataNp
        else:
            output = np.concatenate([output, stepDataNp], axis=0)

        past_time = time.time() - start_time
        print(f"step:{step} takes {past_time:.4f} sec!")

    if feature_file:
        np.save(feature_file, output)

    return output

