import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__),'..'))
import torch
import torch.autograd as autograd
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim

import shutil
import time
from datetime import datetime

from dataset import WavBlendshapeDataset,DataPreprocessor,GetWaveInfo
from models import NvidiaNet, LSTMNvidiaNet, FullyLSTM
# from models_testae import *
from global_config import *
from preprocess import bs_preprocess
from audio2face import *
from txt2json import *
from scipy.signal import savgol_filter
from train import *

from pipeline.utils.bs61 import *
from pipeline.utils.audio.processing.audio_processing import *
# gpu setting
# Restrict the process to a single GPU; must be set before any CUDA
# context is created (i.e. before the first CUDA call).
gpu_id = 0  # index of the physical GPU to expose
os.environ['CUDA_VISIBLE_DEVICES'] = str(gpu_id)

def mainV2():
    """Sliding-window inference over raw test waveforms.

    For each test clip: loads the wav, slices it into per-frame audio
    windows, runs the global ``model`` in batches, smooths the 27 predicted
    blendshape channels, scatters them into the full 51-channel ARKit
    vector and writes a Unity blendshape JSON next to the project data.

    Relies on module globals / star imports: ``model``, ``infer_device``,
    ``ckp``, ``checkpoint_name``, ``projroot``, ``win_length``, ``fps``,
    ``torchaudio``, ``json``, ``np`` — assumed provided by
    ``global_config`` and friends (TODO confirm).
    """
    global model
    global infer_device

    # Test clips to process; toggle entries as needed.
    files = []
    files.append('woman')
    files.append('recordClip')
    # files.append('1_wayne_0_1_1')
    # files.append('11_nidal_0_100_100')
    # files.append('12_zhao_2_57_64')
    # files.append('22_luqi_0_88_88')
    # files.append('22_luqi_2_87_94')
    # files.append('WRA_JoniErnst0_000')
    # files.append('WRA_JoniErnst1_000')
    # Number of predicted blendshape channels: the contiguous ARKit range
    # JawForward..MouthUpperUpRight. Kept only for the commented-out ctor.
    n_bs = ARKitBlendShapeLocation.MouthUpperUpRight.value - ARKitBlendShapeLocation.JawForward.value + 1
    # model = Audio2BS(n_bs)

    checkpoint = torch.load(ckp)
    model.load_state_dict(checkpoint['state_dict'])
    model.to(infer_device)

    for fn in files:
        start_time = time.time()

        wavpath = os.path.join(projroot, f'data/test/{fn}.wav')
        outpath = os.path.join(projroot, f'data/pipeline/synthesis/{fn}_{checkpoint_name}.json')
        GetWaveInfo(wavpath)

        waveform, sr = torchaudio.load(wavpath)
        wavedata = waveform[0]
        # Half an analysis window, in samples. Left pad is half a window,
        # right pad a full window — the extra tail padding presumably
        # guards the final frames (kept as in the original).
        half_win = int(sr * win_length / 2)
        pad_wavedata = torch.concatenate(
            [torch.zeros(half_win), wavedata, torch.zeros(2 * half_win)])

        duration = wavedata.shape[0] / sr

        batch_size = 1024
        input_batch = None
        output_array = None
        total_frame_count = int(duration * fps)

        # Pure inference: disable autograd to avoid building a graph.
        with torch.no_grad():
            for idx in range(total_frame_count):
                start = idx * sr // fps
                end = start + half_win * 2
                # check if the audio is long enough
                if end > len(pad_wavedata):
                    print(f"Audio is not long enough to get fragment: {end} > {len(pad_wavedata)}")
                    break
                frag = torch.unsqueeze(pad_wavedata[start:end], 0)

                # BUG FIX: tensors must be None-checked with `is`, not `==`
                # (`==` on a tensor is an elementwise comparison operator).
                if input_batch is None:
                    input_batch = frag
                else:
                    input_batch = torch.concatenate((input_batch, frag), 0)

                # Flush a full batch (or the final partial batch).
                if input_batch.shape[0] == batch_size or idx == total_frame_count - 1:
                    output = model(input_batch.to(infer_device))
                    input_batch = None

                    if output_array is None:
                        output_array = output
                    else:
                        output_array = torch.concatenate((output_array, output), 0)

        if output_array is None:
            # Clip too short to yield a single frame — skip instead of
            # crashing on `None.detach()`.
            print(f"No frames inferred for {wavpath}; skipping")
            continue

        bs = output_array.detach().cpu().numpy()

        # Temporal smoothing, applied independently per blendshape channel.
        smooth = True
        if smooth:
            win = 9; polyorder = 2
            for i in range(27):
                bs[:, i] = savgol_filter(bs[:, i], win, polyorder, mode='nearest')

        # Scatter the 27 predicted channels into the 51-channel ARKit vector.
        output_51 = np.zeros([bs.shape[0], 51])
        output_51[:, ARKitBlendShapeLocation.JawForward.value - 1: ARKitBlendShapeLocation.MouthUpperUpRight.value] = bs

        # output_51 = bs_postprocess(output_51)

        bsJson = tensorToUnityBSJson(output_51, 1.2, 0.0)

        jstr = json.dumps(asdict(bsJson))

        # Context manager guarantees the handle is closed even on error.
        with open(outpath, 'w') as f2:
            f2.write(jstr)

        past_time = time.time() - start_time
        print(f"New Pipeline Infer in {past_time:.4f} sec! filename: {wavpath} save to: {outpath}")


from pipeline.utils.audio.extraction.extract_features import extract_audio_features
def mainV3():
    """Chunked inference over extracted audio features.

    For each test clip: extracts features, splits them into overlapping
    128-frame windows, runs the global ``model`` on each window,
    cross-fades consecutive windows over the overlap region via
    ``blend_chunks``, scatters the 27 predicted channels into the full
    51-channel ARKit vector, and writes a Unity blendshape JSON.

    Relies on module globals / star imports: ``model``, ``infer_device``,
    ``ckp``, ``checkpoint_name``, ``projroot``, ``pad_audio_chunk``,
    ``blend_chunks``, ``json``, ``np`` — assumed provided by the pipeline
    star imports (TODO confirm).
    """
    global model
    global infer_device

    # Test clips to process; toggle entries as needed.
    files = []
    # files.append('woman')
    # files.append('recordClip')
    files.append('1_wayne_0_1_1')
    # files.append('11_nidal_0_100_100')
    # files.append('12_zhao_2_57_64')
    # files.append('22_luqi_0_88_88')
    # files.append('22_luqi_2_87_94')
    # files.append('WRA_JoniErnst0_000')
    # files.append('WRA_JoniErnst1_000')
    # Number of predicted blendshape channels: the contiguous ARKit range
    # JawForward..MouthUpperUpRight. Kept only for the commented-out ctor.
    n_bs = ARKitBlendShapeLocation.MouthUpperUpRight.value - ARKitBlendShapeLocation.JawForward.value + 1
    # model = Audio2BS(n_bs)

    checkpoint = torch.load(ckp)
    model.load_state_dict(checkpoint['state_dict'])
    model.to(infer_device)

    for fn in files:
        start_time = time.time()

        wavpath = os.path.join(projroot, f'data/test/{fn}.wav')
        outpath = os.path.join(projroot, f'data/pipeline/synthesis/{fn}_{checkpoint_name}.json')

        audio_features, _ = extract_audio_features(wavpath)
        audio_features = audio_features.astype(np.float32)

        frame_length = 128  # window size, in feature frames
        overlap = 32        # frames shared between consecutive windows
        num_features = audio_features.shape[1]
        num_frames = audio_features.shape[0]

        start_idx = 0
        all_decoded_outputs = []
        # Pure inference: disable autograd to avoid building a graph.
        with torch.no_grad():
            while start_idx < num_frames:
                end_idx = min(start_idx + frame_length, num_frames)

                # Select the window and zero-pad it up to frame_length.
                audio_chunk = pad_audio_chunk(
                    audio_features[start_idx:end_idx], frame_length, num_features)
                audio_chunk = torch.unsqueeze(torch.tensor(audio_chunk), 0).to(infer_device)

                decoded = torch.squeeze(model(audio_chunk), 0)
                # Drop the padded tail before accumulating.
                decoded = decoded[:end_idx - start_idx].detach().cpu().numpy()

                # Cross-fade with the output accumulated so far over the
                # overlap region; the list holds at most one running chunk.
                if all_decoded_outputs:
                    last_chunk = all_decoded_outputs.pop()
                    all_decoded_outputs.append(blend_chunks(last_chunk, decoded, overlap))
                else:
                    all_decoded_outputs.append(decoded)

                # Advance so consecutive windows share `overlap` frames.
                start_idx += frame_length - overlap

        if not all_decoded_outputs:
            # Empty feature sequence — skip instead of crashing on [0].
            print(f"No frames inferred for {wavpath}; skipping")
            continue

        output_array = all_decoded_outputs[0].reshape(-1, 27)

        # Scatter the 27 predicted channels into the 51-channel ARKit vector.
        output_51 = np.zeros([output_array.shape[0], 51])
        output_51[:, ARKitBlendShapeLocation.JawForward.value - 1: ARKitBlendShapeLocation.MouthUpperUpRight.value] = output_array

        # output_51 = bs_postprocess(output_51)

        bsJson = tensorToUnityBSJson(output_51, 1.8, 0.0)

        jstr = json.dumps(asdict(bsJson))

        # Context manager guarantees the handle is closed even on error.
        with open(outpath, 'w') as f2:
            f2.write(jstr)

        past_time = time.time() - start_time
        print(f"New Pipeline Infer in {past_time:.4f} sec! filename: {wavpath} save to: {outpath}")

if __name__ == '__main__':
    # Entry point: run the feature-based pipeline. mainV2 (raw-waveform
    # variant) is kept as a toggle for comparison.
    # mainV2()
    mainV3()
