from models_lstm import *
from models import *
from models_nvidia import *
import os
import surat
from audio_encode import *
from bs37ToARKit import ARKitBlendShapeLocation


# data path
# Project root = directory containing this config file.
projroot = os.path.join(os.path.dirname(__file__),'.')
# dataroot = os.path.join(projroot,'data/audio2bs')
dataroot = os.path.join(projroot,'data/beat_langs')
# data_path = os.path.join(dataroot, audio2bs)
data_path = dataroot
# Relative base folder for saved model checkpoints (joined with projroot below).
checkpoint_base_path = './checkpoint/'

#-------------------------------------All---------------------------------------
# Dataset selector. This file uses a "toggle" convention: the commented lines
# are alternative values; the last uncommented assignment wins.
# dataset_type = 'HDTF-All'
# dataset_type = 'Beat-12-Zh'
# dataset_type = 'Beat-22-Zh'
dataset_type = 'Beat-1-En'

# Model architecture selector (see create_model() below for the mapping).
# model_name = 'lstmae-2dist'
# model_name = 'lstm-nvidia'
# model_name = 'audio2bs-nvidia'
model_name = 'audio2bs-surat'

# Audio feature front-end selector (see create_audio_decoder() below).
# feature_type = 'lpc_lazy'
# feature_type = 'lpc_dll'
# feature_type = 'lpc_torch'
feature_type = 'mfcc_legacy'
# feature_type = 'mfcc_new'
# feature_type = 'lpc-bs37'
# feature_type = 'wav2vec_torch_en'
# feature_type = 'wav2vec_torch_cn'
# feature_type = 'stft_torch'

# Derived directories for extracted features and combined feature/label data.
feature_path = os.path.join(dataroot, f'feature-{feature_type}-{dataset_type}/')
combine_path = os.path.join(dataroot, f'combine-{feature_type}-{dataset_type}/')
# combine_path = os.path.join(dataroot, f'combine-{feature_type}-beaten-1/')
# Number of output blendshapes. NOTE(review): the first assignment (51) is
# immediately overridden by the second, which counts the contiguous ARKit
# enum range JawForward..MouthUpperUpRight inclusive — presumably the
# mouth/jaw subset of the ARKit blendshapes; confirm against bs37ToARKit.
bs_num = 51
bs_num = ARKitBlendShapeLocation.MouthUpperUpRight.value - ARKitBlendShapeLocation.JawForward.value + 1

# Device used for model creation/inference ('cpu' or 'cuda').
infer_device = 'cpu'
# infer_device = 'cuda'
 
def create_audio_decoder():
    """Select the audio feature extractor for the global ``feature_type``.

    Returns:
        tuple: ``(audio_decoder, start_videoframe, end_videoframe)`` where
        ``audio_decoder`` is the feature-extraction callable (``None`` if the
        feature type is unknown or its backend failed to initialize), and the
        two frame counts are the number of video frames trimmed at the start
        and end of a clip — presumably to account for the extractor's context
        window (TODO confirm against the training pipeline).
    """
    # Default to None so an unrecognized feature_type returns a harmless
    # sentinel instead of raising UnboundLocalError at the return statement.
    audio_decoder = None
    start_videoframe = 0
    end_videoframe = 0

    if feature_type == 'lpc-bs37':
        audio_decoder = audio_lpc_torch
        # NOTE(review): the original code assigned a local ``bs_num = 37``
        # here; that only shadowed the module-level bs_num and was never
        # read, so it had no effect — removed as dead code.
    elif feature_type == 'lpc_torch':
        audio_decoder = audio_lpc_torch
        start_videoframe = 8
        end_videoframe = 8
    elif feature_type == 'lpc_lazy':
        audio_decoder = audio_lpc_lazy
    elif feature_type == 'lpc_dll':
        audio_decoder = audio_lpc_dll
    elif feature_type == 'mfcc_legacy':
        audio_decoder = audio_mfcc_legacy
        start_videoframe = 16
        end_videoframe = 16
    elif feature_type == 'mfcc_new':
        audio_decoder = audio_mfcc_new
        start_videoframe = 16
        end_videoframe = 16
    elif feature_type in ('wav2vec_torch_en', 'wav2vec_torch_cn'):
        # wav2vec requires an optional dependency; fall back to None (with a
        # printed error) if it is missing, matching the original best-effort
        # behavior.
        lang_code = 'en' if feature_type == 'wav2vec_torch_en' else 'cn'
        try:
            from audio_encode_adv import audio_wav2vec_torch, init_et_model
            init_et_model(lang_code, infer_device)
            audio_decoder = audio_wav2vec_torch
        except Exception as e:
            print(e)
            audio_decoder = None
    elif feature_type == 'stft_torch':
        audio_decoder = audio_stft_torch

    return audio_decoder, start_videoframe, end_videoframe





#-------------------------------------Preprocess---------------------------------------
# Source dataset location on disk (toggle convention: last assignment wins).
# src_dataset_path = 'D:/Beat/beat_english_v0.2.1/1'
# src_dataset_path = 'E:/GoogleDrive/.shortcut-targets-by-id/1CVyJOp3G_A9l1N_CsKdHgXQfB4pXhG8c/BEAT'
# src_dataset_path = 'D:/Beat/BEAT/12'
# src_dataset_path = 'D:/Beat/BEAT/22'
src_dataset_path = 'D:/Beat/beat_english_v0.2.1/1'
# src_dataset_path = 'D:/Beat/3D-ETF/3D-ETF/all'



# Preprocessing limits / switches.
max_duration = 12000          # presumably a per-clip duration cap — TODO confirm units
max_wav_count = 20            # max number of wav files to preprocess
copmute_feature = True        # (sic) whether to extract features during preprocessing
need_shuffle = False
# lang='zh'
lang='all'

#-------------------------------------Train---------------------------------------
# hyper-parameters
learning_rate = 0.001
learning_rate_scale = 0.9975  # per-step/epoch LR decay factor — TODO confirm where applied
batch_size = 2048
epochs = 200
eval_step = 1                 # evaluate every N epochs
save_step = 100               # checkpoint every N epochs

print_freq = 20
best_loss = 10000000          # sentinel "infinity" for best-loss tracking

max_train_data_len = 0        # 0 presumably means "no limit" — TODO confirm in loader
max_val_data_len = 64

preprocess_train = False      # train on preprocessed labels instead of raw ones
motion_loss = True            # include a motion (temporal) term in the loss
resume_train = False

# Checkpoint file stem to load. NOTE(review): the first assignment is
# deliberately overridden by the second (same toggle convention as above).
checkpoint_ephoch = 'model_best'
checkpoint_ephoch = 'checkpoint-epoch200'

# Label-type tag used in the checkpoint directory layout.
labelType = 'raw'
if preprocess_train:
    labelType = 'preprocessed'
    

# Loss-type tag used in the checkpoint directory layout.
lossType = 'losss'
if motion_loss:
    lossType = 'losssm'

# Playback delay (seconds) applied to blendshape curves in Unity — presumably
# compensates for the audio feature window latency; TODO confirm the factors.
unityBSDelay = 0.0
if 'mfcc' in feature_type:
    unityBSDelay = 6.0 / 30 * 2
elif 'wav2vec_torch' in feature_type:
    unityBSDelay = 1.0 / 30 * 2

# Full checkpoint path: <root>/checkpoint/<model>/<feature>/<dataset>/<label>/<loss>/<epoch>.pth.tar
ckp_rel = os.path.join(model_name ,feature_type, dataset_type, labelType,lossType, checkpoint_ephoch + '.pth.tar')
ckp = os.path.join(projroot,checkpoint_base_path , ckp_rel)

bs_scale = 1.0                # global scale applied to predicted blendshape values

# Post-synthesis smoothing/processing switch; forced off when training on
# preprocessed labels.
postprocess_synthesis=False
if preprocess_train:
    postprocess_synthesis = False

    
def create_model():
    """Build the network and loss selected by the global ``model_name``.

    Returns:
        tuple: ``(model, lossfunc)`` — the model is already moved to the
        global ``infer_device``.

    Raises:
        ValueError: if ``model_name`` does not match a known configuration
        (the original code would instead crash with ``NameError``).
    """
    if model_name == 'lstmae-2dist':
        # Input feature width depends on the audio front end: MFCC produces
        # 39 coefficients; LPC and all other front ends produce 32.
        num_features = 39 if feature_type.startswith('mfcc') else 32
        model = LSTMAE2dist(is_concat=True, num_features=num_features,
                            num_blendshapes=bs_num)
        lossfunc = torch.nn.SmoothL1Loss()
    elif model_name == 'lstm-nvidia':
        model = LSTMNvidiaNet(num_blendshapes=bs_num)
        # surat is already imported at module level; no local import needed.
        lossfunc = surat.loss
    elif model_name == 'audio2bs-nvidia':
        model = Audio2Face(bs_num, 0.5, mfcc=feature_type.startswith('mfcc'))
        lossfunc = losses
    elif model_name == 'audio2bs-surat':
        model = surat.Model(outNum=bs_num, moodSize=16, feature_type=feature_type)
        lossfunc = surat.loss
    else:
        raise ValueError(f'unknown model_name: {model_name!r}')

    model.to(torch.device(infer_device))
    return model, lossfunc
