import csv
import json
import os
import subprocess

import numpy as np
from tqdm import tqdm

import utils



def get_wav_feature(config, filepath):
    """Extract an openSMILE functionals feature vector from one wav file.

    Runs SMILExtract with the configured GeMAPS config, writing a temporary
    CSV, then parses the last row of that CSV into a list of floats.

    Args:
        config: object providing BASE_DIR, feature_path, opensmile_path and
            opensmile_config attributes (see configs/LLD_config.yaml).
        filepath: path to the input .wav file.

    Returns:
        list[float]: feature values with the first and last CSV columns
        dropped (instance name and class placeholder in openSMILE output).

    Raises:
        subprocess.CalledProcessError: if SMILExtract exits non-zero.
    """
    single_feat_path = os.path.join(config.BASE_DIR, config.feature_path, 'single_feature.csv')
    opensmile_config_path = os.path.join(config.opensmile_path, 'config/gemaps',
                                         config.opensmile_config + '.conf')
    # Argument-list subprocess call instead of os.system string concatenation:
    # safe for paths containing spaces or shell metacharacters, and check=True
    # surfaces extraction failures instead of silently reading a stale file.
    subprocess.run(
        ['SMILExtract', '-C', opensmile_config_path,
         '-I', filepath, '-O', single_feat_path, '-l', '0'],
        cwd=config.opensmile_path,
        check=True,
    )
    try:
        with open(single_feat_path, 'r') as f:
            rows = list(csv.reader(f))
        # SMILExtract appends to -O; the last row holds this file's features.
        feature_vector = trans_num_type(rows[-1][1:-1])
    finally:
        # Always remove the scratch CSV, even if parsing fails.
        os.remove(single_feat_path)
    return feature_vector

def process(config):
    """Extract openSMILE features for every file under config.data_path and
    dump the resulting {filename: feature_vector} mapping as JSON to
    config.opensmile_feature_json_path.
    """
    data_path = config.data_path
    # One feature vector per file, keyed by the bare file name.
    feature_dic = {
        fname: get_wav_feature(config, os.path.join(data_path, fname))
        for fname in tqdm(os.listdir(data_path))
    }
    with open(config.opensmile_feature_json_path, "w") as f:
        json.dump(feature_dic, f)


def trans_num_type(values):
    """Convert a sequence of numeric strings to a list of floats.

    Args:
        values: iterable whose items are accepted by float() (e.g. CSV cells).

    Returns:
        list[float]: the converted values, in input order.
    """
    # Comprehension replaces the manual append loop; the original parameter
    # was named ``list``, shadowing the builtin — renamed (callers in this
    # file pass it positionally).
    return [float(v) for v in values]

def all_path(dirname):
    """Recursively collect every non-hidden file path under *dirname*.

    Files whose name starts with '.' are skipped; directories (hidden or
    not) are still descended into, matching os.walk's default traversal.
    """
    collected = []
    for root, _dirs, filenames in os.walk(dirname):
        collected.extend(
            os.path.join(root, name)
            for name in filenames
            if not name.startswith(".")
        )
    return collected

def get_LLDs_dataset_casia(config):
    """Load (optionally first extracting) openSMILE features for CASIA.

    When config.require_extract is truthy, features are (re)computed via
    process(); otherwise the cached JSON at
    config.opensmile_feature_json_path is read directly.

    Returns:
        (embeddings, label): parallel lists, one entry per wav file. The
        label is int(filename[-5]) — presumably CASIA file names encode the
        emotion class as the digit right before '.wav'; verify against the
        dataset's naming scheme.
    """
    if config.require_extract:
        process(config)
    with open(config.opensmile_feature_json_path, 'r', encoding='UTF-8') as fh:
        load_dict = json.load(fh)
    embeddings = [vec for vec in load_dict.values()]
    label = [int(name[-5]) for name in load_dict]
    return embeddings, label

def get_audio_list(data_path):
    """Return full paths of the .wav files directly inside *data_path*.

    Non-recursive: only the top level of data_path is scanned, and only
    files whose extension is exactly '.wav' are kept.

    Args:
        data_path: directory to scan.

    Returns:
        list[str]: joined paths, in os.listdir order.
    """
    # The original file defined this function twice, byte-identically; the
    # second definition silently shadowed the first. Keep a single copy.
    return [
        os.path.join(data_path, fname)
        for fname in os.listdir(data_path)
        if os.path.splitext(fname)[1] == ".wav"
    ]


if __name__ == '__main__':
    # Ad-hoc driver: extract openSMILE features for every wav under each
    # sub-directory of process_dir, pad each directory's feature matrix to a
    # fixed (512, 88) shape, and save the stacked result.
    config_file = 'configs/LLD_config.yaml'
    config = utils.parse_opt(config_file)
    # NOTE(review): hard-coded local path — parameterize before reuse.
    process_dir = "/Users/leslie/Data/paper_audio/processed"
    # One wav-path list per sub-directory. ('subdir' instead of the original
    # 'dir', which shadowed the builtin.)
    wav_lists = []
    for subdir in os.listdir(process_dir):
        wav_lists.append(get_audio_list(os.path.join(process_dir, subdir)))
    # numpy and tqdm are already imported at module top; the original
    # re-imported both here redundantly.
    features_list = []
    for wav_paths in tqdm(wav_lists):
        feature_list = [get_wav_feature(config, path) for path in wav_paths]
        ff = np.array(feature_list)
        # np.resize pads by repeating the data cyclically ("padding recurrent")
        # to a fixed (512, 88) shape. Zero-padding alternative kept for reference:
        # ff = np.pad(ff, ((0, 512 - len(ff)), (0, 0)))
        features_list.append(np.resize(ff, (512, 88)))
    # (original had a stray dead alias: features_all = d = np.stack(...))
    features_all = np.stack(features_list, axis=0)
    np.save('features_all.npy', features_all)
    # Round-trip sanity check.
    print(np.load('features_all.npy').shape)




