# coding: utf-8
# Author：WangTianRui
# Date ：2022/3/5 15:53
import sys, os

# sys.path.append(os.path.abspath(os.path.join(os.getcwd(), "../")))
import torch
import librosa
import copy
import numpy as np
import soundfile as sf
import ResNetSE34V2 as ResNetSE34V2
from multiprocessing import Process
from pathlib import Path
from tqdm import tqdm

def get_all_wavs(root):
    """Recursively collect every .flac file path under *root*.

    Args:
        root: Directory to search (str or path-like).

    Returns:
        A deduplicated list of path strings (order unspecified).
    """
    # A single recursive glob covers both direct children and nested files.
    # The original iterdir + per-child rglob visited some paths twice and
    # relied on set() to dedupe; rglob alone yields each match once.
    return [str(p) for p in set(Path(root).rglob("*.flac"))]


def get_embedding(pt_path, pathes, root, save_root):
    """Load a ResNetSE speaker encoder and save one embedding .npy per audio file.

    Each output path mirrors the input path with *root* replaced by
    *save_root*; already-existing outputs are skipped so the job can resume.

    Args:
        pt_path: Path to the pretrained ResNetSE34V2 checkpoint (.model file).
        pathes: Audio file paths to process in this worker.
        root: Input root directory (prefix to strip from each wav path).
        save_root: Output root directory for the .npy embedding files.
    """
    embedder_pt = torch.load(pt_path, map_location="cpu")
    embedder = ResNetSE34V2.ResNetSE()
    # Strip the training-wrapper prefixes from the checkpoint: "__S__." wraps
    # the encoder weights we keep; "__L__" keys belong to the training loss
    # head and are dropped for inference.
    for key in list(embedder_pt.keys()):
        if str(key).startswith('__S__'):
            embedder_pt[key.replace("__S__.", "")] = embedder_pt.pop(key)
        elif str(key).startswith('__L__'):
            embedder_pt.pop(key)
    embedder.load_state_dict(embedder_pt)
    embedder.eval()
    model = embedder.cuda()
    for wav_path in tqdm(pathes):
        save_path = wav_path.replace(root, save_root)
        # np.save appends ".npy" to paths without that suffix; skip files that
        # were already embedded (resume support across restarts/workers).
        if os.path.exists(save_path + ".npy"):
            print(save_path + ".npy   exists")
            continue
        sig, sr = sf.read(wav_path)
        if sr != 16000:
            # The encoder presumably expects 16 kHz input — resample if needed.
            sig = librosa.resample(sig, orig_sr=sr, target_sr=16000).astype(np.float32)
        with torch.no_grad():
            # from_numpy + unsqueeze avoids the extra copy of tensor([array]).
            batch = torch.from_numpy(np.asarray(sig, dtype=np.float32)).unsqueeze(0)
            embedding = model(batch.cuda())[0].cpu().numpy()
        # Create the full output directory tree. exist_ok avoids the race
        # between the parallel worker processes, and makedirs (unlike the
        # old single-level mkdir) also works when save_root itself is missing
        # or the speaker folders are nested more than one level deep.
        os.makedirs(os.path.dirname(save_path), exist_ok=True)
        print("embedding", embedding.shape)
        np.save(save_path, embedding)

def load_model(pt_path="/home/wang/codes/py/VC/Model_ResNetSE34/baseline_v2_ap.model"):
    """Load the pretrained ResNetSE speaker encoder on CPU in eval mode.

    Args:
        pt_path: Checkpoint path. Defaults to the original hard-coded model
            file so existing callers are unaffected.

    Returns:
        The ResNetSE module, weights loaded, in eval() mode (CPU).
    """
    embedder_pt = torch.load(pt_path, map_location="cpu")
    embedder = ResNetSE34V2.ResNetSE()
    # Same checkpoint cleanup as get_embedding(): keep "__S__." encoder
    # weights (prefix stripped), drop "__L__" loss-head entries.
    for key in list(embedder_pt.keys()):
        if str(key).startswith('__S__'):
            embedder_pt[key.replace("__S__.", "")] = embedder_pt.pop(key)
        elif str(key).startswith('__L__'):
            embedder_pt.pop(key)
    embedder.load_state_dict(embedder_pt)
    embedder.eval()
    # NOTE(review): deliberately left on CPU (the .cuda() call was commented
    # out in the original) — callers move it to a device themselves.
    return embedder

def get_embedding_model(mel, model):
    """Apply *model* to *mel* and return the resulting embedding."""
    return model(mel)
        

if __name__ == '__main__':
    root = r"/home/wang/codes/py/VC/VCTK/Dataset/VCTK-Corpus/wav48_silence_trimmed/"
    save_root = r"/home/wang/codes/py/VC/VCTK/Speaker_embedding/"
    pt_path = r"/home/wang/codes/py/VC/Model_ResNetSE34/baseline_v2_ap.model"

    all_pathes = get_all_wavs(root)
    print(len(all_pathes))

    n_workers = 4
    # Ceil-divide so the last slice picks up the remainder. The previous
    # floor division (len // 4) silently dropped up to 3 trailing files,
    # which were then never embedded.
    slice_len = (len(all_pathes) + n_workers - 1) // n_workers
    workers = []
    for idx in range(n_workers):
        chunk = all_pathes[idx * slice_len:(idx + 1) * slice_len]
        if not chunk:
            # Fewer files than workers: don't spawn idle processes.
            continue
        workers.append(Process(target=get_embedding,
                               args=(pt_path, chunk, root, save_root)))
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
