
import torch

import sys 
sys.path.insert(0, "./VoiceSplit/")
sys.path.insert(0, "./GE2E-Speaker-Encoder/")

from encoder import inference as encoder
from encoder.params_model import model_embedding_size as speaker_embedding_size
from pathlib import Path


from utils.audio_processor import WrapperAudioProcessor as AudioProcessor 
from utils.generic_utils import load_config
import librosa
import os
import numpy as np
import torch
from glob import glob
from tqdm import tqdm
import torch

from models.voicefilter.model import VoiceFilter
from models.voicesplit.model import VoiceSplit

from utils.generic_utils import load_config, load_config_from_str

# Load the pretrained GE2E speaker encoder and smoke-test it on silence.
print("Preparing the encoder, the synthesizer and the vocoder...")
encoder.load_model(Path('encoder/saved_models/pretrained.pt'))
print("Testing your configuration with small inputs.")
print("\tTesting the encoder...")

# One second of zeros is enough to verify the encoder runs end to end.
silence = np.zeros(encoder.sampling_rate)
embed = encoder.embed_utterance(silence)
print(embed.shape)


def get_embedding(encoder, ap, wave_file_path):
    """Compute a speaker embedding for one wav file with the GE2E encoder.

    `ap` is accepted for interface compatibility but is not used here.
    Returns the embedding flattened to a 1-D torch tensor.
    """
    wav = encoder.preprocess_wav(wave_file_path)
    emb = encoder.embed_utterance(wav)
    return torch.from_numpy(emb.reshape(-1))

# Path to the trained VoiceFilter/VoiceSplit checkpoint.
checkpoint_path = 'best_checkpoint.pt'
# Load the checkpoint.
# NOTE(review): map_location='cuda:0' requires a CUDA device; change to 'cpu'
# for CPU-only machines.
checkpoint = torch.load(checkpoint_path, map_location='cuda:0')
# The full training config is stored inside the checkpoint as a string.
c = load_config_from_str(checkpoint['config_str'])

ap = AudioProcessor(c.audio) # create AudioProcessor for model
model_name = c.model_name    # selects VoiceFilter vs VoiceSplit below
cuda = True                  # global flag, also read by predict() below

# load model
if(model_name == 'voicefilter'):
    print('inicializado com voicefilter')
    model = VoiceFilter(c)
elif(model_name == 'voicesplit'):
    model = VoiceSplit(c)
else:
    raise Exception(" The model '"+model_name+"' is not suported")

if c.train_config['optimizer'] == 'adam':
    optimizer = torch.optim.Adam(model.parameters(),
                                    lr=c.train_config['learning_rate'])
else:
    raise Exception("The %s  not is a optimizer supported" % c.train['optimizer'])

# Restore the trained weights into the freshly constructed model.
model.load_state_dict(checkpoint['model'])


# Restore optimizer state (momentum buffers etc.) and the training step count.
optimizer.load_state_dict(checkpoint['optimizer'])
step = checkpoint['step']

print("load model form Step:", step)
# Move the model to GPU when requested.
if cuda:
    model = model.cuda()

print(model)







# utils for plot spectrogram
import matplotlib
matplotlib.use('Agg')
import matplotlib.pylab as plt
import numpy as np
import imageio
def fig2np(fig):
    """Render a drawn Matplotlib figure canvas to an (H, W, 3) uint8 RGB array.

    The canvas must already be rasterized (i.e. fig.canvas.draw() was called).
    """
    # FIX: np.fromstring(..., sep='') is deprecated and removed in NumPy 2.0;
    # np.frombuffer is the supported replacement. Copy so the result is
    # writable, matching the original fromstring behavior.
    data = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8).copy()
    # get_width_height() returns (width, height); the image array is (H, W, 3).
    width, height = fig.canvas.get_width_height()
    return data.reshape((height, width, 3))

def plot_spectrogram_to_numpy(spectrogram):
    """Plot a spectrogram and return the rendered plot as a numpy RGB image."""
    figure, axis = plt.subplots(figsize=(12, 3))
    image = axis.imshow(spectrogram, aspect='auto', origin='lower',
                        interpolation='none')
    plt.colorbar(image, ax=axis)
    plt.xlabel('Frames')
    plt.ylabel('Channels')
    plt.tight_layout()

    # Rasterize the figure, grab the pixels, then release the figure.
    figure.canvas.draw()
    pixels = fig2np(figure)
    plt.close()
    return pixels

def save_spec(path, spec):
    """Render the spectrogram `spec` as an image and write it to `path`."""
    imageio.imwrite(path, plot_spectrogram_to_numpy(spec))


# -----------------------------------------------------------------------------


# extract caracteristics
def normalise_and_extract_features(encoder, ap, mixed_path, emb_ref_id):
    """Normalise the mixed wav, build the reference speaker embedding and
    extract the mixed spectrogram.

    Parameters
    ----------
    encoder: GE2E speaker encoder (preprocess_wav / embed_utterance).
    ap: AudioProcessor used for wav loading and spectrogram extraction.
    mixed_path: path to the mixed (noisy) utterance wav.
    emb_ref_id: speaker id; reference wavs are read from
        ./test/clean_audio/<emb_ref_id>/.

    Returns
    -------
    (embedding, mixed_spec, mixed_phase, mixed_wav, emb_wav): `embedding` is
    the mean embedding over all reference wavs; `emb_wav` is the last
    normalised reference wav processed.

    Raises
    ------
    RuntimeError: if no reference wav exists for `emb_ref_id`.
    """
    import shlex  # local import: quote paths before passing them to a shell

    mixed_path_norm = mixed_path.replace('.wav', '-norm.wav')
    # Loudness-normalise and resample to 16 kHz with ffmpeg-normalize.
    # FIX: shell-quote the paths so spaces/metacharacters cannot break or
    # inject into the command (the original concatenated raw strings).
    os.system('ffmpeg-normalize %s -ar 16000 -o %s -f'
              % (shlex.quote(mixed_path), shlex.quote(mixed_path_norm)))

    mixed_wav = ap.load_wav(mixed_path_norm)

    # Peak-normalise with 10% headroom; the same factor is reused for the
    # reference wavs so the relative levels are preserved.
    norm_factor = np.max(np.abs(mixed_wav)) * 1.1
    mixed_wav = mixed_wav / norm_factor

    # Re-save the normalised mix (needed for the demo).
    librosa.output.write_wav(mixed_path_norm, mixed_wav, 16000)

    # Average the embedding over every reference wav of this speaker,
    # skipping files produced by a previous normalisation run ('-norm.wav').
    emb_root_path = os.path.join('./test/clean_audio', emb_ref_id)
    emb_audio_list = [f for f in os.listdir(emb_root_path)
                      if not f.endswith('-norm.wav')]
    if not emb_audio_list:
        # FIX: the original crashed later with an unbound `embedding` /
        # ZeroDivisionError when no reference wav existed — fail clearly.
        raise RuntimeError("No reference wav found in '%s'" % emb_root_path)

    embedding = None
    emb_wav = None
    for emb_audio in emb_audio_list:
        emb_ref_path = os.path.join(emb_root_path, emb_audio)
        emb_ref_path_norm = emb_ref_path.replace('.wav', '-norm.wav')
        os.system('ffmpeg-normalize %s -ar 16000 -o %s -f'
                  % (shlex.quote(emb_ref_path), shlex.quote(emb_ref_path_norm)))
        emb_wav = ap.load_wav(emb_ref_path_norm) / norm_factor
        librosa.output.write_wav(emb_ref_path_norm, emb_wav, 16000)
        part = get_embedding(encoder, ap, emb_ref_path_norm)
        embedding = part if embedding is None else embedding + part
    embedding /= len(emb_audio_list)

    mixed_spec, mixed_phase = ap.get_spec_from_audio(mixed_wav, return_phase=True)
    return embedding, mixed_spec, mixed_phase, mixed_wav, emb_wav

def predict(encoder, ap, mixed_path, emb_ref_id, outpath='predict.wav', save_img=False):
    """Separate the target speaker from a mixed wav and write the estimate.

    Uses the module-level `model` and `cuda` globals.

    Parameters
    ----------
    encoder: GE2E speaker encoder.
    ap: AudioProcessor for spectrogram <-> wav conversion.
    mixed_path: path to the mixed utterance wav.
    emb_ref_id: speaker id used to locate reference audio.
    outpath: where the estimated wav is written.
    save_img: also save estimated/mixed spectrogram images when True.

    Returns
    -------
    (est_wav, mixed_wav, emb_wav)
    """
    embedding, mixed_spec, mixed_phase, mixed_wav, emb_wav = \
        normalise_and_extract_features(encoder, ap, mixed_path, emb_ref_id)

    mixed_spec = torch.from_numpy(mixed_spec).float()

    # Add a batch dimension: the model expects batched inputs.
    mixed_spec = mixed_spec.unsqueeze(0)
    embedding = embedding.unsqueeze(0)

    if cuda:
        embedding = embedding.cuda()
        mixed_spec = mixed_spec.cuda()

    # FIX: run inference without building the autograd graph — the original
    # tracked gradients during pure inference, wasting memory.
    # NOTE(review): model.eval() is never called anywhere in this script;
    # if the model uses dropout/batchnorm this should be confirmed upstream.
    with torch.no_grad():
        mask = model(mixed_spec, embedding)
        output = mixed_spec * mask

    # Invert the estimated spectrogram using the phase of the mixed signal.
    est_mag = output[0].cpu().detach().numpy()
    mixed_spec = mixed_spec[0].cpu().detach().numpy()
    est_wav = ap.inv_spectrogram(est_mag, phase=mixed_phase)

    librosa.output.write_wav(outpath, est_wav, 16000)
    if save_img:
        img_path = outpath.replace('predict', 'images').replace(' ', '').replace('.wav','-est.png')
        save_spec(img_path, est_mag)
        img_path = outpath.replace('predict', 'images').replace(' ', '').replace('.wav','-mixed.png')
        save_spec(img_path, mixed_spec)

    return est_wav, mixed_wav, emb_wav


import pandas as pd
from mir_eval.separation import bss_eval_sources
import numpy as np

# Make sure the output directories exist before writing predictions/images.
os.makedirs('test1/predict/', exist_ok=True)
os.makedirs('test1/images/', exist_ok=True)

test_csv = pd.read_csv('data/test_demo.csv', sep=',').values

for noise_utterance, emb_utterance_id, clean_utterance, clean_utterance2 in test_csv:
    print(noise_utterance)
    # Resolve all csv paths relative to the test1/ directory, stripping spaces.
    noise_utterance = os.path.join('test1', noise_utterance).replace(' ', '')
    # emb_utterance = os.path.join('test',emb_utterance).replace(' ', '')
    clean_utterance = os.path.join('test1', clean_utterance).replace(' ', '')
    clean_utterance2 = os.path.join('test1', clean_utterance2).replace(' ', '')
    output_path = noise_utterance.replace('result', 'predict').replace(' ', '')

    est_wav, mixed_wav, emb_wav = predict(encoder, ap, noise_utterance,
                                          emb_utterance_id,
                                          outpath=output_path, save_img=True)

    # Force the estimate to the exact length of the mix: truncate when it is
    # longer, zero-pad when shorter (the missing tail is expected silence).
    if len(est_wav) > len(mixed_wav):
        est_wav = est_wav[:len(mixed_wav)]
    else:
        est_wav = np.pad(est_wav, (0, len(mixed_wav) - len(est_wav)),
                         'constant', constant_values=(0, 0))

    # Save the length-matched estimate.
    librosa.output.write_wav(clean_utterance, est_wav, 16000)

    # Predicted audio is est_wav; input audio is mixed_wav.
