
import os
import sys
sys.path.append('../')
import json
import torch
import numpy as np
from ..speechbrainlib.speechbrain.pretrained import EncoderClassifier
import time
from tqdm import tqdm

class AISSpeakerRecognition(EncoderClassifier):
    """A ready-to-use model for speaker recognition.

    It can perform pairwise speaker verification with verify_batch() /
    verify_files(), and it can build and query a per-speaker embedding
    library (get_feature_lib() / verify_file_and_features()).

    ```
    Example
    -------
    >>> import torchaudio
    >>> from speechbrain.pretrained import SpeakerRecognition
    >>> # Model is downloaded from the speechbrain HuggingFace repo
    >>> tmpdir = getfixture("tmpdir")
    >>> verification = SpeakerRecognition.from_hparams(
    ...     source="speechbrain/spkrec-ecapa-voxceleb",
    ...     savedir=tmpdir,
    ... )

    >>> # Perform verification
    >>> signal, fs = torchaudio.load("samples/audio_samples/example1.wav")
    >>> signal2, fs = torchaudio.load("samples/audio_samples/example2.flac")
    >>> score, prediction = verification.verify_batch(signal, signal2)
    """

    MODULES_NEEDED = [
        "compute_features",
        "mean_var_norm",
        "embedding_model",
        "mean_var_norm_emb",
    ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Cosine similarity over the last (embedding) dimension.
        self.similarity = torch.nn.CosineSimilarity(dim=-1, eps=1e-8)
        # Per-speaker feature tensors accumulated by get_feature_lib().
        # NOTE(review): this list is never reset, so calling
        # get_feature_lib() twice on one instance appends a second copy.
        self.feature = []

    def verify_batch(
        self, wavs1, wavs2, wav1_lens=None, wav2_lens=None, threshold=0.25
    ):
        """Performs speaker verification with cosine distance.

        It returns the score and the decision (0 different speakers,
        1 same speakers).

        Arguments
        ---------
        wavs1 : Torch.Tensor
                Tensor containing the speech waveform1 (batch, time).
                Make sure the sample rate is fs=16000 Hz.
        wavs2 : Torch.Tensor
                Tensor containing the speech waveform2 (batch, time).
                Make sure the sample rate is fs=16000 Hz.
        wav1_lens: Torch.Tensor
                Tensor containing the relative length for each sentence
                in the length (e.g., [0.8 0.6 1.0])
        wav2_lens: Torch.Tensor
                Tensor containing the relative length for each sentence
                in the length (e.g., [0.8 0.6 1.0])
        threshold: Float
                Threshold applied to the cosine distance to decide if the
                speaker is different (0) or the same (1).

        Returns
        -------
        score
            The score associated to the binary verification output
            (cosine distance).
        prediction
            The prediction is 1 if the two signals in input are from the same
            speaker and 0 otherwise.
        """
        emb1 = self.encode_batch(wavs1, wav1_lens, normalize=True)
        emb2 = self.encode_batch(wavs2, wav2_lens, normalize=True)
        score = self.similarity(emb1, emb2)
        return score, score > threshold

    def verify_files(self, path_x, path_y, threshold=0.25):
        """Speaker verification with cosine distance between two audio files.

        Returns the score and the decision (0 different speakers,
        1 same speakers).

        Arguments
        ---------
        path_x : str
            Path of the first audio file.
        path_y : str
            Path of the second audio file.
        threshold : float
            Cosine-similarity decision threshold (see verify_batch).

        Returns
        -------
        score
            The score associated to the binary verification output
            (cosine distance).
        prediction
            The prediction is 1 if the two signals in input are from the same
            speaker and 0 otherwise.
        """
        waveform_x = self.load_audio(path_x)
        waveform_y = self.load_audio(path_y)
        # Fake batches of size 1:
        batch_x = waveform_x.unsqueeze(0)
        batch_y = waveform_y.unsqueeze(0)

        # Verify:
        score, decision = self.verify_batch(
            batch_x, batch_y, threshold=threshold
        )
        # Squeeze the batch dimension:
        return score[0], decision[0]

    def get_audio_feature(self, audio_path, id_feature):
        """Encode one audio file and append its embedding to a speaker's
        feature tensor.

        Arguments
        ---------
        audio_path : str
            Path of the audio file to encode.
        id_feature : torch.Tensor or None
            Features already collected for this speaker, or None when this
            is the speaker's first utterance.

        Returns
        -------
        torch.Tensor
            `id_feature` with this audio's embedding concatenated along
            dim 0 (or just this embedding when `id_feature` was None).
        """
        waveform_audio = self.load_audio(audio_path)
        batch_audio = waveform_audio.unsqueeze(0)
        emb_audio = self.encode_batch(batch_audio, None, normalize=True)

        # `is not None` instead of `!= None`: comparing a tensor to None
        # with `!=` is fragile across PyTorch versions.
        if id_feature is not None:
            id_feature = torch.cat((id_feature, emb_audio), 0)
        else:
            id_feature = emb_audio

        return id_feature

    def get_id_feature(self, audio_dir):
        """Encode every audio file in `audio_dir` for one speaker.

        Arguments
        ---------
        audio_dir : str
            Directory holding one speaker's audio files.

        Returns
        -------
        torch.Tensor or None
            Stacked embeddings for the speaker, or None when the
            directory is empty.
        """
        id_feature = None

        for audio_file in os.listdir(audio_dir):
            audio_path = os.path.join(audio_dir, audio_file)
            id_feature = self.get_audio_feature(audio_path, id_feature)

        return id_feature

    def get_feature_lib(self, dataset_dir, save_path):
        """Build the feature library from the whole train set and save it.

        Arguments
        ---------
        dataset_dir : str
            Root of the train dataset; one sub-directory per speaker,
            named like "ID1", "ID2", ... (sorted by the numeric suffix).
        save_path : str
            Destination passed to torch.save.

        The saved object is a list with one feature tensor per speaker:
            [
                ID1's feature tensor,
                ID2's feature tensor,
                ...,
            ]
        """
        # Sort "ID<k>" directory names numerically (avoid shadowing the
        # builtin `id`).
        id_list = sorted(os.listdir(dataset_dir), key=lambda name: int(name[2:]))

        for speaker_id in tqdm(id_list, ncols=70):
            id_path = os.path.join(dataset_dir, speaker_id)
            self.feature.append(self.get_id_feature(id_path))
            # load_audio() can leave converted .wav copies in the working
            # directory; clean them up without shelling out (the original
            # used `os.system('rm -rf *.wav')`, which is shell-dependent
            # and silently ignores failures).
            self._cleanup_wav_tempfiles()

        torch.save(self.feature, save_path)

    @staticmethod
    def _cleanup_wav_tempfiles():
        """Best-effort removal of stray .wav files in the current directory."""
        for fname in os.listdir('.'):
            if fname.endswith('.wav'):
                try:
                    os.remove(fname)
                except OSError:
                    pass  # mirror `rm -f`: ignore files we cannot delete

    def verify_file_and_features(self, feature_lib, file):
        """Score one audio file against every speaker in the library.

        Arguments
        ---------
        feature_lib : list of torch.Tensor
            One tensor per speaker — assumed shape (num_utterances, 1,
            emb_dim), e.g. [20][x, 1, 192] — TODO confirm against
            get_feature_lib output.
        file : str
            Path of the audio file to identify.

        Returns
        -------
        list
            One score per speaker, in library order. The per-speaker
            statistic is max + min cosine similarity over that speaker's
            stored embeddings.
        """
        waveform_audio = self.load_audio(file)
        batch_audio = waveform_audio.unsqueeze(0)
        emb_audio = self.encode_batch(batch_audio, None, normalize=True)

        score_list = []

        for id_feature in feature_lib:
            person_scores = []

            for stored_feat in id_feature:
                # unsqueeze(0) instead of the original in-place
                # resize_(1, x, y): resize_() permanently mutated the
                # feature library on every query.
                curr_score = self.similarity(stored_feat.unsqueeze(0), emb_audio)
                person_scores.append(curr_score)

            person_scores.sort()
            # Decision statistic: best match + worst match. (The original
            # also computed an average with the wrong denominator —
            # len(feature_lib) instead of the feature count — and indexed
            # [-2]/[1], which crashed for single-utterance speakers; all
            # of those values were unused and have been removed.)
            score_list.append(person_scores[-1] + person_scores[0])

        return score_list


# # debug

# if __name__ == '__main__':
    
#     verification = AISSpeakerRecognition.from_hparams(source="speechbrain/spkrec-ecapa-voxceleb", savedir="pretrained_models/spkrec-ecapa-voxceleb")

#     file1 = '/home/kaiyuecheng/code/study/AIS_hw/ais-srs/dataset/bak_task2_train/audio/ID1/ID1_014.wav'
#     file2 = '/home/kaiyuecheng/code/study/AIS_hw/ais-srs/dataset/bak_task2_train/audio/ID1/ID1_015.wav'

#     score, prediction = verification.verify_files(file1, file2)

#     print(score)
#     print(prediction) # True = same speaker, False=Different speakers
    
#     os.system('rm -rf *.wav')
