
import os
import sys
import requests

sys.path.insert(0, "./speechbrainlib/speechbrain")

import torch
from .featurelib.getfeaturelib import get_feature_lib
from .model.speakerverification import AISSpeakerRecognition
from .config.config import Config
from .datatools.initdataset import DatasetTools


class Task2Handler:
    """Pipeline wrapper for the speaker-verification task.

    Wires together dataset preparation, the precomputed speaker feature
    library and the speechbrain ECAPA speaker-recognition model.  Verbose
    progress messages are printed only when ``task_id == 2``.
    """

    # Remote copy of the precomputed feature library (person_audio_feature.pth).
    FEATURE_LIB_URL = 'https://cloud.tsinghua.edu.cn/f/27b2cfbe8bc14b168bd6/?dl=1'

    def __init__(self, task_id):
        """
        Args:
            task_id: numeric task identifier; progress output is emitted
                only for task 2.
        """
        self.config = Config()
        self.result = None         # reserved; never written in this class
        self.verification = None   # model instance, set by load_verification()
        self.feature_lib = None    # per-person feature tensors, set in test_init()
        self.load_device = None    # 'cuda:0' or 'cpu', set by get_device()
        self.task_id = task_id

    def get_device(self):
        """Select the torch device: 'cuda:0' when available, else 'cpu'."""
        if torch.cuda.is_available():
            self.load_device = 'cuda:0'
            if self.task_id == 2:
                print('  Using GPU: {}.'.format(torch.cuda.get_device_name()))
        else:
            self.load_device = 'cpu'
            if self.task_id == 2:
                print('  Using CPU.')

    def init_dataset(self):
        '''
            if audio dataset not exists, create it from video

            The presence of one known file (ID1/ID1_014.wav) is used as a
            cheap marker that the conversion has already been done.
        '''
        marker = os.path.join(self.config.TRAIN_DATASET_PATH, "ID1", "ID1_014.wav")
        if not os.path.exists(marker):
            print('  Dataset not Exists!')
            dataset_tools = DatasetTools(self.config.RAW_DATASET_PATH, self.config.TRAIN_DATASET_PATH)
            dataset_tools.start_convert()

    def load_verification(self):
        '''
            tool of speaker verification

            Loads the pretrained spkrec-ecapa-voxceleb model onto the
            device previously chosen by get_device().
        '''
        if self.task_id == 2:
            print('  Using (spkrec-ecapa-voxceleb)')
        self.verification = AISSpeakerRecognition.from_hparams(
            source="speechbrain/spkrec-ecapa-voxceleb",
            savedir="pretrained_models/spkrec-ecapa-voxceleb",
            run_opts={"device": self.load_device})

    def _download_feature_lib(self):
        """Download the precomputed feature library to config.FEATURE_PATH.

        Raises:
            requests.HTTPError: if the server answers with an error status.
                (Previously the error page was silently written into the
                .pth file, corrupting it.)
        """
        r = requests.get(self.FEATURE_LIB_URL, timeout=60)
        r.raise_for_status()  # don't write an HTML error page into the .pth file
        with open(self.config.FEATURE_PATH, "wb") as f:
            f.write(r.content)
        print('  Download Feature Lib: (person_audio_feature.pth)')

    def test_init(self):
        '''
            make dataset
            make feature lib
            load tool
        '''
        if self.task_id == 2:
            print('[Stage 1/6] Test Init - Feature Lib')
        if not os.path.exists(self.config.FEATURE_PATH):
            self._download_feature_lib()
        get_feature_lib(self.config.TRAIN_DATASET_PATH, self.config.FEATURE_PATH, self.task_id)
        self.get_device()
        self.feature_lib = torch.load(self.config.FEATURE_PATH, map_location=self.load_device)
        if self.task_id == 2:
            print('[Stage 2/6] Test Init - Dataset')
        # self.init_dataset()  # dataset conversion currently disabled
        if self.task_id == 2:
            print('[Stage 3/6] Test Init - Get Device')
        # NOTE: the device was already selected above; a second get_device()
        # call here only duplicated the "Using GPU/CPU" message.
        if self.load_device == 'cpu':
            # map_location='cpu' should already place the tensors on CPU;
            # kept as a defensive extra move — TODO confirm still needed.
            self.feature_lib = [x.cpu() for x in self.feature_lib]
        if self.task_id == 2:
            print('[Stage 4/6] Test Init - Load Model')
        self.load_verification()

    def test_single_audio(self, audio):
        '''
        Args:
            @audio:
                the audio you want to verfication
        Returns:
            @person_id:
                the speaker of the audio input
            @score_list:
                the score of every person to input audio
        '''
        score_list = self.verification.verify_file_and_features(self.feature_lib, audio)
        # 1-based ID of the best-scoring person in the feature library.
        person_id = score_list.index(max(score_list)) + 1
        # Clean up temporary wav files without shelling out — the old
        # os.system('rm -rf *.wav') was unix-only and unnecessarily invoked
        # a shell for a simple glob delete.
        for name in os.listdir('.'):
            if name.endswith('.wav'):
                os.remove(name)
        return person_id, score_list


# # debug

# if __name__ == '__main__':

#     config = Config()
#     init_dataset(config)
#     get_feature_lib(config.TRAIN_DATASET_PATH, config.FEATURE_PATH)
#     test_single_audio(config)