import numpy as np
from utils.common import dynamic_import,parseParams,return_ins,load_logging_config
from manage_dataset import get_data_dict_from_summary,gen_summary_from_dict
import os
from consta import path_sep
import logging
import wave
from utils.thread import threadsafe_generator
import random
import gc

def summary(path,summary_again,subset,num_limit,wav2trn_type_is_summary,wav_ext):
    """Collect a {relative_wav_path: transcript} mapping for one data subset.

    Either walks the subset's audio directory, pairing each audio file with its
    transcript file and writing a fresh summary file, or reads an already
    existing summary file. At most ``num_limit`` entries are returned.

    Args:
        path: dataset root directory; the process chdirs here while working.
        summary_again: when truthy, rebuild the summary by walking the audio
            tree (ignored when the dataset ships a summary file already).
        subset: subset config providing subdir, trn_dir, trn_ext, trn_file,
            trn_file_format and line_no.
        num_limit: maximum number of entries returned.
        wav2trn_type_is_summary: dataset already ships a summary file, so the
            directory walk is never performed.
        wav_ext: comma-separated audio extensions, e.g. "wav,flac".

    Returns:
        dict mapping "<relative dir>/<basename>" to its transcript string.
    """
    data_dict={}
    old_cwd=os.getcwd()
    os.chdir(path)
    try:
        # trn_file_format 2 selects JSON; anything else is treated as text.
        tft='json' if subset.trn_file_format==2 else 'txt'
        if summary_again and not wav2trn_type_is_summary:
            wav_ext=tuple(wav_ext.split(','))
            for root,_dirs,files in os.walk(subset.subdir):
                for file in files:
                    if not file.endswith(wav_ext):
                        continue
                    rel=os.path.relpath(root,subset.subdir).replace(path_sep,'/')
                    stem=os.path.splitext(file)[0]
                    with open(f'{subset.trn_dir}/{rel}/{stem}{subset.trn_ext}',encoding='utf8') as trnf:
                        # Take the configured line of the transcript and strip
                        # every whitespace character from it.
                        content=''.join(trnf.readlines()[subset.line_no].strip().split())
                    data_dict[f'{rel}/{stem}']=content
                    logging.info(f'processing audio {root}/{file}')
            gen_summary_from_dict(data_dict,f'{subset.subdir}/{subset.trn_file}',tft)
        else:
            data_dict=get_data_dict_from_summary(f'{subset.subdir}/{subset.trn_file}',tft,num_limit)
    finally:
        # Always restore the caller's working directory, even on I/O errors.
        os.chdir(old_cwd)
    # The directory walk above ignores num_limit, so enforce it here.
    if len(data_dict)>num_limit:
        data_dict={k:data_dict[k] for k in list(data_dict)[:num_limit]}
    return data_dict

def return_or_with_sample_rate(modulename,classname,parameters,attribute,sample_rate):
    """Resolve a callable from config, injecting ``sample_rate`` into its params.

    When ``attribute`` is empty the imported object itself is returned together
    with the raw ``parameters`` string. Otherwise the object is instantiated
    (with any 'sample_rate' placeholder in its parameters replaced by the real
    rate) and the named bound method is returned together with that method's
    own parameter string.

    Returns:
        (callable, params_string) tuple.
    """
    obj=dynamic_import(f'{modulename}:{classname}')
    if attribute=='':
        return obj,parameters
    # Reuse the shared substitution helper instead of duplicating its logic.
    t,d=process_params_with_sample_rate(parameters,sample_rate)
    ins=obj(*t,**d)
    func_params=attribute.split('(',1)
    # Even a parameterless member method must be written as "func()" in
    # `attribute`, so getattr resolves a method instead of a plain attribute.
    return getattr(ins,func_params[0]),func_params[1][:-1]
    
def process_params_with_sample_rate(params,sample_rate):
    """Parse a parameter string and substitute the 'sample_rate' placeholder.

    Only the first positional occurrence (or the keyword entry) named
    'sample_rate' is replaced with the actual ``sample_rate`` value.

    Returns:
        (args_tuple, kwargs_dict) ready to be splatted into a call.
    """
    t,d=parseParams(params)
    if 'sample_rate' in t:
        tl=list(t)
        # Replace just the first placeholder, matching the original behavior.
        tl[tl.index('sample_rate')]=sample_rate
        t=tuple(tl)
    elif 'sample_rate' in d:
        d['sample_rate']=sample_rate
    return t,d

def return_f(modulename,classname,parameters,attribute):
    """Resolve a callable from config without any parameter substitution.

    When ``attribute`` is empty the imported object itself is returned with
    the raw ``parameters`` string; otherwise the object is instantiated and
    the named bound method is returned with that method's parameter string.
    """
    obj=dynamic_import(f'{modulename}:{classname}')
    if attribute=='':
        return obj,parameters
    args,kwargs=parseParams(parameters)
    instance=obj(*args,**kwargs)
    method_name,method_params=attribute.split('(',1)
    # attribute is written as "func(...)"; drop the trailing ')'.
    return getattr(instance,method_name),method_params[:-1]

class DataLoader:
    """Data pipeline for lexicon-based ASR tasks.

    Resolves the configured feature pipeline (``fp``), optional feature
    extractor (``fe``) and grapheme-to-phoneme (``g2p``) / phoneme-encoding
    (``penc``) converters via dynamic import, summarizes the requested data
    subsets, and exposes thread-safe batch generators for train/val/test.
    """

    def __init__(self,dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,lexicon_dict,fp,fe,train_datasubset,val_datasubset,test_datasubset):
        """Store the configuration objects and immediately load the data.

        ``bttask.typ`` selects what is loaded: 0 → train+val splits,
        1 → test split.
        """
        # Per-split key→transcript mappings and derived key/index lists.
        self.train_data_dict=dict()
        self.val_data_dict=dict()
        self.test_data_dict=dict()
        self.train_data_list=list()
        self.val_data_list=list()
        self.test_data_list=list()
        self.train_data_idxs=list()
        self.val_data_idxs=list()
        self.test_data_idxs=list()
        # Caches filled only when bttask.maintain_data_all keeps every
        # preprocessed sample in memory.
        self.train_fed_wavs=[]
        self.train_labels=[]
        self.train_lexicon_labels=[]
        self.train_ys=[]
        self.val_fed_wavs=[]
        self.val_labels=[]
        self.val_lexicon_labels=[]
        self.val_ys=[]
        self.test_fed_wavs=[]
        self.test_labels=[]
        self.test_lexicon_labels=[]
        self.test_ys=[]
        self.train_num_limit=bttask.train_data_num
        self.val_num_limit=bttask.val_data_num
        self.test_num_limit=bttask.test_data_num
        self.dataset=dataset
        self.bttask=bttask
        self.train_data_preprocessor=train_data_preprocessor
        self.val_data_preprocessor=val_data_preprocessor
        self.test_data_preprocessor=test_data_preprocessor
        self.lexicon_dict=lexicon_dict
        self.fp=fp
        self.fe=fe
        self.train_datasubset=train_datasubset
        self.val_datasubset=val_datasubset
        self.test_datasubset=test_datasubset
        self._load_data()

    @staticmethod
    def _load_lexicon_table(dict_file,modulename,classname,parameters,attribute):
        """Load one lexicon lookup table.

        '' → None; a .txt/.json path → parsed summary file (the extension
        doubles as the summary format name); anything else → a dynamically
        imported object.
        """
        if dict_file=='':
            return None
        if dict_file.endswith(('.txt','.json')):
            return get_data_dict_from_summary(dict_file,os.path.splitext(dict_file)[1][1:])
        return return_ins(modulename,classname,parameters,attribute)

    @staticmethod
    def _resolve_preprocessor(preprocessor):
        """Resolve an optional batch-preprocessor config to (callable, args, kwargs)."""
        if preprocessor is None:
            return None,None,None
        f,params=return_f(preprocessor.modulename,preprocessor.classname,preprocessor.parameters,preprocessor.attribute)
        t,d=parseParams(params)
        return f,t,d

    def _load_data(self):
        """Resolve all pipeline callables and summarize the configured splits."""
        # NOTE(review): audio format always comes from the *train* subset,
        # even for test-only tasks (typ==1) — confirm this is intended.
        self.sample_rate=self.train_datasubset.sample_rate
        self.sample_width=self.train_datasubset.sample_width
        # NOTE(review): the original width check assigned np.short in both
        # branches, so only 16-bit samples are actually supported; other
        # sample widths would be decoded incorrectly. TODO: map width 1/4 to
        # int8/int32 if such data exists.
        self.sample_dtype=np.short
        self.fp,fpp=return_or_with_sample_rate(self.fp.modulename,self.fp.classname,self.fp.parameters,self.fp.attribute,self.sample_rate)
        self.fpt,self.fpd=process_params_with_sample_rate(fpp,self.sample_rate)
        if self.fe is None:
            self.fe=self.fep=self.fet=self.fed=None
        else:
            self.fe,fep=return_or_with_sample_rate(self.fe.modulename,self.fe.classname,self.fe.parameters,self.fe.attribute,self.sample_rate)
            self.fet,self.fed=process_params_with_sample_rate(fep,self.sample_rate)
        self.g2p_dict=self._load_lexicon_table(self.lexicon_dict.g2p_dict_file,self.lexicon_dict.g2p_dict_modulename,self.lexicon_dict.g2p_dict_classname,self.lexicon_dict.g2p_dict_parameters,self.lexicon_dict.g2p_dict_attribute)
        self.penc_dict=self._load_lexicon_table(self.lexicon_dict.penc_dict_file,self.lexicon_dict.penc_dict_modulename,self.lexicon_dict.penc_dict_classname,self.lexicon_dict.penc_dict_parameters,self.lexicon_dict.penc_dict_attribute)
        self.g2p_f,g2p_params=return_f(self.lexicon_dict.g2p_modulename,self.lexicon_dict.g2p_classname,self.lexicon_dict.g2p_parameters,self.lexicon_dict.g2p_attribute)
        self.g2p_t,self.g2p_d=parseParams(g2p_params)
        self.penc_f,penc_params=return_f(self.lexicon_dict.penc_modulename,self.lexicon_dict.penc_classname,self.lexicon_dict.penc_parameters,self.lexicon_dict.penc_attribute)
        self.penc_t,self.penc_d=parseParams(penc_params)
        self.train_dp_f,self.train_dp_t,self.train_dp_d=self._resolve_preprocessor(self.train_data_preprocessor)
        self.val_dp_f,self.val_dp_t,self.val_dp_d=self._resolve_preprocessor(self.val_data_preprocessor)
        self.test_dp_f,self.test_dp_t,self.test_dp_d=self._resolve_preprocessor(self.test_data_preprocessor)
        if self.bttask.typ==0:
            # Training task: load train and validation splits.
            self.train_data_dict=summary(self.dataset.path,self.bttask.summary_again,self.train_datasubset,self.train_num_limit,self.dataset.wav2trn_type_is_summary,self.dataset.wav_ext)
            self.train_data_list=list(self.train_data_dict.keys())
            self.train_data_idxs=list(range(len(self.train_data_list)))
            self.val_data_dict=summary(self.dataset.path,self.bttask.summary_again,self.val_datasubset,self.val_num_limit,self.dataset.wav2trn_type_is_summary,self.dataset.wav_ext)
            self.val_data_list=list(self.val_data_dict.keys())
            self.val_data_idxs=list(range(len(self.val_data_list)))
            if self.bttask.maintain_data_all:
                # Pre-compute every feature/label once; durations are unused here.
                self.train_fed_wavs,self.train_labels,self.train_lexicon_labels,self.train_ys,_=self.preprocess(self.train_data_list,self.train_data_dict,self.train_datasubset)
                self.val_fed_wavs,self.val_labels,self.val_lexicon_labels,self.val_ys,_=self.preprocess(self.val_data_list,self.val_data_dict,self.val_datasubset)
        elif self.bttask.typ==1:
            # Test task: load the test split only.
            self.test_data_dict=summary(self.dataset.path,self.bttask.summary_again,self.test_datasubset,self.test_num_limit,self.dataset.wav2trn_type_is_summary,self.dataset.wav_ext)
            self.test_data_list=list(self.test_data_dict.keys())
            self.test_data_idxs=list(range(len(self.test_data_list)))
            if self.bttask.maintain_data_all:
                self.test_fed_wavs,self.test_labels,self.test_lexicon_labels,self.test_ys,self.test_durations=self.preprocess(self.test_data_list,self.test_data_dict,self.test_datasubset)

    def preprocess(self,data_list,data_dict,subset):
        """Read, featurize and label the given utterances.

        Args:
            data_list: utterance keys (relative paths without extension).
            data_dict: key → transcript mapping.
            subset: subset config supplying the audio subdirectory.

        Returns:
            (fed_wavs, labels, lexicon_labels, ys, durations) — features,
            transcripts, g2p outputs, encoded label arrays, and per-utterance
            durations in seconds.
        """
        fed_wavs=[]
        labels=[]
        lexicon_labels=[]
        ys=[]
        durations=[]
        for key in data_list:
            # `with` guarantees the wave handle is closed even on read errors.
            with wave.open(f'{self.dataset.path}/{subset.subdir}/{key}.wav','rb') as wav:
                frames=wav.readframes(wav.getnframes())
                # NOTE(review): duration uses the configured sample rate, not
                # wav.getframerate() — mismatched files get wrong durations.
                durations.append(wav.getnframes()/self.sample_rate)
            wave_data=np.frombuffer(frames,dtype=self.sample_dtype)
            fed_wav=self.fp(wave_data,*self.fpt,**self.fpd)
            if self.fe is not None:
                fed_wav=self.fe(fed_wav,*self.fet,**self.fed)
            fed_wavs.append(fed_wav)
            text=data_dict[key]
            labels.append(text)
            lexicon=self.g2p_f(text,self.g2p_dict,*self.g2p_t,**self.g2p_d)
            lexicon_labels.append(lexicon)
            ys.append(np.array(self.penc_f(lexicon,self.penc_dict,*self.penc_t,**self.penc_d)))
        return fed_wavs,labels,lexicon_labels,ys,durations

    @staticmethod
    def _next_window(idxs,idxe,batch_size,num):
        """Advance a [idxs, idxe) batch window over ``num`` items.

        Wraps to the start once the end is reached; the final batch before a
        wrap is right-aligned so every yielded batch holds ``batch_size`` items.
        """
        if idxe>=num:
            return 0,batch_size
        if idxe+batch_size>num:
            return num-batch_size,num
        return idxe,idxe+batch_size

    @threadsafe_generator
    def train_data_generator(self):
        """Endlessly yield preprocessed training batches."""
        batch_size=self.bttask.batch_size
        idxs,idxe=0,batch_size
        num=len(self.train_data_list)
        if self.bttask.maintain_data_all:
            while True:
                batch=self.train_data_idxs[idxs:idxe]
                data=self.train_dp_f([self.train_fed_wavs[i] for i in batch],[self.train_ys[i] for i in batch],*self.train_dp_t,**self.train_dp_d)
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)
        else:
            while True:
                # Recompute features batch-by-batch to keep memory bounded.
                keys=[self.train_data_list[i] for i in self.train_data_idxs[idxs:idxe]]
                train_fed_wavs,_,_,train_ys,_=self.preprocess(keys,self.train_data_dict,self.train_datasubset)
                data=self.train_dp_f(train_fed_wavs,train_ys,*self.train_dp_t,**self.train_dp_d)
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)

    @threadsafe_generator
    def val_data_generator(self):
        """Endlessly yield preprocessed validation batches."""
        batch_size=self.bttask.batch_size
        idxs,idxe=0,batch_size
        num=len(self.val_data_list)
        if self.bttask.maintain_data_all:
            while True:
                batch=self.val_data_idxs[idxs:idxe]
                data=self.val_dp_f([self.val_fed_wavs[i] for i in batch],[self.val_ys[i] for i in batch],*self.val_dp_t,**self.val_dp_d)
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)
        else:
            while True:
                keys=[self.val_data_list[i] for i in self.val_data_idxs[idxs:idxe]]
                val_fed_wavs,_,_,val_ys,_=self.preprocess(keys,self.val_data_dict,self.val_datasubset)
                data=self.val_dp_f(val_fed_wavs,val_ys,*self.val_dp_t,**self.val_dp_d)
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)

    @threadsafe_generator
    def test_data_generator(self):
        """Endlessly yield (batch, labels, durations) tuples for evaluation."""
        batch_size=self.bttask.batch_size
        idxs,idxe=0,batch_size
        num=len(self.test_data_list)
        if self.bttask.maintain_data_all:
            while True:
                batch=self.test_data_idxs[idxs:idxe]
                data=self.test_dp_f([self.test_fed_wavs[i] for i in batch],[self.test_ys[i] for i in batch],*self.test_dp_t,**self.test_dp_d),[self.test_labels[i] for i in batch],[self.test_durations[i] for i in batch]
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)
        else:
            while True:
                keys=[self.test_data_list[i] for i in self.test_data_idxs[idxs:idxe]]
                test_fed_wavs,test_labels,_,test_ys,test_durations=self.preprocess(keys,self.test_data_dict,self.test_datasubset)
                data=self.test_dp_f(test_fed_wavs,test_ys,*self.test_dp_t,**self.test_dp_d),test_labels,test_durations
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)

    def shuffle(self):
        """Shuffle the iteration order of all three splits in place."""
        random.shuffle(self.train_data_idxs)
        random.shuffle(self.val_data_idxs)
        random.shuffle(self.test_data_idxs)

class DataLoader4E2E:
    """Data pipeline for end-to-end ASR tasks (no lexicon stage).

    Like ``DataLoader`` but batches carry raw transcript strings instead of
    g2p/phoneme-encoded label arrays.
    """

    def __init__(self,dataset,bttask,train_data_preprocessor,val_data_preprocessor,test_data_preprocessor,fp,fe,train_datasubset,val_datasubset,test_datasubset):
        """Store the configuration objects and immediately load the data.

        ``bttask.typ`` selects what is loaded: 0 → train+val splits,
        1 → test split.
        """
        # Per-split key→transcript mappings and derived key/index lists.
        self.train_data_dict=dict()
        self.val_data_dict=dict()
        self.test_data_dict=dict()
        self.train_data_list=list()
        self.val_data_list=list()
        self.test_data_list=list()
        self.train_data_idxs=list()
        self.val_data_idxs=list()
        self.test_data_idxs=list()
        # Caches filled only when bttask.maintain_data_all keeps every
        # preprocessed sample in memory.
        self.train_fed_wavs=[]
        self.train_labels=[]
        self.train_ys=[]
        self.val_fed_wavs=[]
        self.val_labels=[]
        self.val_ys=[]
        self.test_fed_wavs=[]
        self.test_labels=[]
        self.test_ys=[]
        self.train_num_limit=bttask.train_data_num
        self.val_num_limit=bttask.val_data_num
        self.test_num_limit=bttask.test_data_num
        self.dataset=dataset
        self.bttask=bttask
        self.train_data_preprocessor=train_data_preprocessor
        self.val_data_preprocessor=val_data_preprocessor
        self.test_data_preprocessor=test_data_preprocessor
        self.fp=fp
        self.fe=fe
        self.train_datasubset=train_datasubset
        self.val_datasubset=val_datasubset
        self.test_datasubset=test_datasubset
        self._load_data()

    @staticmethod
    def _resolve_preprocessor(preprocessor):
        """Resolve an optional batch-preprocessor config to (callable, args, kwargs)."""
        if preprocessor is None:
            return None,None,None
        f,params=return_f(preprocessor.modulename,preprocessor.classname,preprocessor.parameters,preprocessor.attribute)
        t,d=parseParams(params)
        return f,t,d

    def _load_data(self):
        """Resolve the feature callables and summarize the configured splits."""
        # NOTE(review): audio format always comes from the *train* subset,
        # even for test-only tasks (typ==1) — confirm this is intended.
        self.sample_rate=self.train_datasubset.sample_rate
        self.sample_width=self.train_datasubset.sample_width
        # NOTE(review): the original width check assigned np.short in both
        # branches, so only 16-bit samples are actually supported; other
        # sample widths would be decoded incorrectly. TODO: map width 1/4 to
        # int8/int32 if such data exists.
        self.sample_dtype=np.short
        self.fp,fpp=return_or_with_sample_rate(self.fp.modulename,self.fp.classname,self.fp.parameters,self.fp.attribute,self.sample_rate)
        self.fpt,self.fpd=process_params_with_sample_rate(fpp,self.sample_rate)
        if self.fe is None:
            self.fe=self.fep=self.fet=self.fed=None
        else:
            self.fe,fep=return_or_with_sample_rate(self.fe.modulename,self.fe.classname,self.fe.parameters,self.fe.attribute,self.sample_rate)
            self.fet,self.fed=process_params_with_sample_rate(fep,self.sample_rate)
        self.train_dp_f,self.train_dp_t,self.train_dp_d=self._resolve_preprocessor(self.train_data_preprocessor)
        self.val_dp_f,self.val_dp_t,self.val_dp_d=self._resolve_preprocessor(self.val_data_preprocessor)
        self.test_dp_f,self.test_dp_t,self.test_dp_d=self._resolve_preprocessor(self.test_data_preprocessor)
        if self.bttask.typ==0:
            # Training task: load train and validation splits.
            self.train_data_dict=summary(self.dataset.path,self.bttask.summary_again,self.train_datasubset,self.train_num_limit,self.dataset.wav2trn_type_is_summary,self.dataset.wav_ext)
            self.train_data_list=list(self.train_data_dict.keys())
            self.train_data_idxs=list(range(len(self.train_data_list)))
            self.val_data_dict=summary(self.dataset.path,self.bttask.summary_again,self.val_datasubset,self.val_num_limit,self.dataset.wav2trn_type_is_summary,self.dataset.wav_ext)
            self.val_data_list=list(self.val_data_dict.keys())
            self.val_data_idxs=list(range(len(self.val_data_list)))
            if self.bttask.maintain_data_all:
                # Pre-compute every feature/label once; durations are unused here.
                self.train_fed_wavs,self.train_labels,_=self.preprocess(self.train_data_list,self.train_data_dict,self.train_datasubset)
                self.val_fed_wavs,self.val_labels,_=self.preprocess(self.val_data_list,self.val_data_dict,self.val_datasubset)
        elif self.bttask.typ==1:
            # Test task: load the test split only.
            self.test_data_dict=summary(self.dataset.path,self.bttask.summary_again,self.test_datasubset,self.test_num_limit,self.dataset.wav2trn_type_is_summary,self.dataset.wav_ext)
            self.test_data_list=list(self.test_data_dict.keys())
            self.test_data_idxs=list(range(len(self.test_data_list)))
            if self.bttask.maintain_data_all:
                self.test_fed_wavs,self.test_labels,self.test_durations=self.preprocess(self.test_data_list,self.test_data_dict,self.test_datasubset)

    def preprocess(self,data_list,data_dict,subset):
        """Read and featurize the given utterances.

        Args:
            data_list: utterance keys (relative paths without extension).
            data_dict: key → transcript mapping.
            subset: subset config supplying the audio subdirectory.

        Returns:
            (fed_wavs, labels, durations) — features, transcript strings, and
            per-utterance durations in seconds.
        """
        fed_wavs=[]
        labels=[]
        durations=[]
        for key in data_list:
            # `with` guarantees the wave handle is closed even on read errors.
            with wave.open(f'{self.dataset.path}/{subset.subdir}/{key}.wav','rb') as wav:
                frames=wav.readframes(wav.getnframes())
                # NOTE(review): duration uses the configured sample rate, not
                # wav.getframerate() — mismatched files get wrong durations.
                durations.append(wav.getnframes()/self.sample_rate)
            wave_data=np.frombuffer(frames,dtype=self.sample_dtype)
            fed_wav=self.fp(wave_data,*self.fpt,**self.fpd)
            if self.fe is not None:
                fed_wav=self.fe(fed_wav,*self.fet,**self.fed)
            fed_wavs.append(fed_wav)
            labels.append(data_dict[key])
        return fed_wavs,labels,durations

    @staticmethod
    def _next_window(idxs,idxe,batch_size,num):
        """Advance a [idxs, idxe) batch window over ``num`` items.

        Wraps to the start once the end is reached; the final batch before a
        wrap is right-aligned so every yielded batch holds ``batch_size`` items.
        """
        if idxe>=num:
            return 0,batch_size
        if idxe+batch_size>num:
            return num-batch_size,num
        return idxe,idxe+batch_size

    @threadsafe_generator
    def train_data_generator(self):
        """Endlessly yield preprocessed training batches."""
        batch_size=self.bttask.batch_size
        idxs,idxe=0,batch_size
        num=len(self.train_data_list)
        if self.bttask.maintain_data_all:
            while True:
                batch=self.train_data_idxs[idxs:idxe]
                data=self.train_dp_f([self.train_fed_wavs[i] for i in batch],[self.train_labels[i] for i in batch],*self.train_dp_t,**self.train_dp_d)
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)
        else:
            while True:
                # Recompute features batch-by-batch to keep memory bounded.
                keys=[self.train_data_list[i] for i in self.train_data_idxs[idxs:idxe]]
                train_fed_wavs,train_labels,_=self.preprocess(keys,self.train_data_dict,self.train_datasubset)
                data=self.train_dp_f(train_fed_wavs,train_labels,*self.train_dp_t,**self.train_dp_d)
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)

    @threadsafe_generator
    def val_data_generator(self):
        """Endlessly yield preprocessed validation batches."""
        batch_size=self.bttask.batch_size
        idxs,idxe=0,batch_size
        num=len(self.val_data_list)
        if self.bttask.maintain_data_all:
            while True:
                batch=self.val_data_idxs[idxs:idxe]
                data=self.val_dp_f([self.val_fed_wavs[i] for i in batch],[self.val_labels[i] for i in batch],*self.val_dp_t,**self.val_dp_d)
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)
        else:
            while True:
                keys=[self.val_data_list[i] for i in self.val_data_idxs[idxs:idxe]]
                val_fed_wavs,val_labels,_=self.preprocess(keys,self.val_data_dict,self.val_datasubset)
                data=self.val_dp_f(val_fed_wavs,val_labels,*self.val_dp_t,**self.val_dp_d)
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)

    @threadsafe_generator
    def test_data_generator(self):
        """Endlessly yield (batch, labels, durations) tuples for evaluation."""
        batch_size=self.bttask.batch_size
        idxs,idxe=0,batch_size
        num=len(self.test_data_list)
        if self.bttask.maintain_data_all:
            while True:
                batch=self.test_data_idxs[idxs:idxe]
                # Labels serve as both the dp input and the reference output.
                data=self.test_dp_f([self.test_fed_wavs[i] for i in batch],[self.test_labels[i] for i in batch],*self.test_dp_t,**self.test_dp_d),[self.test_labels[i] for i in batch],[self.test_durations[i] for i in batch]
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)
        else:
            while True:
                keys=[self.test_data_list[i] for i in self.test_data_idxs[idxs:idxe]]
                test_fed_wavs,test_labels,test_durations=self.preprocess(keys,self.test_data_dict,self.test_datasubset)
                data=self.test_dp_f(test_fed_wavs,test_labels,*self.test_dp_t,**self.test_dp_d),test_labels,test_durations
                yield data
                idxs,idxe=self._next_window(idxs,idxe,batch_size,num)

    def shuffle(self):
        """Shuffle the iteration order of all three splits in place."""
        random.shuffle(self.train_data_idxs)
        random.shuffle(self.val_data_idxs)
        random.shuffle(self.test_data_idxs)