import json
import os
import logging
import logging.config as lc
import re
from pypinyin import pinyin,Style
import wave
import numpy as np
import random
import threading
from manage_dataset import gen_summary_from_dict,get_data_dict_from_summary

from utils.common import load_logging_config,dynamic_import,parseParams,return_ins
from utils.thread import threadsafe_generator
from consta import path_sep

def build_wavs(component,filename,audio_file_re,trn_file_replace,label_type):
    """Build wav-list, transcript and (optionally) syllable summary files.

    Walks every sub-directory named in ``component`` (a '+'-separated list,
    relative to the directory containing ``filename``), matches audio files
    against ``audio_file_re`` and, for each audio file whose transcript file
    (wav id + ``trn_file_replace``) exists, records the wav path and label.
    Summary files are first written with an ``.unfinished`` suffix and only
    renamed to their final names on success, so a crashed run never leaves a
    seemingly-complete summary behind.

    Args:
        component: '+'-separated component sub-directory names.
        filename: path of the wav summary file to produce; must contain the
            substring ``wavs.txt`` (the sibling ``trns.txt``/``syls.txt``
            names are derived from it by replacement).
        audio_file_re: regex whose group(1) is the wav id.
        trn_file_replace: suffix appended to the wav id to locate the
            transcript file.
        label_type: 'syllables' to emit pinyin syllable labels (requires
            pypinyin); anything else keeps the raw transcript text.

    Returns:
        (data_list, wav_dict, label_dict): list of wav ids, id -> wav path,
        and id -> label (syllables or transcript text).
    """
    cur_dir=os.getcwd()
    os.chdir(os.path.dirname(filename))
    trn_name=filename.replace('wavs.txt','trns.txt')
    syl_name=filename.replace('wavs.txt','syls.txt')
    data_list=list()
    wav_dict=dict()
    label_dict=dict()
    try:
        # Write to '.unfinished' temp names and rename at the end.  (The
        # original used an inconsistent '.finished' suffix for the
        # transcript file -- normalized here; final names are unchanged.)
        trn_fp=open(trn_name+'.unfinished','w',encoding='utf8')
        syllables_fp=open(syl_name+'.unfinished','w',encoding='utf8') if label_type=='syllables' else None
        try:
            with open(filename+'.unfinished','w',encoding='utf8') as f:
                for compo in component.split('+'):
                    for file in os.listdir(compo):
                        m=re.match(audio_file_re,file)  # match once, reuse below
                        if not m:
                            continue
                        logging.info('processing %s'%file)
                        wav_id=m.group(1)
                        trn_path=compo+'/'+wav_id+trn_file_replace
                        if not os.path.exists(trn_path):
                            # BUGFIX: '%' previously bound only to `compo`, so the
                            # path suffix was appended after the formatted message.
                            logging.error('%s doesn\'t exist but its responding wav file exists'%trn_path)
                            continue
                        data_list.append(wav_id)
                        f.write('%s %s\n'%(wav_id,compo+'/'+file))
                        wav_dict[wav_id]=compo+'/'+file
                        with open(trn_path,encoding='utf8') as trn_file:
                            # First line only; drop all internal whitespace.
                            trn_content=''.join(trn_file.readlines()[0].split())
                            trn_fp.write('%s %s\n'%(wav_id,trn_content))
                            if syllables_fp is not None:
                                # TONE3: tone digit appended; neutral tone -> '5'.
                                syllables=' '.join([py[0] for py in pinyin(trn_content,style=Style.TONE3,neutral_tone_with_five=True)])
                                syllables_fp.write('%s %s\n'%(wav_id,syllables))
                                label_dict[wav_id]=syllables
                            else:
                                label_dict[wav_id]=trn_content
        finally:
            trn_fp.close()
            if syllables_fp is not None:
                syllables_fp.close()
        # Everything succeeded: promote the temp files to their final names.
        os.rename(filename+'.unfinished',filename)
        os.rename(trn_name+'.unfinished',trn_name)
        if label_type=='syllables':
            os.rename(syl_name+'.unfinished',syl_name)
    finally:
        os.chdir(cur_dir)
    return data_list,wav_dict,label_dict

def summary(path,summary_again,subset,num_limit):
    """Collect the {wav_id: transcript} dict for one dataset subset.

    When ``summary_again`` is true and the subset maps wavs to transcripts by
    filename, the summary is rebuilt by walking the audio tree and pairing
    each audio file with the transcript file at the same relative path/stem,
    then persisted via gen_summary_from_dict.  In every other case the
    existing summary file is read back.

    Args:
        path: dataset root; the cwd is temporarily switched here.
        summary_again: force a rebuild of the summary file.
        subset: subset config dict (subdir, wav_ext, trn_dir, trn_ext,
            line_no, trn_file, trn_file_format, wav2trn_type).
        num_limit: maximum number of entries to return.

    Returns:
        dict mapping relative wav path (without extension) to transcript,
        truncated to at most ``num_limit`` entries.
    """
    data_dict={}
    old_cwd=os.getcwd()
    os.chdir(path)
    if not os.path.exists(subset['subdir']):
        logging.error(f'目录{subset["subdir"]}不存在，无法获取对应数据')
    elif summary_again and subset['wav2trn_type']=='filename':
        # Rebuild: pair every audio file with the transcript file sharing
        # its relative path and stem.
        for root,dirs,files in os.walk(subset['subdir']):
            for file in files:
                if file.endswith(subset['wav_ext']):
                    rel=os.path.relpath(root,subset['subdir']).replace(path_sep,'/')
                    stem=os.path.splitext(file)[0]
                    with open(f'{subset["trn_dir"]}/{rel}/{stem}{subset["trn_ext"]}',encoding='utf8') as trnf:
                        content=''.join(trnf.readlines()[subset["line_no"]].strip().split())
                        data_dict[f'{rel}/{stem}']=content
                        logging.info(f'processing audio {root}/{file}')
        gen_summary_from_dict(data_dict,f'{subset["subdir"]}/{subset["trn_file"]}',subset['trn_file_format'])
    else:
        # Covers both "don't rebuild" and "summary supplied directly".  A
        # pre-built summary file must follow the same format as the
        # auto-generated one.  (Merged: the two original branches were
        # identical duplicated code.)
        data_dict=get_data_dict_from_summary(f'{subset["subdir"]}/{subset["trn_file"]}',subset['trn_file_format'],num_limit)
    os.chdir(old_cwd)
    # NOTE(review): num_limit==0 truncates any non-empty dict to {} --
    # confirm callers always pass a positive limit when data exists.
    if len(data_dict)<=num_limit:
        return data_dict
    return {ddk:data_dict[ddk] for ddk in list(data_dict.keys())[:num_limit]}

def return_or_with_sample_rate(modulename,classname,parameters,attribute,sample_rate):
    """Resolve a class, or one of its bound methods, substituting the real
    sample rate into the constructor parameters.

    When ``attribute`` is non-empty it is a method spec of the form
    ``'name(args)'``: the class is instantiated with ``parameters`` (any
    positional value or keyword literally equal to 'sample_rate' replaced by
    ``sample_rate``) and the bound method plus the method's raw argument
    string are returned.  Otherwise the class object itself and the raw
    ``parameters`` string are returned unchanged.

    Returns:
        (callable_or_class, remaining_parameter_string)
    """
    obj=dynamic_import(f'{modulename}:{classname}')
    if attribute=='':
        return obj,parameters
    # The substitution logic is exactly process_params_with_sample_rate;
    # reuse it instead of duplicating the tuple/dict handling inline.
    t,d=process_params_with_sample_rate(parameters,sample_rate)
    ins=obj(*t,**d)
    func_params=attribute.split('(',1)
    # attribute looks like 'method(argstring)'; strip the trailing ')'.
    return getattr(ins,func_params[0]),func_params[1][:-1]
    
def process_params_with_sample_rate(params,sample_rate):
    """Parse a parameter spec and swap the literal placeholder 'sample_rate'
    for the actual sample rate.

    Only the first positional occurrence is replaced; if the placeholder is
    absent from the positionals but present as a keyword, that keyword's
    value is overwritten instead.

    Returns:
        (positional_tuple, keyword_dict) ready for ``f(*t, **d)``.
    """
    positional,keywords=parseParams(params)
    if 'sample_rate' in positional:
        replaced=list(positional)
        replaced[replaced.index('sample_rate')]=sample_rate
        positional=tuple(replaced)
    elif 'sample_rate' in keywords:
        keywords['sample_rate']=sample_rate
    return positional,keywords

def return_f(modulename,classname,parameters,attribute):
    """Resolve a class or a bound method of it (no parameter substitution).

    With an empty ``attribute`` the imported class and the untouched
    parameter string are returned.  Otherwise the class is instantiated with
    ``parameters`` and the bound method named by ``attribute`` (spec form
    ``'name(args)'``) is returned together with the method's raw argument
    string (the trailing ')' stripped).
    """
    cls=dynamic_import(f'{modulename}:{classname}')
    if not attribute:
        return cls,parameters
    args,kwargs=parseParams(parameters)
    instance=cls(*args,**kwargs)
    method_name,raw_args=attribute.split('(',1)
    return getattr(instance,method_name),raw_args[:-1]

class DataLoader:
    '''
    Data loader.

    Builds (feature, label) data for the train/val/test subsets described by
    a dataset configuration and exposes thread-safe infinite batch
    generators.  Two maintenance styles are supported via
    ``bttask_json['maintain_data_style']``:

    - 'total': every utterance is featurized once up front and batches are
      sliced from memory;
    - 'last': each batch is featurized lazily when it is requested.
    '''
    def __init__(self,dataset_json,bttask_json,train_data_preprocessor_json,val_data_preprocessor_json,test_data_preprocessor_json,lexicon_dict_json,frontend_processor_json,feature_extractor_json,train_num_limit=0,val_num_limit=0,test_num_limit=0):
        '''
        Initialize the data loader and immediately resolve processors and
        load the subsets required by the task.

        Args:
            dataset_json: dataset description (path, per-subset config,
                sample_rate/sample_width).
            bttask_json: task configuration (type, batch_size, summary_again,
                maintain_data_style).
            train_data_preprocessor_json / val_data_preprocessor_json /
                test_data_preprocessor_json: per-subset data preprocessor
                specs (modulename/classname/parameters/attribute).
            lexicon_dict_json: lexicon config with 'g2p' (text -> phonemes)
                and 'penc' (phoneme encoding) sections.
            frontend_processor_json: audio frontend processor spec.
            feature_extractor_json: acoustic feature extractor spec.
            train_num_limit / val_num_limit / test_num_limit: cap on the
                number of utterances kept per subset (see summary()).
        '''
        # Per-subset raw data: id -> transcript, ordered id list, and the
        # (shufflable) index order over that list.
        self.train_data_dict=dict()
        self.val_data_dict=dict()
        self.test_data_dict=dict()
        self.train_data_list=list()
        self.val_data_list=list()
        self.test_data_list=list()
        self.train_data_idxs=list()
        self.val_data_idxs=list()
        self.test_data_idxs=list()
        # Precomputed features/labels, only populated in 'total' style.
        self.train_fed_wavs=[]
        self.train_labels=[]
        self.train_lexicon_labels=[]
        self.train_ys=[]
        self.val_fed_wavs=[]
        self.val_labels=[]
        self.val_lexicon_labels=[]
        self.val_ys=[]
        self.test_fed_wavs=[]
        self.test_labels=[]
        self.test_lexicon_labels=[]
        self.test_ys=[]
        self.train_num_limit=train_num_limit
        self.val_num_limit=val_num_limit
        self.test_num_limit=test_num_limit
        self.dataset_json=dataset_json # dataset configuration
        self.bttask_json=bttask_json # training/inference task parameters
        # Frontend processing, feature extraction, audio/phoneme encoding and
        # any extra per-model data handling; feeds the batch generators.
        self.train_data_preprocessor_json=train_data_preprocessor_json
        self.val_data_preprocessor_json=val_data_preprocessor_json
        self.test_data_preprocessor_json=test_data_preprocessor_json
        self.lexicon_dict_json=lexicon_dict_json # lexicon: maps text to phonemes
        self.frontend_processor_json=frontend_processor_json
        self.feature_extractor_json=feature_extractor_json
        self._load_data()

    def preprocess(self,data_list,data_dict,subset_name):
        '''
        Read, front-process and featurize every wav in data_list and encode
        its label.

        Args:
            data_list: wav ids (relative paths without extension).
            data_dict: wav id -> transcript text.
            subset_name: 'train'/'val'/'test'; used to locate the audio.

        Returns:
            (fed_wavs, labels, lexicon_labels, ys, durations) -- features,
            raw transcripts, phoneme sequences, encoded label arrays, and
            per-utterance durations in seconds.
        '''
        fed_wavs=[]
        labels=[]
        lexicon_labels=[]
        ys=[]
        durations=[]
        for k in data_list:
            wav=wave.open(f'{self.dataset_json["path"]}/{self.dataset_json[subset_name]["subdir"]}/{k}.wav','rb')
            str_data=wav.readframes(wav.getnframes())
            durations.append(wav.getnframes()/self.sample_rate)
            wav.close()
            wave_data=np.frombuffer(str_data,dtype=self.sample_dtype)
            # Frontend processing then feature extraction.
            # wave_data.shape: (sample_num,)
            fed_wav=self.fe(self.fp(wave_data,*self.fpt,**self.fpd),*self.fet,**self.fed)
            fed_wavs.append(fed_wav)
            labels.append(data_dict[k])
            lexicon_labels_=self.g2p_f(data_dict[k],self.g2p_dict,*self.g2p_t,**self.g2p_d)
            lexicon_labels.append(lexicon_labels_)
            ys.append(np.array(self.penc_f(lexicon_labels_,self.penc_dict,*self.penc_t,**self.penc_d)))
        return fed_wavs,labels,lexicon_labels,ys,durations

    def _load_data(self):
        '''
        Resolve all configured processors and load the subsets required by
        the task type ('train' -> train+val, 'infer' -> test).
        '''
        # sample_rate/sample_width are dicts whose first key holds the value.
        # NOTE(review): always read from the 'train' subset, even for infer
        # tasks -- confirm all subsets share the same audio format.
        self.sample_rate=int(list(self.dataset_json['train']['sample_rate'].keys())[0])
        self.sample_width=int(list(self.dataset_json['train']['sample_width'].keys())[0])
        # BUGFIX: the original if/else assigned np.short in both branches, so
        # non-16-bit audio was decoded with the wrong dtype.  Map the byte
        # width to the matching signed dtype (np.short fallback preserves the
        # old behavior for unexpected widths).
        self.sample_dtype={1:np.int8,2:np.short,4:np.int32}.get(self.sample_width,np.short)
        # Frontend processor and feature extractor, with the configured
        # 'sample_rate' placeholder replaced by the real sample rate.
        self.fp,fpp=return_or_with_sample_rate(self.frontend_processor_json['modulename'],self.frontend_processor_json['classname'],self.frontend_processor_json['parameters'],self.frontend_processor_json['attribute'],self.sample_rate)
        self.fpt,self.fpd=process_params_with_sample_rate(fpp,self.sample_rate)
        self.fe,fep=return_or_with_sample_rate(self.feature_extractor_json['modulename'],self.feature_extractor_json['classname'],self.feature_extractor_json['parameters'],self.feature_extractor_json['attribute'],self.sample_rate)
        self.fet,self.fed=process_params_with_sample_rate(fep,self.sample_rate)
        # Lexicon dictionaries: empty -> None, .txt/.json -> parsed summary
        # file, anything else -> dynamically constructed instance.
        g2p_dict_file=self.lexicon_dict_json['g2p']['dict_file']
        if g2p_dict_file=='':
            self.g2p_dict=None
        elif g2p_dict_file.endswith(('.txt','.json')):
            self.g2p_dict=get_data_dict_from_summary(g2p_dict_file,os.path.splitext(g2p_dict_file)[1][1:])
        else:
            self.g2p_dict=return_ins(self.lexicon_dict_json['g2p']['dict_modulename'],self.lexicon_dict_json['g2p']['dict_classname'],self.lexicon_dict_json['g2p']['dict_parameters'],self.lexicon_dict_json['g2p']['dict_attribute'])
        penc_dict_file=self.lexicon_dict_json['penc']['dict_file']
        if penc_dict_file=='':
            self.penc_dict=None
        elif penc_dict_file.endswith(('.txt','.json')):
            self.penc_dict=get_data_dict_from_summary(penc_dict_file,os.path.splitext(penc_dict_file)[1][1:])
        else:
            self.penc_dict=return_ins(self.lexicon_dict_json['penc']['dict_modulename'],self.lexicon_dict_json['penc']['dict_classname'],self.lexicon_dict_json['penc']['dict_parameters'],self.lexicon_dict_json['penc']['dict_attribute'])
        # g2p / phoneme-encoding functions and their parsed call parameters.
        self.g2p_f,g2p_params=return_f(self.lexicon_dict_json['g2p']['modulename'],self.lexicon_dict_json['g2p']['classname'],self.lexicon_dict_json['g2p']['parameters'],self.lexicon_dict_json['g2p']['attribute'])
        self.g2p_t,self.g2p_d=parseParams(g2p_params)
        self.penc_f,penc_params=return_f(self.lexicon_dict_json['penc']['modulename'],self.lexicon_dict_json['penc']['classname'],self.lexicon_dict_json['penc']['parameters'],self.lexicon_dict_json['penc']['attribute'])
        self.penc_t,self.penc_d=parseParams(penc_params)
        # Per-subset batch preprocessor functions.
        self.train_dp_f,train_dp_params=return_f(self.train_data_preprocessor_json['modulename'],self.train_data_preprocessor_json['classname'],self.train_data_preprocessor_json['parameters'],self.train_data_preprocessor_json['attribute'])
        self.train_dp_t,self.train_dp_d=parseParams(train_dp_params)
        self.val_dp_f,val_dp_params=return_f(self.val_data_preprocessor_json['modulename'],self.val_data_preprocessor_json['classname'],self.val_data_preprocessor_json['parameters'],self.val_data_preprocessor_json['attribute'])
        self.val_dp_t,self.val_dp_d=parseParams(val_dp_params)
        self.test_dp_f,test_dp_params=return_f(self.test_data_preprocessor_json['modulename'],self.test_data_preprocessor_json['classname'],self.test_data_preprocessor_json['parameters'],self.test_data_preprocessor_json['attribute'])
        self.test_dp_t,self.test_dp_d=parseParams(test_dp_params)
        if self.bttask_json['type']=='train':
            self.train_data_dict=summary(self.dataset_json['path'],self.bttask_json['summary_again'],self.dataset_json['train'],self.train_num_limit)
            self.train_data_list=list(self.train_data_dict.keys())
            self.train_data_idxs=list(range(len(self.train_data_list)))
            self.val_data_dict=summary(self.dataset_json['path'],self.bttask_json['summary_again'],self.dataset_json['val'],self.val_num_limit)
            self.val_data_list=list(self.val_data_dict.keys())
            self.val_data_idxs=list(range(len(self.val_data_list)))
            if self.bttask_json['maintain_data_style']=='total':
                # Featurize everything up front and keep it in memory.
                self.train_fed_wavs,self.train_labels,self.train_lexicon_labels,self.train_ys,_=self.preprocess(self.train_data_list,self.train_data_dict,'train')
                self.val_fed_wavs,self.val_labels,self.val_lexicon_labels,self.val_ys,_=self.preprocess(self.val_data_list,self.val_data_dict,'val')
        elif self.bttask_json['type']=='infer':
            self.test_data_dict=summary(self.dataset_json['path'],self.bttask_json['summary_again'],self.dataset_json['test'],self.test_num_limit)
            self.test_data_list=list(self.test_data_dict.keys())
            self.test_data_idxs=list(range(len(self.test_data_list)))
            if self.bttask_json['maintain_data_style']=='total':
                self.test_fed_wavs,self.test_labels,self.test_lexicon_labels,self.test_ys,self.test_durations=self.preprocess(self.test_data_list,self.test_data_dict,'test')

    @staticmethod
    def _advance_window(idxs,idxe,batch_size,num):
        '''
        Return the next (start, end) batch slice bounds.

        Wraps to the beginning after the last batch; the final batch is
        clamped to end exactly at ``num`` (so it may overlap the previous
        batch) to keep every batch at full batch_size.  Extracted from the
        six identical copies previously inlined in the three generators.
        '''
        if idxe>=num:
            return 0,batch_size
        if idxe+batch_size>num:
            return num-batch_size,num
        return idxe,idxe+batch_size

    @threadsafe_generator
    def train_data_generator(self):
        '''Infinite thread-safe batch generator over the training subset.'''
        batch_size=self.bttask_json['batch_size']
        idxs,idxe=0,batch_size
        num=len(self.train_data_list)
        if self.bttask_json['maintain_data_style']=='total':
            # Features were precomputed in _load_data; just slice them.
            while True:
                sel=self.train_data_idxs[idxs:idxe]
                yield self.train_dp_f([self.train_fed_wavs[i] for i in sel],[self.train_ys[i] for i in sel],*self.train_dp_t,**self.train_dp_d)
                idxs,idxe=self._advance_window(idxs,idxe,batch_size,num)
        elif self.bttask_json['maintain_data_style']=='last':
            # Featurize lazily per batch to save memory.  (Only fed_wavs and
            # ys are used -- labels/lexicon_labels are discarded.)
            while True:
                sel=self.train_data_idxs[idxs:idxe]
                train_fed_wavs,_,_,train_ys,_=self.preprocess([self.train_data_list[i] for i in sel],self.train_data_dict,'train')
                yield self.train_dp_f(train_fed_wavs,train_ys,*self.train_dp_t,**self.train_dp_d)
                idxs,idxe=self._advance_window(idxs,idxe,batch_size,num)

    @threadsafe_generator
    def val_data_generator(self):
        '''Infinite thread-safe batch generator over the validation subset.'''
        batch_size=self.bttask_json['batch_size']
        idxs,idxe=0,batch_size
        num=len(self.val_data_list)
        if self.bttask_json['maintain_data_style']=='total':
            while True:
                sel=self.val_data_idxs[idxs:idxe]
                yield self.val_dp_f([self.val_fed_wavs[i] for i in sel],[self.val_ys[i] for i in sel],*self.val_dp_t,**self.val_dp_d)
                idxs,idxe=self._advance_window(idxs,idxe,batch_size,num)
        elif self.bttask_json['maintain_data_style']=='last':
            while True:
                sel=self.val_data_idxs[idxs:idxe]
                val_fed_wavs,_,_,val_ys,_=self.preprocess([self.val_data_list[i] for i in sel],self.val_data_dict,'val')
                yield self.val_dp_f(val_fed_wavs,val_ys,*self.val_dp_t,**self.val_dp_d)
                idxs,idxe=self._advance_window(idxs,idxe,batch_size,num)

    @threadsafe_generator
    def test_data_generator(self):
        '''
        Infinite thread-safe batch generator over the test subset.

        Unlike the train/val generators, each item is a 3-tuple:
        (preprocessed batch, raw transcript labels, durations in seconds).
        '''
        batch_size=self.bttask_json['batch_size']
        idxs,idxe=0,batch_size
        num=len(self.test_data_list)
        if self.bttask_json['maintain_data_style']=='total':
            while True:
                sel=self.test_data_idxs[idxs:idxe]
                yield self.test_dp_f([self.test_fed_wavs[i] for i in sel],[self.test_ys[i] for i in sel],*self.test_dp_t,**self.test_dp_d),[self.test_labels[i] for i in sel],[self.test_durations[i] for i in sel]
                idxs,idxe=self._advance_window(idxs,idxe,batch_size,num)
        elif self.bttask_json['maintain_data_style']=='last':
            while True:
                sel=self.test_data_idxs[idxs:idxe]
                test_fed_wavs,test_labels,_,test_ys,test_durations=self.preprocess([self.test_data_list[i] for i in sel],self.test_data_dict,'test')
                yield self.test_dp_f(test_fed_wavs,test_ys,*self.test_dp_t,**self.test_dp_d),test_labels,test_durations
                idxs,idxe=self._advance_window(idxs,idxe,batch_size,num)

    def shuffle(self):
        '''Shuffle the batch iteration order of all three subsets in place.'''
        random.shuffle(self.train_data_idxs)
        random.shuffle(self.val_data_idxs)
        random.shuffle(self.test_data_idxs)

if __name__=='__main__':
    # Script entry point: only configures logging.  The DataLoader itself is
    # driven by other modules that import this file.
    load_logging_config()