from utils.common import dump_json,load_logging_config,load_from_json
import os
import logging
import wave
from collections import defaultdict
import numpy as np
import json
from consta import path_sep

def gen_summary_from_dict(data_dict, trn_file, format):
    """Persist a {utterance_id: transcript} mapping as a summary file.

    format 'txt' writes one "key value" pair per line to `trn_file`;
    format 'json' dumps the whole mapping via dump_json; any other value
    is reported as an unsupported format and nothing is written.
    """
    if format == 'json':
        dump_json(data_dict, trn_file)
    elif format == 'txt':
        lines = [f'{key} {value}\n' for key, value in data_dict.items()]
        with open(trn_file, 'w', encoding='utf8') as out:
            out.writelines(lines)
    else:
        logging.error('不支持该格式的数据集索引文件')

def get_data_dict_from_summary(trn_file, format=None, num_limit=0):
    """Load a {utterance_id: transcript} mapping from a summary file.

    Args:
        trn_file: path of the summary file.
        format: 'json' or 'txt'; inferred from the file extension when None.
        num_limit: for 'txt', keep at most this many lines (0 = keep all).

    Returns:
        The mapping dict, or None when the format is unsupported.
    """
    if format is None:
        format = trn_file.split('.')[-1]
    if format == 'json':
        data_dict = load_from_json(trn_file)
    elif format == 'txt':
        # Single read covers both the limited and unlimited case (the
        # original duplicated the whole open/readlines block).
        with open(trn_file, 'r', encoding='utf8') as f:
            lines = f.readlines()
        if num_limit:
            lines = lines[:num_limit]
        data_dict = {}
        for line in lines:
            line = line.strip()
            if not line:
                continue  # tolerate blank lines instead of raising IndexError
            # partition keeps everything after the first space as the value
            # and yields '' (rather than crashing) for a key with no value
            key, _, value = line.partition(' ')
            data_dict[key] = value
    else:
        logging.error('不支持该格式的数据集索引文件')
        data_dict = None
    return data_dict

def _accumulate_wav_stats(wav_path, wav_durations, sample_rate, channel_num, sample_width):
    """Read one wav file's header and fold its stats into the accumulators.

    Mutates the arguments in place: appends the clip duration in seconds to
    `wav_durations` and bumps the sample_rate / channel_num / sample_width
    defaultdict(int) histograms.
    """
    # `with` closes the handle even if the header is malformed (the original
    # open/close pair leaked the handle when wave raised).
    with wave.open(wav_path, 'rb') as wav:
        frame_num = wav.getnframes()
        fr = wav.getframerate()
        sample_rate[fr] += 1
        channel_num[wav.getnchannels()] += 1
        sample_width[wav.getsampwidth()] += 1
        wav_durations.append(frame_num / fr)


def _finalize_subset_stats(subset, num, wav_durations, sample_rate, channel_num, sample_width, out_file):
    """Record aggregate audio statistics on `subset` and dump them to `out_file`.

    audio_dur_statistics is [mean, median, mode (whole seconds), sum, max] of
    the clip durations. Raises on an empty `wav_durations` (np.max of an empty
    array), same as the original code.
    """
    subset['audio_num'] = len(wav_durations)
    durations = np.array(wav_durations, dtype=np.float64)
    # int64 instead of the original int8: int8 silently wraps for clips longer
    # than 127 s, feeding negative values into bincount and raising ValueError.
    counts = np.bincount(durations.astype(np.int64))
    subset['audio_dur_statistics'] = [np.mean(durations), np.median(durations),
                                      np.argmax(counts).astype(np.float64),
                                      np.sum(durations), np.max(durations)]
    subset['sample_rate'] = sample_rate
    subset['channel_num'] = channel_num
    subset['sample_width'] = sample_width
    subset['num'] = num
    # A well-formed dataset should be homogeneous in all three properties.
    if len(sample_rate) != 1:
        logging.error('数据集中的音频有两种及以上的采样率')
    if len(channel_num) != 1:
        logging.error('数据集中的音频有两种及以上的通道数')
    if len(sample_width) != 1:
        logging.error('数据集中的音频有两种及以上的位深')
    dump_json(subset, out_file)


def add_dataset():
    """Index a local speech dataset and register it under jsons/datasets/.

    For each of the train/val/test subsets this either
      * walks the wav tree, builds the transcript summary file and the audio
        statistics ('filename' mode: one transcript file per audio file), or
      * reuses an existing summary file and only computes the statistics, or
      * merges a previously computed audio_infos.json back into the config.
    Finally the whole dataset description is written to
    jsons/datasets/<name>.json, refusing to overwrite an existing file.
    """
    dataset_name = 'thchs301'
    description = ''
    path = 'C:\\Users\\tellw\\chinese_speech_dataset\\THCHS-30\\data_thchs30'
    wav_ext = '.wav'  # only wav audio is supported for now
    # 'filename': each audio file has its own transcript file, located via the
    # audio file's name. 'summary': all transcripts of the subset live in one
    # text file keyed by audio file name.
    wav2trn_type = 'filename'
    train = {'subdir': 'train', 'trn_ext': '.wav.trn', 'trn_dir': 'data', 'trn_file': '', 'line_no': 0, 'trn_file_format': 'txt', 'num': 0}
    val = {'subdir': 'dev', 'trn_ext': '.wav.trn', 'trn_dir': 'data', 'trn_file': '', 'line_no': 0, 'trn_file_format': 'txt', 'num': 0}
    test = {'subdir': 'test', 'trn_ext': '.wav.trn', 'trn_dir': 'data', 'trn_file': '', 'line_no': 0, 'trn_file_format': 'txt', 'num': 0}
    # The front-end is expected to let shared transcript options apply to all
    # three subsets at once. When trn_file is provided as an absolute path it
    # must first be converted with os.path.relpath(file, path) so it is
    # relative to the subset directory.
    old_cwd = os.getcwd()
    os.chdir(path)  # every subset path below is relative to the dataset root
    try:
        for subset in (train, val, test):
            if not os.path.exists(subset['subdir']):
                logging.error(f'{subset["subdir"]}目录不存在，无法获取对应数据')
                continue
            if subset['trn_file'] == '' or not os.path.exists(f'{subset["subdir"]}/{subset["trn_file"]}'):
                if wav2trn_type == 'filename':
                    wav_dir = subset['subdir']
                    data_dict = {}
                    wav_durations = []
                    sample_rate = defaultdict(int)
                    channel_num = defaultdict(int)
                    sample_width = defaultdict(int)
                    for root, dirs, files in os.walk(wav_dir):
                        # forward-slash relative dir, shared by the transcript
                        # lookup path and the summary key (hoisted out of the
                        # inner loop; it only depends on `root`)
                        rel_dir = os.path.relpath(root, wav_dir).replace(path_sep, '/')
                        for file in files:
                            if not file.endswith(wav_ext):
                                continue
                            stem = os.path.splitext(file)[0]
                            # transcripts live in a parallel tree under trn_dir
                            with open(f'{subset["trn_dir"]}/{rel_dir}/{stem}{subset["trn_ext"]}', encoding='utf8') as trnf:
                                # keep only the configured line, with all
                                # internal whitespace removed
                                content = ''.join(trnf.readlines()[subset['line_no']].strip().split())
                            data_dict[f'{rel_dir}/{stem}'] = content
                            logging.info(f'processing audio {root}/{file}')
                            _accumulate_wav_stats(f'{root}/{file}', wav_durations, sample_rate, channel_num, sample_width)
                    trn_file = subset['trn_file'] or 'trn_summary.txt'
                    gen_summary_from_dict(data_dict, f'{wav_dir}/{trn_file}', subset['trn_file_format'])
                    subset['trn_file'] = trn_file
                    _finalize_subset_stats(subset, len(data_dict), wav_durations, sample_rate, channel_num, sample_width, f'{wav_dir}/audio_infos.json')
                else:
                    # bug fix: the original formatted `wav_dir` here, a name
                    # only bound in the 'filename' branch above -> NameError
                    logging.error(f'{subset["subdir"]}/{subset["trn_file"]}不存在，且您设置获取语音-文本对照表的方式是summary')
            elif not os.path.exists(f'{subset["subdir"]}/audio_infos.json'):
                # summary already exists, but its statistics were never computed
                wav_durations = []
                sample_rate = defaultdict(int)
                channel_num = defaultdict(int)
                sample_width = defaultdict(int)
                data_dict = get_data_dict_from_summary(f'{subset["subdir"]}/{subset["trn_file"]}', subset['trn_file_format'])
                for ddk in data_dict:
                    file = f'{subset["subdir"]}/{ddk}.wav'
                    logging.info(f'processing audio {file}')
                    _accumulate_wav_stats(file, wav_durations, sample_rate, channel_num, sample_width)
                _finalize_subset_stats(subset, len(data_dict), wav_durations, sample_rate, channel_num, sample_width, f'{subset["subdir"]}/audio_infos.json')
            else:
                # both summary and statistics exist: merge the stored statistics
                # into the config without overwriting explicitly configured keys
                audio_infos_json = load_from_json(f'{subset["subdir"]}/audio_infos.json')
                for aik in audio_infos_json:
                    if aik not in subset:
                        subset[aik] = audio_infos_json[aik]
                dump_json(subset, f'{subset["subdir"]}/audio_infos.json')
        # TODO: to find audio files shared between train and val/test, reshape
        # each waveform's magnitudes into a square, downscale to 8x8 and compare
        # ahash/phash/dhash image-similarity fingerprints.
    finally:
        # restore the caller's working directory even when indexing fails
        os.chdir(old_cwd)
    dataset_json = {
        'name': dataset_name,
        'description': description,
        'path': path,
        'wav_ext': wav_ext,
        'wav2trn_type': wav2trn_type,
        'train': train,
        'val': val,
        'test': test
    }
    if f'{dataset_name}.json' not in os.listdir('jsons/datasets'):
        dump_json(dataset_json, f'jsons/datasets/{dataset_name}.json')
    else:
        logging.error('数据集json文件重名，请重命名')

def _main():
    """Script entry point: configure logging, then register the dataset."""
    load_logging_config()
    add_dataset()


if __name__ == '__main__':
    _main()