'''Compute descriptive attributes of a speech dataset (audio counts, duration statistics, vocabulary size, sample rate, channels, bit depth).'''
import json
import os
import logging
import logging.config as lc
import wave
import sys
import numpy as np
import pdb
import re

# Load the project-wide logging configuration.
# NOTE(review): the path is relative to the process working directory, so this
# import-time call fails when the script is launched from elsewhere — confirm.
lc.fileConfig('../common/logger.conf')

def get_dataset_component_info(datasetDict):
    '''
    Walk the dataset's train/test/val directories and compute, per component:
    number of audio files, duration statistics, sentence count, character
    count, vocabulary size, and the distribution of sample rates, channel
    counts and sample widths. Results are stored back into datasetDict and
    the enriched dict is dumped to jsons/datasets/<name>.json.

    Args:
        datasetDict: dataset configuration dict with at least the keys
            'path' (dataset root directory), 'name', and 'components'
            (a sub-dict for each of 'train'/'test'/'val').

    Returns:
        None; per-component results (including an 'err' string, empty when
        the component is homogeneous) are written into datasetDict.
    '''
    cur_dir = os.getcwd()
    try:
        for compo in ['train', 'test', 'val']:
            compo_dir = os.path.join(datasetDict['path'], compo)
            os.chdir(compo_dir)
            wav_durations = []   # seconds, one entry per audio file
            sample_rate = {}     # sample rate -> file count
            channel_num = {}     # channel count -> file count
            sample_width = {}    # sample width (bytes) -> file count
            # THCHS30-style transcriptions put the whole sentence on one line,
            # so sentence count == number of TRANS.TXT lines.
            word_num = 0
            dictionary = set()   # distinct characters seen in transcriptions
            logging.info('entering directory %s' % compo_dir)
            with open('TRANS.TXT', encoding='utf8') as f:
                lines = f.readlines()
            for line in lines:
                wav_file = line.split()[0] + '.wav'
                logging.info('processing file %s' % wav_file)
                # BUG FIX: the original opened the undefined name `file`;
                # the wav path comes from the first column of TRANS.TXT.
                # The context manager also closes each handle (the original
                # leaked one open file per audio).
                with wave.open(wav_file, 'rb') as wav:
                    frame_num = wav.getnframes()
                    nchannel = wav.getnchannels()
                    fr = wav.getframerate()
                    sw = wav.getsampwidth()
                sample_rate[fr] = sample_rate.get(fr, 0) + 1
                channel_num[nchannel] = channel_num.get(nchannel, 0) + 1
                sample_width[sw] = sample_width.get(sw, 0) + 1
                wav_durations.append(frame_num / fr)
                words = line.split()[1]
                word_num += len(words)
                dictionary.update(words)
            logging.info('calculating attributes of %s component in %s' % (compo, datasetDict['name']))
            comp = datasetDict['components'][compo]
            comp['audio_num'] = len(lines)
            durations = np.array(wav_durations, dtype=np.float64)
            # BUG FIX: the original cast durations to int8, overflowing for
            # any clip longer than 127 s; bin on int64 whole seconds instead.
            counts = np.bincount(durations.astype(np.int64))
            # [mean, median, mode (most common whole second), total, max]
            comp['audio_len_statistics'] = [
                float(np.mean(durations)),
                float(np.median(durations)),
                float(np.argmax(counts)),
                float(np.sum(durations)),
                float(np.max(durations)),
            ]
            comp['sentence_num'] = len(lines)
            comp['word_num'] = word_num
            comp['vocabulary_num'] = len(dictionary)
            comp['sample_rate'] = sample_rate
            comp['channel_num'] = channel_num
            comp['sample_width'] = sample_width
            comp['err'] = ''
            if len(sample_rate) != 1:
                logging.error('数据集中的音频有两种及以上的采样率')
                comp['err'] += '数据集中的音频有两种及以上的采样率'
            if len(channel_num) != 1:
                logging.error('数据集中的音频有两种及以上的通道数')
                comp['err'] += '数据集中的音频有两种及以上的通道数'
            if len(sample_width) != 1:
                logging.error('数据集中的音频有两种及以上的位深')
                comp['err'] += '数据集中的音频有两种及以上的位深'
    finally:
        # Restore the caller's working directory even if a component fails.
        os.chdir(cur_dir)
    os.makedirs('jsons/datasets', exist_ok=True)
    with open('jsons/datasets/%s.json' % datasetDict['name'], 'w', encoding='utf8') as f:
        json.dump(datasetDict, f, ensure_ascii=False, indent=4)
    logging.info('dump to json file jsons/datasets/%s.json' % datasetDict['name'])

def write_nterms_freq(trns, n, limit, f):
    '''
    Count the frequent length-n substrings (character n-grams) of a list of
    texts and write the result, sorted by descending frequency, to a file.

    Args:
        trns: list of strings; each string is one piece of text
        n: n-gram length
        limit: minimum number of occurrences for an n-gram to be kept
        f: writable file object; one line of repr'd (ngram, count) pairs
           is appended

    Returns:
        None
    '''
    freqs = {}
    for trn in trns:
        # BUG FIX: the original used range(len(trn)-n), which skips the last
        # n-gram; a string of length L contains L-n+1 n-grams.
        # Counting in a single pass with dict.get replaces the original's
        # separate zero-initialization pass.
        for i in range(len(trn) - n + 1):
            term = trn[i:i + n]
            freqs[term] = freqs.get(term, 0) + 1
    freqs = {k: v for k, v in freqs.items() if v >= limit}
    freqs = sorted(freqs.items(), key=lambda d: d[1], reverse=True)
    f.write(str(freqs) + '\n')

def gen_nterms_limit(json_file, component):
    '''
    Count the frequent n-grams across one or more dataset components and
    write them to a "<components>-frequent-words.txt" file in the dataset
    root directory.

    Args:
        json_file: path of the dataset configuration json file
        component: component spec such as "train" or "train+test"

    Returns:
        None
    '''
    with open(json_file, 'r', encoding='utf8') as fp:
        dataset = json.load(fp)
    saved_dir = os.getcwd()
    os.chdir(dataset['path'])
    transcripts = []
    for part in component.split('+'):
        logging.info('entering directory %s' % part)
        os.chdir(part)
        for name in os.listdir():
            if not name.endswith(dataset['trn_file_replace']):
                continue
            # First line of each trn file is the transcription; strip the
            # inter-word whitespace so n-grams span word boundaries.
            with open(name, encoding='utf8') as fp:
                first_line = fp.readlines()[0]
            transcripts.append(''.join(first_line.strip().split()))
        logging.info('processed all trn files in %s' % part)
        os.chdir('..')
    out_name = component.replace('+', '-') + '-frequent-words.txt'
    with open(out_name, 'w', encoding='utf8') as fp:
        for tn in dataset['terms']:
            logging.info('counting %s-term frequency' % tn)
            write_nterms_freq(transcripts, int(tn), dataset['terms'][tn], fp)
    os.chdir(saved_dir)

def validateDatasetDir(datasetDir):
    '''
    Validate that a dataset directory contains complete train/test/val
    components.

    Args:
        datasetDir: dataset root directory

    Returns:
        The first error message string found, or None when every component
        validates. (BUG FIX: the original discarded the strings returned by
        validateDatasetComponent, so callers could never observe a failure.)
    '''
    for component in ('train', 'test', 'val'):
        err = validateDatasetComponent(datasetDir, component)
        if err:
            return err
    return None

def validateDatasetComponent(datasetDir, component):
    '''
    Validate one component of a dataset: the component directory itself, its
    TRANS.TXT transcription file, and every wav file TRANS.TXT references.

    Args:
        datasetDir: dataset root directory
        component: component name, e.g. 'train'

    Returns:
        An error message string describing the first missing item, or None
        when the component is complete.
    '''
    compo_dir = os.path.join(datasetDir, component)
    if not os.path.exists(compo_dir):
        return '%s/%s directory should exist' % (datasetDir, component)
    trans_path = os.path.join(compo_dir, 'TRANS.TXT')
    if not os.path.exists(trans_path):
        return '%s/%s/TRANS.TXT transcription file should exist' % (datasetDir, component)
    # Check wav paths by joining against the component directory instead of
    # os.chdir'ing into it — the original permanently changed the process
    # working directory as a side effect.
    with open(trans_path, encoding='utf8') as f:
        for line in f:
            if not line.strip():
                continue  # tolerate blank lines in TRANS.TXT
            wav_id = line.split()[0]
            if not os.path.exists(os.path.join(compo_dir, wav_id + '.wav')):
                return '%s/%s/%s.wav audio file should exist' % (datasetDir, component, wav_id)
    return None

if __name__=='__main__':
    # Dataset configuration json, relative to this script's directory.
    json_file='../jsons/datasets/THCHS-30.json'
    # NOTE(review): get_dataset_component_info takes a dataset dict, not a
    # (json_file, component) pair — the commented-out call below looks stale
    # relative to the current signature; confirm before re-enabling.
    # get_dataset_component_info(json_file,'train')
    # gen_nterms_limit(json_file,"train")