import difflib
import functools
import logging
import logging.config as lc
import os
import random
import re
import threading
import time

import numpy as np

from utils.conf import load_prime_config
from utils.ops import load_pinyin_dict, read_wav_data

# Configure the logging system from the project-wide logger config file
# (side effect at import time: installs handlers/formatters for all loggers).
lc.fileConfig('common/logger.conf')

class threadsafe_iter:
    """Wrap an iterator so it can be consumed safely from multiple threads.

    Every advance of the underlying iterator is serialized through a lock,
    preventing interleaved-state corruption when several threads pull items.
    """

    def __init__(self, it):
        # Underlying iterator plus the lock that guards its advancement.
        self.it = it
        self.lock = threading.Lock()

    def __iter__(self):
        # The wrapper is itself the iterator.
        return self

    def __next__(self):
        # Only one thread at a time may advance the wrapped iterator.
        with self.lock:
            return next(self.it)

def threadsafe_generator(f):
    """Decorator: make the generator produced by ``f`` safe for threads.

    Calls to the decorated function return a :class:`threadsafe_iter`
    wrapping the original generator, so multiple consumer threads can pull
    batches without racing each other.
    """
    # functools.wraps preserves f's __name__/__doc__ on the wrapper so that
    # logs and debuggers show the real generator's identity (bug fix: the
    # original wrapper silently renamed every decorated function to 'g').
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return threadsafe_iter(f(*args, **kwargs))
    return wrapper

def get_edit_distance(str1, str2):
    """Return a Levenshtein-style edit distance between two sequences.

    Built from difflib opcodes: a 'replace' span costs the length of its
    longer side, while 'insert'/'delete' spans cost their own length.
    """
    matcher = difflib.SequenceMatcher(None, str1, str2)
    costs = []
    for tag, i1, i2, j1, j2 in matcher.get_opcodes():
        src_len = i2 - i1
        dst_len = j2 - j1
        if tag == 'replace':
            costs.append(max(src_len, dst_len))
        elif tag == 'insert':
            costs.append(dst_len)
        elif tag == 'delete':
            costs.append(src_len)
    return sum(costs)

class SpeechModel:
    '''
    Speech model wrapper.

    Ties an acoustic model to a feature extractor and provides training,
    evaluation and recognition entry points.

    Args:
        speech_model: acoustic model instance (provides get_model(),
            input_shape / output_shape, get_loss_function(), weight I/O
            and forword() prediction)
        speech_features: acoustic feature extractor (SpeechFeatureMeta-like)
            instance
        model_saved_dir: directory under which checkpoints are stored
        max_label_length: maximum label sequence length per sample
    '''
    def __init__(self, speech_model, speech_features, model_saved_dir, max_label_length=64):
        self.data_loader = None
        self.speech_model = speech_model
        self.trained_model, self.base_model = speech_model.get_model()
        self.speech_features = speech_features
        self.max_label_length = max_label_length
        self.model_saved_dir = model_saved_dir

    @threadsafe_generator
    def _data_generator(self, batch_size, data_loader):
        '''
        Data generator for Keras fit_generator-style training.

        Yields ([x, y, input_length, label_length], labels) batches suitable
        for a model whose CTC loss is computed inside the graph.

        batch_size: number of samples per yielded batch
        '''
        # Dummy targets: the real CTC loss is computed inside the model.
        # BUG FIX: np.float was removed in NumPy >= 1.24; the builtin float
        # is the exact alias it used to resolve to (float64).
        labels = np.zeros((batch_size, 1), dtype=float)
        data_count = data_loader.get_data_count()
        index = 0

        while True:
            x = np.zeros((batch_size,) + self.speech_model.input_shape, dtype=float)
            y = np.zeros((batch_size, self.max_label_length), dtype=np.int16)
            input_length = []
            label_length = []
            for i in range(batch_size):
                wavdata, sample_rate, data_labels = data_loader.get_data(index)
                data_input = self.speech_features.run(wavdata, sample_rate)
                data_input = data_input.reshape(data_input.shape[0], data_input.shape[1], 1)
                # Downsampling factor introduced by the model's pooling stack.
                pool_size = self.speech_model.input_shape[0] // self.speech_model.output_shape[0]
                # Effective time length at the model output, clipped to the
                # output width. NOTE(review): `//pool_size + %pool_size` looks
                # like it was meant to be ceil division — preserved as-is.
                inlen = min(data_input.shape[0] // pool_size + data_input.shape[0] % pool_size,
                            self.speech_model.output_shape[0])
                input_length.append(inlen)
                x[i, 0:len(data_input)] = data_input
                y[i, 0:len(data_labels)] = data_labels
                label_length.append([len(data_labels)])
                index = (index + 1) % data_count
            # NOTE(review): np.matrix is deprecated but kept to preserve the
            # exact (batch, 1) 2-D shape the CTC loss expects — TODO migrate.
            label_length = np.matrix(label_length)
            input_length = np.array(input_length).T
            yield [x, y, input_length, label_length], labels

    def train_model(self, optimizer, data_loader, epochs=1, save_step=1, batch_size=16, last_epoch=0, call_back=None):
        '''
        Train the acoustic model, resuming from a checkpoint when possible.

        optimizer: Keras optimizer passed to compile()
        data_loader: dataset with get_data()/get_data_count()/shuffle()
        epochs: number of additional epochs to run on top of last_epoch
        save_step: write a checkpoint every `save_step` epochs
        batch_size: samples per training batch
        last_epoch: epoch to resume from when no epoch-record file exists
        call_back: optional list of Keras callbacks
        '''
        model_name = self.speech_model.get_model_name()
        save_filename = os.path.join(self.model_saved_dir, model_name, model_name)
        self.trained_model.compile(loss=self.speech_model.get_loss_function(), optimizer=optimizer)
        logging.info('compiles model successfully')
        yielddatas = self._data_generator(batch_size, data_loader)
        data_count = data_loader.get_data_count()
        num_iterate = data_count // batch_size
        loaded = False
        # Resume from the epoch-record file written by a previous run, if any.
        epoch_record = os.path.join(os.path.dirname(save_filename),
                                    'epoch_' + self.speech_model._model_name + '.txt')
        if os.path.exists(epoch_record):
            with open(epoch_record, encoding='utf8') as f:
                contents = f.read()
            if len(contents) > 0:
                try:
                    # Raw string fixes the invalid '\d' escape in the original
                    # pattern (DeprecationWarning, SyntaxError in future Pythons).
                    last_epoch = int(re.match(r'epoch(\d+)$', contents).group(1))
                    self.load_model(contents)
                    loaded = True
                except Exception:
                    # Malformed record or unreadable weights: fall back to the
                    # explicit last_epoch argument below.
                    logging.warning('could not resume from epoch record %s', epoch_record)
        if not loaded and last_epoch > 0:
            try:
                self.load_model(save_filename + '_epoch' + str(last_epoch))
            except Exception:
                # Checkpoint missing or corrupt: start from scratch.
                last_epoch = 0
        iter_end = last_epoch + epochs
        for epoch in range(last_epoch + 1, iter_end + 1):
            try:
                logging.info('training epoch %d/%d' % (epoch, iter_end))
                data_loader.shuffle()
                # fit_generator is deprecated in modern Keras but kept for
                # compatibility with the project's pinned framework version.
                self.trained_model.fit_generator(yielddatas, num_iterate, callbacks=call_back)
            except StopIteration:
                logging.error('generator error. please check data format.')
            if epoch % save_step == 0:
                # exist_ok avoids the original check-then-create race.
                os.makedirs(os.path.join(self.model_saved_dir, model_name), exist_ok=True)
                self.save_model(save_filename + '_epoch' + str(epoch))
        logging.info('model training complete')

    def load_model(self, filename):
        # Delegate weight loading to the acoustic model.
        self.speech_model.load_weights(filename)

    def save_model(self, filename):
        # Delegate weight saving to the acoustic model.
        self.speech_model.save_weights(filename)

    def evaluate_model(self, data_loader, data_count=-1, out_report=False, show_ratio=True, show_per_step=100):
        '''
        Evaluate word error rate over `data_count` samples (all when <= 0).

        out_report: also write a per-sample Test_Report_*.txt file
        show_ratio: log progress every `show_per_step` samples
        '''
        data_nums = data_loader.get_data_count()
        if data_count <= 0 or data_count > data_nums:
            data_count = data_nums
        try:
            ran_num = random.randint(0, data_nums - 1)
            words_num = 0
            word_error_num = 0
            nowtime = time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
            txt_obj = None
            if out_report:
                txt_obj = open('Test_Report_' + '_'.join(data_loader.component.split('+'))
                               + '_' + nowtime + '.txt', 'w', encoding='utf8')
                # Pre-allocate disk space so later writes don't force the file
                # to be relocated on disk (keeps write speed stable).
                txt_obj.truncate((data_count + 1) * 300)
                txt_obj.seek(0)
            i = 0
            while i < data_count:
                # Walk forward from a random start, wrapping around the set.
                wavdata, fs, data_labels = data_loader.get_data((ran_num + i) % data_nums)
                data_input = self.speech_features.run(wavdata, fs)
                data_input = data_input.reshape(data_input.shape[0], data_input.shape[1], 1)
                if data_input.shape[0] > self.speech_model.input_shape[0]:
                    # BUG FIX: logging.error was called with positional args but
                    # no %-placeholders, which raised a formatting error inside
                    # the logging machinery instead of logging the message.
                    logging.error(
                        'wave data length of num %d is too long. this data\'s length is %d, '
                        'expect <= %d. An exception is raised when testing Speech Model.',
                        (ran_num + i) % data_nums, data_input.shape[0],
                        self.speech_model.input_shape[0])
                    i += 1
                    continue
                pre = self.predict(data_input)
                words_n = data_labels.shape[0]
                words_num += words_n
                edit_distance = get_edit_distance(data_labels, pre)
                # Cap each sample's error at its own word count.
                word_error_num += min(edit_distance, words_n)
                if i % show_per_step == 0 and show_ratio:
                    logging.info('testing %d/%d' % (i, data_count))
                if out_report:
                    txt = str(i) + '\n'
                    txt += 'True:\t' + str(data_labels) + '\n'
                    txt += 'Pred:\t' + str(pre) + '\n'
                    txt += '\n'
                    txt_obj.write(txt)
                i += 1
            # BUG FIX: the original "'%f%%' % word_error_num / words_num * 100"
            # formatted FIRST and then divided a string by an int (TypeError).
            # Parenthesize the ratio; also guard against words_num == 0.
            error_ratio = word_error_num / max(words_num, 1) * 100
            summary = ('Speech Recognition ' + '_'.join(data_loader.component.split('+'))
                       + ' set word error ratio: ' + '%f%%' % error_ratio)
            logging.info(summary)
            if out_report:
                txt_obj.write(summary)
                # Trim the unused pre-allocated tail bytes.
                txt_obj.truncate()
                txt_obj.close()

        except StopIteration:
            logging.error('Model testing raise an error. Please check data format')

    def predict(self, data_input):
        # Run a single feature tensor through the acoustic model.
        # NOTE(review): 'forword' is the upstream model's (misspelled) API name.
        return self.speech_model.forword(data_input)

    def recognition_speech(self, wavsignal, fs):
        '''Recognize a waveform: returns the predicted symbol list.'''
        data_input = self.speech_features.run(wavsignal, fs)
        # BUG FIX: np.float removed in NumPy >= 1.24 — builtin float keeps
        # the same float64 dtype.
        data_input = np.array(data_input, dtype=float)
        data_input = data_input.reshape(data_input.shape[0], data_input.shape[1], 1)
        r1 = self.predict(data_input)
        # Map predicted indices back to pinyin symbols.
        list_symbol_dic, _ = load_pinyin_dict(load_prime_config()['dict_filename'])
        r_str = []
        for i in r1:
            r_str.append(list_symbol_dic[i])
        return r_str

    def recognize_speech_from_file(self, filename):
        '''Recognize speech from a WAV file on disk.'''
        wavsignal, sample_rate, _, _ = read_wav_data(filename)
        r = self.recognition_speech(wavsignal, sample_rate)
        return r

    @property
    def model(self):
        # Expose the compiled/trainable Keras model.
        return self.trained_model