# coding=utf-8
import sys
import os
import numpy as np
import time
from ctc_func import greedy_decode
from get_features import RecognizeSpeech_FromFile
from get_symbol_list import GetSymbolList
from language_model_func import ModelLanguage

import acl
from utils import *
from acl_dvpp import Dvpp
from acl_model import Model
from acl_image import AclImage
from image_net_classes import get_image_net_class
from PIL import Image, ImageDraw, ImageFont

# Input file paths
#speech_voice_path =  'speech_voice/teacher.wav'
#speech_voice_path2 = 'speech_voice/teacher.pcm' # audio path as saved on the dev board
#wav_seg_path = 'wav_seg/' # directory where segmented audio clips are saved
#audiotype = 'wav' # input audio format; may also be set to pcm

speech_voice_path = 'speech_voice/jlu.wav' # replace with any wav file you want to recognize

MODEL_PATH = "./speech_model/speech_model.om"
MODEL_WIDTH = 200
MODEL_HEIGHT = 1600

class Speech_Recog(object):
    """Speech recognition pipeline backed by an Ascend ACL offline model.

    Owns the ACL device/context/stream, a DVPP instance and the OM model,
    and provides feature pre-processing, model inference, and
    CTC + language-model post-processing for a single wav file.
    """

    def __init__(self, model_path, model_width, model_height):
        """Store configuration; actual ACL resources are created in init().

        Args:
            model_path: path to the .om offline model file.
            model_width / model_height: model input dimensions.
        """
        self.device_id = 0
        self.context = None
        self.stream = None
        # Initialize every lazily-created handle to None so that __del__
        # is safe even when init() was never called or failed part-way.
        self.run_mode = None
        self._model = None
        self._dvpp = None
        self._model_path = model_path
        self._model_width = model_width
        self._model_height = model_height

    def __del__(self):
        """Release model, DVPP, stream, context and device in reverse order."""
        if self._model:
            del self._model
        if self._dvpp:
            del self._dvpp
        if self.stream:
            acl.rt.destroy_stream(self.stream)
        if self.context:
            acl.rt.destroy_context(self.context)
        acl.rt.reset_device(self.device_id)
        acl.finalize()
        print("[Sample] class Sample release source success")

    def _init_resource(self):
        """Initialize the ACL runtime: device, context, stream and run mode."""
        print("[Sample] init resource stage:")
        ret = acl.init()
        # Label matches the call being checked (original said "acl.rt.set_device").
        check_ret("acl.init", ret)

        ret = acl.rt.set_device(self.device_id)
        check_ret("acl.rt.set_device", ret)

        self.context, ret = acl.rt.create_context(self.device_id)
        check_ret("acl.rt.create_context", ret)

        self.stream, ret = acl.rt.create_stream()
        check_ret("acl.rt.create_stream", ret)

        self.run_mode, ret = acl.rt.get_run_mode()
        check_ret("acl.rt.get_run_mode", ret)

        print("Init resource stage success")

    def init(self):
        """Set up ACL, DVPP and the model.

        Returns:
            SUCCESS on success, FAILED if DVPP or model initialization fails.
        """
        # Initialize ACL resources.
        self._init_resource()
        self._dvpp = Dvpp(self.stream, self.run_mode)

        # Initialize DVPP.
        ret = self._dvpp.init_resource()
        if ret != SUCCESS:
            print("Init dvpp failed")
            return FAILED

        # Load the offline model.
        self._model = Model(self.run_mode, self._model_path)
        ret = self._model.init_resource()
        if ret != SUCCESS:
            print("Init model failed")
            return FAILED

        return SUCCESS

    def pre_process(self, speech_voice_path):
        """Read a wav file and extract the model input features.

        Returns:
            (features, in_len): features reshaped to [1, 1600, 200, 1]
            (the layout the OM model expects); in_len is the effective
            input length reported by the feature extractor.
        """
        features, in_len = RecognizeSpeech_FromFile(speech_voice_path)
        features1 = np.reshape(features, [1, 1600, 200, 1]).copy()
        return features1, in_len

    def inference(self, resized_image):
        """Run model inference on a prepared AclImage; return raw outputs."""
        return self._model.execute(resized_image.data(), resized_image.size)

    def post_process(self, resultList, in_len):
        """Decode the model output into text.

        CTC greedy-decodes the first output tensor, maps symbol ids to
        pinyin, converts pinyin to text with the language model, and
        appends the result to results/asr_results.txt.

        Args:
            resultList: model output tensors; only resultList[0] is used.
            in_len: effective input length (currently unused here).

        Returns:
            The recognized text string.
        """
        # Flatten the 3-D output to the 2-D matrix the decoder expects.
        dets = resultList[0]

        # CTC greedy decoding.
        rr, ret1 = greedy_decode(dets)

        # Strip occurrences of symbol id 1423 (removed once per even
        # original index; list.remove raises ValueError when absent).
        for i in range(len(ret1)):
            if i % 2 == 0:
                try:
                    ret1.remove(1423)
                except ValueError:
                    pass

        list_symbol_dic = GetSymbolList()

        # Map symbol ids to their pinyin strings.
        r_str = []
        for i in ret1:
            r_str.append(list_symbol_dic[i])

        string_pinyin = str(r_str)
        ml = ModelLanguage('language_model')
        ml.LoadModel()
        str_pinyin = r_str
        r = ml.SpeechToText(str_pinyin)

        print(str_pinyin)
        print(r)
        # Append the recognition result; the with-block closes the file
        # (the original also called f.close() redundantly inside it).
        with open('results/asr_results.txt', 'a+b') as f:
            data = ' ' + string_pinyin[1:-1] + '-' + r + '\n'
            f.write(data.encode())

        return r

def main():
    """Run speech recognition on a single wav file (test entry point)."""
    # Instantiate the recognizer with the OM model path and input dimensions.
    sr = Speech_Recog(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)

    # Initialize ACL / DVPP / model resources.
    ret = sr.init()
    check_ret("Speech_Recog.init ", ret)

    print("Start inference")

    # Extract audio features from the wav file.
    features, in_len = sr.pre_process(speech_voice_path)

    # Wrap the features in an AclImage so they can be fed to the model.
    features1 = AclImage(features)

    # Run model inference.
    result = sr.inference(features1)

    # CTC + language-model decoding of the raw output (also appends the
    # result to results/asr_results.txt).
    sr.post_process(result, in_len)

if __name__ == "__main__":
    main()