# coding=utf-8
import os
import difflib
import tensorflow as tf
import numpy as np
from utils import decode_ctc, GetEditDistance
from keras.optimizers import Adam

from keras.models import load_model

# 0. Prepare the vocabularies needed for decoding. The hyper-parameters must match
#    the ones used at training time; alternatively the vocabularies could be saved
#    to disk during training and read back directly here.
from utils import get_data, data_hparams

# Restrict TensorFlow/Keras to GPU #1. Set before any data loading so the
# environment variable is in place before the first TF session is created.
os.environ["CUDA_VISIBLE_DEVICES"] = "1"

# Build the training-time data configuration; the decoding vocabularies
# derived from it below must match the ones used during training.
data_args = data_hparams()
train_data = get_data(data_args)

# 1. Acoustic model -----------------------------------
from model_speech.cnn_ctc_one import Am, am_hparams, ctc_lambda
import keras

am_args = am_hparams()
# Output vocabulary size must match the pinyin vocabulary of the training data.
am_args.vocab_size = len(train_data.am_vocab)
am = Am(am_args)

# BUG FIX: the original code checked for '../LogDir/Logs/log3/model5_wights.h5'
# but then loaded 'log3/model_thchs30_wights.h5', so the existence guard never
# protected the load (it could crash on a missing file, or silently skip an
# existing one). Check and load the same path.
# NOTE(review): confirm which of the two original paths holds the intended
# checkpoint; the loaded one is used here.
am_weights_path = 'log3/model_thchs30_wights.h5'
if os.path.exists(am_weights_path):
    print('loading acoustic model...')
    am.ctc_model.load_weights(am_weights_path)

# 2. Language model -------------------------------------------
from model_language.transformer import Lm, lm_hparams

lm_args = lm_hparams()
# Vocabulary sizes must match training: pinyin tokens in, Chinese characters out.
lm_args.input_vocab_size = len(train_data.pny_vocab)
lm_args.label_vocab_size = len(train_data.han_vocab)
# Inference only: disable dropout.
lm_args.dropout_rate = 0.
print('loading language model...')
lm = Lm(lm_args)
# The transformer builds its ops in its own tf.Graph; bind the session to that
# graph, and create the Saver inside the graph so it sees the graph's variables.
sess = tf.Session(graph=lm.graph)
with lm.graph.as_default():
    saver = tf.train.Saver()
with sess.as_default():
    # Restore the most recent checkpoint from the log directory.
    # NOTE(review): if no checkpoint is found, `latest` is None and
    # saver.restore is expected to raise — presumably the directory
    # always contains a trained checkpoint; verify.
    latest = tf.train.latest_checkpoint('../LogDir/Logs/log3/log_lm/model2')
    saver.restore(sess, latest)

# Single WAV file to transcribe (absolute Windows path on the dev machine).
testPathWav = [
    "E:\\DeepLearning\\dl_studio\\bishe\\my_ch_speech_recognition-master\\devData\\3.wav"
]

# Configure a loader for one-at-a-time, in-order inference over exactly the
# file(s) listed above; an empty data_path means wav_lst entries are used as-is.
test_data = get_data(data_args)
test_data.data_path = ""
test_data.wav_lst = testPathWav
test_data.batch_size = 1
test_data.shuffle = False
# 3. Run inference -------------------------------------------
test_batch = test_data.get_am_batch()
for idx, _wav in enumerate(testPathWav):
    print('\n the ', idx, 'th example.')

    # Acoustic model: audio features -> CTC posteriors -> pinyin tokens.
    batch_inputs, _ = next(test_batch)
    features = batch_inputs['the_inputs']
    print("x==>", features.shape)
    am_posteriors = am.model.predict(features, steps=1)
    # Convert the numeric CTC output into pinyin text.
    _, pinyin_tokens = decode_ctc(am_posteriors, train_data.am_vocab)
    pinyin_line = ' '.join(pinyin_tokens)
    print('文本结果：', pinyin_line)

    # Language model: pinyin vocabulary ids -> Chinese characters.
    with sess.as_default():
        token_ids = [train_data.pny_vocab.index(tok)
                     for tok in pinyin_line.strip('\n').split(' ')]
        lm_input = np.array(token_ids).reshape(1, -1)
        predictions = sess.run(lm.preds, {lm.x: lm_input})
        sentence = ''.join(train_data.han_vocab[i] for i in predictions[0])
        print('识别结果：', sentence)
# sess.close()
