#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Created on 2015年6月18日

@author: yangzhou1
'''
import tensorflow as tf  # 0.12
import chatv2.seq2seq_model as seq2seq_model
import os
import numpy as np
# Command-line flags (TF 1.x style). The help text is a runtime string and is
# left as-is; it says: "whether to use 16-bit floats (default 32-bit)".
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_boolean(
    'use_fp16',
    False,
    '是否使用16位浮点数（默认32位）'
)
# Special token ids — must match the layout used when the vocabularies and
# the model were built at training time.
PAD_ID = 0  # padding token
GO_ID = 1   # decoder start-of-sequence token
EOS_ID = 2  # end-of-sequence token
UNK_ID = 3  # out-of-vocabulary token

# Token-per-line vocabulary files produced by the preprocessing step.
train_encode_vocabulary = 'data/train_encode_vocabulary'
train_decode_vocabulary = 'data/train_decode_vocabulary'

def read_vocabulary(input_file):
    """Load a vocabulary file with one token per line.

    Returns a pair ``(lookup, tokens)`` where ``lookup`` maps each token to
    its (zero-based) line index and ``tokens`` is the list of tokens in file
    order. If a token appears more than once, the last occurrence wins in
    ``lookup``.
    """
    with open(input_file, mode="r", encoding='utf-8') as vocab_file:
        tokens = [raw_line.strip() for raw_line in vocab_file]
    lookup = {token: index for index, token in enumerate(tokens)}
    return lookup, tokens

# vocab_en: token -> id lookup used to encode user input;
# vocab_de: id -> token list used to render the model's reply.
vocab_en, _, = read_vocabulary(train_encode_vocabulary)
_, vocab_de, = read_vocabulary(train_decode_vocabulary)
print("vocab_en------------------")
print(vocab_en)
print("vocab_de------------------")
print(vocab_de)
# Vocabulary sizes fed to the model.
# NOTE(review): the original comment here claimed "vocabulary size 5000" but
# 100 is used — confirm this matches the vocabulary the model was trained with.
vocabulary_encode_size = 100
vocabulary_decode_size = 100

# (encoder_length, decoder_length) bucket sizes used for padded batching.
buckets = [(5, 10), (10, 15), (20, 25), (40, 50)]
layer_size = 256  # units per layer
num_layers = 3   # number of layers
batch_size =  1
dtype = tf.float16 if FLAGS.use_fp16 else tf.float32
# Build the seq2seq graph in inference mode (forward_only=True); the
# learning-rate arguments are unused at inference but required by the ctor.
model = seq2seq_model.Seq2SeqModel(source_vocab_size=vocabulary_encode_size,
                                   target_vocab_size=vocabulary_decode_size,
                                   buckets=buckets,
                                   size=layer_size,
                                   num_layers=num_layers,
                                   max_gradient_norm= 5.0,
                                   batch_size=batch_size,
                                   learning_rate=0.01,
                                   learning_rate_decay_factor=0.99,
                                   forward_only=True,
                                   dtype=dtype)
model.batch_size = 1  # decode one sentence at a time

with tf.Session() as sess:
    # Load the exported SavedModel from the 'model' directory into this
    # session. The original code compared the literal string "model" against
    # None (always true, a leftover from a checkpoint-based restore flow);
    # check that the directory actually exists instead.
    model_dir = "model"
    if os.path.isdir(model_dir):
        from tensorflow.python.saved_model import tag_constants
        tf.saved_model.loader.load(sess, [tag_constants.SERVING], model_dir)
    else:
        print("没找到模型")

    # Interactive REPL: read a line, encode it character-by-character,
    # run one greedy decode step through the seq2seq model, print the reply.
    while True:
        input_string = input('me > ')
        # Leave the chat loop (and close the session cleanly) on 'quit'.
        if input_string == 'quit':
            break

        # Map each character of the input to its encoder vocabulary id,
        # falling back to UNK_ID for out-of-vocabulary characters.
        input_string_vec = [vocab_en.get(ch, UNK_ID) for ch in input_string.strip()]
        print(input_string_vec)

        # Pick the smallest bucket whose encoder length can hold the input.
        # Guard against inputs longer than the largest bucket (>= 40 chars),
        # which previously crashed with ValueError on min() of an empty list.
        fitting = [b for b in range(len(buckets)) if buckets[b][0] > len(input_string_vec)]
        if not fitting:
            print('AI > 输入太长了')
            continue
        bucket_id = min(fitting)
        print(bucket_id)

        encoder_inputs, decoder_inputs, target_weights = model.get_batch(
            {bucket_id: [(input_string_vec, [])]}, bucket_id)
        print(encoder_inputs)
        print(decoder_inputs)
        _, _, output_logits = model.step(
            sess, encoder_inputs, decoder_inputs, target_weights, bucket_id, True)
        # Greedy decode: take the argmax token at each time step and
        # truncate the reply at the first end-of-sequence token.
        outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
        if EOS_ID in outputs:
            outputs = outputs[:outputs.index(EOS_ID)]

        print(outputs)
        response = "".join([tf.compat.as_str(vocab_de[output]) for output in outputs])
        print('AI > ' + response)