import ast
import os
import sys

import jieba
import numpy as np
import tensorflow as tf

import data_proc
from model import Seq2SeqModel
from properties import Properties
from seq2seq import create_cell, get_batch, run_step

class talker(object):
    """Interactive chatbot wrapper around a trained Seq2Seq model.

    Restores the latest checkpoint, loads the encoder/decoder
    vocabularies and the bucket configuration once in ``__init__``;
    ``talk`` then maps a single sentence to the model's decoded reply.
    """

    def _init_session(self, sess, props):
        """Build the inference graph, restore the checkpoint and load vocabularies.

        :param sess: tf.Session the model weights are restored into
        :param props: Properties object holding paths and model attributes
        :return: the same session, now holding the restored weights
        """
        celltype = props.getProperties('Attrs.celltype')
        workspace = props.getProperties('Path.checkpoint')
        # First argument True: build the forward-only (decoding) graph.
        self.model = Seq2SeqModel(True, create_cell(celltype), props)
        self.model.build_graph()
        ckpt = tf.train.get_checkpoint_state(workspace)
        if ckpt and ckpt.model_checkpoint_path:
            self.model.saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            # Without trained weights the talker is useless; fail fast.
            print('Could not find checkpoint!', file=sys.stderr)
            sys.exit(1)
        # Decode one sentence at a time.
        self.model.batch_size = 1
        data = props.getProperties('Path.workspace')
        enc_vocab_path = os.path.join(
            data, "vocab%d.enc" % int(props.getProperties('Attrs.enc_vocab_size')))
        dec_vocab_path = os.path.join(
            data, "vocab%d.dec" % int(props.getProperties('Attrs.dec_vocab_size')))
        # token -> id map for encoding requests; id -> token list for decoding replies.
        self.enc_vocab, _ = data_proc.initialize_vocabulary(enc_vocab_path)
        _, self.rev_dec_vocab = data_proc.initialize_vocabulary(dec_vocab_path)
        return sess

    def __init__(self):
        self.props = Properties('standard.props')
        self.props.readProperties()
        self.sess = tf.Session()
        self.sess = self._init_session(self.sess, self.props)
        # BUGFIX: the bucket list is plain literal data (e.g. "[(5,10),(10,15)]");
        # ast.literal_eval parses it safely, whereas eval() would execute
        # arbitrary code planted in the properties file.
        self.buckets = ast.literal_eval(self.props.getProperties('Attrs.buckets'))

    def talk(self, request):
        """
        Talk with the model.

        :param request: str, input sentence
        :return: str, decoded response (UNK tokens rendered as 'XX')
        """
        # Segment the sentence into space-separated tokens before id lookup.
        request = ' '.join(jieba.cut(request))
        token_ids = data_proc.sentence_to_token_ids(tf.compat.as_bytes(request), self.enc_vocab)
        # BUGFIX: a request longer than the largest bucket used to raise
        # "ValueError: min() arg is an empty sequence". Fall back to the
        # last bucket and truncate the input so decoding still works.
        eligible = [b for b in range(len(self.buckets)) if self.buckets[b][0] > len(token_ids)]
        if eligible:
            bucket_id = min(eligible)
        else:
            bucket_id = len(self.buckets) - 1
            token_ids = token_ids[:self.buckets[bucket_id][0] - 1]
        encoder_inputs, decoder_inputs, target_weights = get_batch(
            self.props, {bucket_id: [(token_ids, [])]}, bucket_id, batch_size=1)
        _, _, output_logits = run_step(self.sess, self.props, self.model,
                                       encoder_inputs, decoder_inputs, target_weights, bucket_id, True)
        # Greedy decoding: pick the most likely token at each position,
        # then cut the sequence at the first EOS marker.
        outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
        if data_proc.EOS_ID in outputs:
            outputs = outputs[:outputs.index(data_proc.EOS_ID)]
        response = "".join([tf.compat.as_str(self.rev_dec_vocab[output]) for output in outputs])
        response = response.replace(data_proc.UNK, 'XX').strip()
        return response
