#coding: utf-8
from .nets import nets_factory
from .preprocessing import preprocessing_factory
from tensorflow.contrib.framework.python.ops import variables
from tensorflow.python.training import saver as tf_saver
from tensorflow.python.training import supervisor
from tensorflow.core.protobuf import saver_pb2
import tensorflow as tf
import logging
import numpy as np
from IPython import embed

slim = tf.contrib.slim


def build_sess(conf):
    """Build a TF session with the classification model restored from a checkpoint.

    Args:
        conf: dict with keys 'model_name', 'num_classes',
            'preprocessing_name' (may be falsy to fall back to model_name),
            'checkpoint_path' (file or directory), and 'label_file'
            (one label name per line, line index == class id).

    Returns:
        (sess, prob_top_k, id2label): a live tf.Session with variables
        restored, the tf.nn.top_k tensor over class probabilities, and a
        dict mapping class id -> label name.
    """
    with tf.Graph().as_default():
        slim.get_or_create_global_step()

        ####################
        # Select the model #
        ####################
        network_fn = nets_factory.get_network_fn(
            conf['model_name'],
            num_classes=(conf['num_classes']),
            is_training=False)

        logging.info('use %d num_classes..............' % conf['num_classes'])

        # Placeholder fed later with a decoded RGB image.
        # NOTE(review): the shape is pinned to 256x256x3, so feeds of any
        # other size will fail — confirm callers always supply 256x256.
        image = tf.placeholder(tf.uint8, (256, 256, 3), name='image')

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = conf['preprocessing_name'] or conf['model_name']
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name,
            is_training=False)

        eval_image_size = network_fn.default_image_size
        image = image_preprocessing_fn(image, eval_image_size, eval_image_size)

        # Add the batch dimension the network expects.
        images = tf.expand_dims(image, 0)
        images.set_shape((1, eval_image_size, eval_image_size, 3))

        logits, _ = network_fn(images)
        prob = tf.nn.softmax(logits)
        prob_top_k = tf.nn.top_k(prob, conf['num_classes'])
        variables_to_restore = slim.get_variables_to_restore()

        # Resolve a directory to its most recent checkpoint file.
        if tf.gfile.IsDirectory(conf['checkpoint_path']):
            checkpoint_path = tf.train.latest_checkpoint(conf['checkpoint_path'])
        else:
            checkpoint_path = conf['checkpoint_path']

        # Map class id -> label name, one label per line.
        with open(conf['label_file']) as f:
            id2label = {i: line.strip() for i, line in enumerate(f)}

        saver = tf_saver.Saver(
            variables_to_restore or variables.get_variables_to_restore())

        sv = supervisor.Supervisor(
            summary_op=None,
            summary_writer=None,
            global_step=None,
            saver=None)

        # Create the session only once, after the graph is fully built
        # (the original leaked an extra unused Session created earlier).
        sess = tf.Session()
        saver.restore(sess, checkpoint_path)
        sv.start_queue_runners(sess)
        return sess, prob_top_k, id2label


class MyModel(object):
    """Image classifier wrapping a slim model restored from a checkpoint.

    conf example:

    checkpoint_path: 'data/model'
    num_classes: 6
    model_name: 'inception_v3'
    preprocessing_name: null
    eval_dir: '/tmp/tfmodel/'
    label_file: 'data/label.txt'
    """

    def __init__(self, conf, ext='jpg'):
        """Build the inference session once; `ext` selects the decoder
        ('jpg' or 'png') used for every image passed to detect_one_img."""
        self.sess, self.result_tensor, self.id2label = build_sess(conf)
        self.ext = ext

    def detect_one_img(self, img_path):
        """Classify a single image file.

        Args:
            img_path: path to an image file encoded as self.ext.

        Returns:
            (feature, id2label): feature is a numpy array of class
            probabilities indexed by class id (same ids as id2label);
            id2label maps class id -> label name.

        Raises:
            Exception: if self.ext is neither 'jpg' nor 'png'.
        """
        with open(img_path, 'rb') as f:
            img_byte = f.read()
        # Decode in a throwaway CPU graph/session so the long-lived
        # inference graph stays untouched.
        with tf.device('/cpu:0'):
            with tf.Graph().as_default():
                with tf.Session() as sess_pic_data:
                    if self.ext == 'jpg':
                        image_ndarray = tf.image.decode_jpeg(img_byte)
                    elif self.ext == 'png':
                        image_ndarray = tf.image.decode_png(img_byte)
                    else:
                        raise Exception('Unsupported Ext Type!')
                    image_ndarray = sess_pic_data.run(image_ndarray)
                    # Keep only RGB; drops an alpha channel if present.
                    image_ndarray = image_ndarray[:, :, 0:3]
        # NOTE(review): the 'image:0' placeholder is declared (256, 256, 3)
        # in build_sess, so non-256x256 images will fail the feed — confirm
        # inputs are pre-sized.
        result = self.sess.run(self.result_tensor,
                               feed_dict={'image:0': image_ndarray})
        probs = result.values[0]
        predictions = result.indices[0]
        # Re-order the top-k output into per-class-id probabilities.
        # Bug fix: the length was hard-coded to 6; use the actual number
        # of classes so any label file works.
        feature = [0] * len(self.id2label)
        for rank, class_id in enumerate(predictions):
            feature[class_id] = probs[rank]
        feature = np.array(feature)
        # feature holds one probability per class, ordered by class id
        # as in id2label.
        return feature, self.id2label
