#!/usr/bin/env python2.7
"""Export inception model given existing training checkpoints.
The model is exported as SavedModel with proper signatures that can be loaded by
standard tensorflow_model_server.
"""

from __future__ import print_function

import base64
import os.path

# This is a placeholder for a Google-internal import.

import tensorflow as tf

slim = tf.contrib.slim

import nets.inception as inception
from nets.inception_v4 import inception_v4_arg_scope
from preprocessing import preprocessing_factory

# Signature name under which the prediction graph is exported in the SavedModel.
predict_images = "porn_detect"

print("***********************************************************************************************")

tf.app.flags.DEFINE_string('checkpoint_dir', './my-data/43k_train/',
                           """Directory where to read training checkpoints.""")
tf.app.flags.DEFINE_string('output_dir', './serving_models',
                           """Directory where to export inference model.""")
tf.app.flags.DEFINE_integer('model_version', 12,
                            """Version number of the model.""")
tf.app.flags.DEFINE_integer('image_size', 299,
                            """Needs to provide same value as in training.""")
tf.app.flags.DEFINE_string('ckpt_file', 'model.ckpt-171126',
                           """Checkpoint file name inside checkpoint_dir.""")
tf.app.flags.DEFINE_string('model_name', 'InceptionV4',
                           """Model name (variable scope whose variables are restored).""")
FLAGS = tf.app.flags.FLAGS

# Number of output classes the checkpoint was trained with.
NUM_CLASSES = 3
# How many top predictions to expose in the 'classes' output.
NUM_TOP_CLASSES = 1
# Name of the graph tensor holding the full softmax predictions.
OUTPUT_NODENAME = "InceptionV4/Logits/Predictions:0"


WORKING_DIR = os.path.dirname(os.path.realpath(__file__))
print(WORKING_DIR)
print("checkpoint_dir is ", FLAGS.checkpoint_dir)
print("output_dir is ", FLAGS.output_dir)
print("model_version", FLAGS.model_version)
print("image_size is ", FLAGS.image_size)
print("***********************************************************************************************")


def export():
	"""Build the inference graph, restore checkpoint weights, and export a SavedModel.

	The exported model has one signature (named by ``predict_images``) that
	maps a batch of serialized tf.Example protos carrying JPEG-encoded images
	to the top-k class names ('classes') and the full softmax scores ('scores').
	"""
	with tf.Graph().as_default():
		# --- Labels -----------------------------------------------------
		# labels.txt has one "<id>:<label>" entry per line.
		with open('./labels.txt') as f:
			lines = f.readlines()

		names = {}
		for line in lines:
			i, label = line.split(':')
			names[int(i)] = label.strip()

		# Build the lookup table in index order so that table row k really is
		# class k — plain dict iteration order is not guaranteed on Python 2.
		names_tensor = tf.constant([names[i] for i in sorted(names)])
		names_lookup_table = tf.contrib.lookup.index_to_string_table_from_tensor(names_tensor)

		# --- Input transformation ---------------------------------------
		serialized_tf_example = tf.placeholder(tf.string, name='tf_example')
		feature_configs = {
			# Encoded JPEG bytes are strings (they are decoded by
			# preprocess_image), not floats.
			'image/encoded': tf.FixedLenFeature(shape=[], dtype=tf.string),
		}
		tf_example = tf.parse_example(serialized_tf_example, feature_configs)
		jpegs = tf_example['image/encoded']
		images = tf.map_fn(preprocess_image, jpegs, dtype=tf.float32)

		# --- Inference ---------------------------------------------------
		with slim.arg_scope(inception_v4_arg_scope()):
			logits, end_points = inception.inception_v4(
				images, num_classes=NUM_CLASSES,
				is_training=False)

			probs = tf.nn.softmax(logits)

			# Transform the output into the top-k indices and their names.
			topk_probs, topk_indices = tf.nn.top_k(probs, NUM_TOP_CLASSES)
			topk_names = names_lookup_table.lookup(tf.to_int64(topk_indices))

			init_fn = slim.assign_from_checkpoint_fn(
				os.path.join(FLAGS.checkpoint_dir, FLAGS.ckpt_file),
				slim.get_model_variables(FLAGS.model_name))

		# Export on CPU only (GPU count forced to 0); gpu_options is a
		# GPUOptions proto, kept for the (currently disabled) GPU path.
		config = tf.ConfigProto(
			device_count={'GPU': 0},
			gpu_options=tf.GPUOptions(allow_growth=True),
			allow_soft_placement=True,
			log_device_placement=False,
		)

		with tf.Session(config=config) as sess:
			init_fn(sess)

			# Full softmax over all classes, shape (?, NUM_CLASSES); this is
			# what gets exported as 'scores' (topk_probs would be the
			# top-k-only alternative).
			predictions = sess.graph.get_tensor_by_name(OUTPUT_NODENAME)

			# --- Export the inference model -----------------------------
			output_path = os.path.join(
				tf.compat.as_bytes(FLAGS.output_dir),
				tf.compat.as_bytes(str(FLAGS.model_version)))
			print('Exporting trained model to', FLAGS.output_dir)

			builder = tf.saved_model.builder.SavedModelBuilder(output_path)

			# Build the signature_def_map.
			predict_inputs_tensor_info = tf.saved_model.utils.build_tensor_info(jpegs)
			classes_output_tensor_info = tf.saved_model.utils.build_tensor_info(topk_names)
			scores_output_tensor_info = tf.saved_model.utils.build_tensor_info(predictions)

			prediction_signature = tf.saved_model.signature_def_utils.build_signature_def(
				inputs={'images': predict_inputs_tensor_info},
				outputs={
					'classes': classes_output_tensor_info,
					'scores': scores_output_tensor_info,
				},
				method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME)

			# Initialize the string lookup table when the model is loaded
			# by tensorflow_model_server.
			legacy_init_op = tf.group(tf.tables_initializer(), name='legacy_init_op')

			builder.add_meta_graph_and_variables(
				sess, [tf.saved_model.tag_constants.SERVING],
				signature_def_map={predict_images: prediction_signature},
				legacy_init_op=legacy_init_op)

			builder.save()
			print('Successfully exported model to %s' % FLAGS.output_dir)


def preprocess_image(image_buffer):
	"""Turn one JPEG-encoded byte string into a 3-D float image tensor.

	Pipeline: decode -> float [0, 1) -> central crop -> bilinear resize to
	(image_size, image_size) -> rescale to [-1, 1].
	"""
	# Decode the bytes as an RGB JPEG. Height and width are set dynamically
	# by decode_jpeg, so they are unknown at graph-construction time.
	decoded = tf.image.decode_jpeg(image_buffer, channels=3)

	# Convert to float32 in [0, 1); the adjust_* ops require this range.
	floats = tf.image.convert_image_dtype(decoded, dtype=tf.float32)

	# Keep the central 87.5% of the image area.
	cropped = tf.image.central_crop(floats, central_fraction=0.875)

	# resize_bilinear works on batches, so add and then drop a batch axis.
	batched = tf.expand_dims(cropped, 0)
	resized = tf.image.resize_bilinear(
		batched, [FLAGS.image_size, FLAGS.image_size], align_corners=False)
	squeezed = tf.squeeze(resized, [0])

	# Finally, rescale from [0, 1) to [-1, 1).
	rescaled = tf.multiply(tf.subtract(squeezed, 0.5), 2.0)

	return rescaled


def main(_):
	"""Entry point for tf.app.run(); ignores argv and runs the export."""
	export()



if __name__ == '__main__':
	# Parses the command-line flags and dispatches to main().
	tf.app.run()
