import tensorflow as tf

import os
import sys
slim = tf.contrib.slim
from nets.inception_resnet_v2 import *
from preprocessing import preprocessing_factory
from nets import nets_factory
import numpy as np
from datasets import dataset_factory
from datasets import dataset_utils
# import cv2
# from map_feature import map_coordinate as map_feature
# from random import random as rand

FLAGS = tf.app.flags.FLAGS

# --- Command-line flags for the inference script ---------------------------
#tf.app.flags.DEFINE_string('output_folder', '', '')
tf.app.flags.DEFINE_string('input_checkpoint',
                           '/home/xmx_work/saved_inception_v4/',
                           'Checkpoint file, or a directory of checkpoints, '
                           'to restore the model weights from.')
tf.app.flags.DEFINE_string(
    'dataset_name', 'handwriting', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string(
    'dataset_split_name', 'validation', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
    'dataset_dir', '/home/workspace/xmx/quiz-w7-2-densenet/data',
    'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string(
    'file_name', 'dog.jpg,panda.jpg', 'The name of the file.')
tf.app.flags.DEFINE_string(
    'model_name', 'inception_v4', 'The name of the architecture to evaluate.')
tf.app.flags.DEFINE_integer(
    'labels_offset', 0,
    'An offset for the labels in the dataset. This flag is primarily used to '
    'evaluate the VGG and ResNet architectures which do not use a background '
    'class for the ImageNet dataset.')
tf.app.flags.DEFINE_string(
    'sample_images_path', '/home/dl/offline/comp.jpg/test2/', 'The name of the sample path.')
tf.app.flags.DEFINE_string(
    'preprocessing_name', 'handwriting', 'The name of the preprocessing.')
tf.app.flags.DEFINE_string(
    'output', '/home/workspace/xmx/quiz-w7-2-densenet/data/result_inception_v4.csv', 'The name of the output path.')
# BUG FIX: help text was copy-pasted from the 'output' flag above; this flag
# is the directory holding the labels file read by dataset_utils below.
tf.app.flags.DEFINE_string(
    'labels', '/home/workspace/xmx/quiz-w7-2-densenet/data/',
    'The directory containing the labels file.')


# Collect the file names of the sample images to classify.  A non-directory
# path yields an empty list rather than an error.
if os.path.isdir(FLAGS.sample_images_path):
    sample_images = tf.gfile.ListDirectory(FLAGS.sample_images_path)
else:
    sample_images = []

print("Samples num: {0}".format(len(sample_images)))
print("Samples path: {0}".format(FLAGS.sample_images_path))

# Load the model
sess = tf.Session()  # module-level session shared by eval() and the main loop

# Dataset descriptor; only its num_classes metadata is used below to size the
# classifier head.
dataset = dataset_factory.get_dataset(
    FLAGS.dataset_name, FLAGS.dataset_split_name, FLAGS.dataset_dir)

# Select the model #
####################
# Build the network constructor for the chosen architecture in inference mode.
network_fn = nets_factory.get_network_fn(
    FLAGS.model_name,
    num_classes=(dataset.num_classes - FLAGS.labels_offset),
    is_training=False)


def eval():
    """Build the inference graph and restore the model weights.

    Creates a feed-able JPEG placeholder (``DecodeJpeg/contents``), runs the
    decoded image through the dataset's preprocessing and ``network_fn``, and
    restores the checkpoint named by ``FLAGS.input_checkpoint`` into the
    module-level ``sess``.

    NOTE(review): the name shadows the ``eval`` builtin; kept unchanged
    because the call site below depends on it.

    Returns:
        (logits, prediction_topk, end_points) tensors for later ``sess.run``.
    """
    eval_image_size = 299

    image_preprocessing_fn = preprocessing_factory.get_preprocessing(
        FLAGS.preprocessing_name,
        is_training=False)
    # Raw JPEG bytes are fed through this placeholder at inference time.
    input_tensor = tf.placeholder(tf.string, name='DecodeJpeg/contents')
    image = tf.image.decode_jpeg(input_tensor, channels=3)
    image = image_preprocessing_fn(image,
                                   eval_image_size, eval_image_size)
    image = tf.expand_dims(image, 0)  # add a batch dimension
    logits, end_points = network_fn(image)
    prediction = tf.nn.softmax(logits, name='prediction')
    # Keep only the indices of the 5 highest-probability classes.
    _, prediction_topk = tf.nn.top_k(prediction, k=5)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    saver = tf.train.Saver()
    if os.path.isdir(FLAGS.input_checkpoint):
        ckpt_filename = tf.train.latest_checkpoint(FLAGS.input_checkpoint)
    else:
        ckpt_filename = FLAGS.input_checkpoint
    print('restored from file %s' % ckpt_filename)
    # BUG FIX: ckpt_filename is already a complete path in both branches, so
    # the original os.path.join(FLAGS.input_checkpoint, ckpt_filename) only
    # worked by accident (join drops the first part when the second is
    # absolute) and broke for relative checkpoint paths.
    saver.restore(sess, ckpt_filename)
    return logits, prediction_topk, end_points


logits, prediction, end_points = eval()

# with-statement guarantees the CSV is closed (and flushed) even on error.
with open(FLAGS.output, "w+") as out_put:
    out_put.write("filename,label\n")  # head

    # Map class index -> human-readable label name.
    labels = dataset_utils.read_label_file(FLAGS.labels)

    for num, image_name in enumerate(sample_images):
        with open(os.path.join(FLAGS.sample_images_path, image_name), 'rb') as f:
            image_data = f.read()
        logit_values, predict_values, end_points_values = sess.run(
            [logits, prediction, end_points],
            feed_dict={'DecodeJpeg/contents:0': image_data})
        # predict_values holds the top-k indices, shape (1, 5); squeeze to a
        # flat 1-D array of the 5 best class ids.
        predict_values_final = np.squeeze(predict_values, 0)

        # Concatenate the label names of the top-5 classes.
        # BUG FIX: labels.get(i) returns None for an unknown class id, which
        # made the original `results += labels.get(i)` raise TypeError;
        # fall back to '' so one bad id cannot abort the whole run.
        results = "".join(
            labels.get(i, '') for i in predict_values_final.tolist())

        line_final = ",".join([image_name, results])
        print("{0}:{1}".format(num, line_final))

        out_put.write(line_final + "\n")
