import os
import math
import random

import numpy as np
import tensorflow as tf
import cv2
import sys
slim = tf.contrib.slim
import utils
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from PIL import Image
from nets import ssd_vgg_300, ssd_common, np_methods,inception_resnet_v2
from preprocessing import ssd_vgg_preprocessing,inception_preprocessing
import visualization
#mpl.rcParams['axes.unicode_minus'] = False
# SimHei font so CJK characters render correctly in matplotlib titles.
plt.rcParams['font.sans-serif']=['SimHei']
# TensorFlow session: grow memory when needed. TF, DO NOT USE ALL MY GPU MEMORY!!!
gpu_options = tf.GPUOptions(allow_growth=True)
config = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)
# NOTE(review): this module-level InteractiveSession appears unused — the
# __main__ block creates sess1/sess2 on their own graphs. Confirm before removing.
isess = tf.InteractiveSession(config=config)
class_num=764  # number of classifier output classes (matches the restored checkpoint)
image_size=299  # Inception-ResNet-v2 input resolution
# NOTE(review): set after `import tensorflow`, so some early C++ log lines may
# already have been emitted — confirm this still suppresses what you intend.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
def preload(sess,g2):
    """Build the Inception-ResNet-v2 classifier graph in *g2* and restore weights.

    Args:
        sess: tf.Session created on graph g2.
        g2: tf.Graph the classifier is built into.

    Returns:
        Tuple (prob, images, cost, target_conv_layer, y_c,
        target_conv_layer_grad, gb_grad, net): the softmax output, input
        placeholder, and gradient tensors used for Grad-CAM-style
        visualisation.
    """
    with sess.as_default():
        with g2.as_default():
            # Input placeholder: batch of preprocessed RGB images.
            images = tf.placeholder("float", [None, image_size, image_size, 3])
            with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
                with slim.arg_scope([slim.batch_norm], is_training=False):
                    net, end_points = inception_resnet_v2.inception_resnet_v2(images, class_num, is_training=False)

                    prob = end_points['Predictions']  # after softmax
                    print('prob:', prob)
                    # NOTE(review): this is -sum(log(p) * log(p)); the usual
                    # entropy-style cost is -sum(p * log(p)) — confirm intent.
                    cost = (-1) * tf.reduce_sum(tf.multiply(tf.log(prob), tf.log(prob)), axis=1)
                    # print('cost:', cost)
                    # print('endpoints:', end_points.keys())
                    # Last convolutional feature map, used as the Grad-CAM target.
                    target_conv_layer = end_points['Conv2d_7b_1x1']
                    # NOTE(review): sums logits * log(prob) over ALL classes;
                    # canonical Grad-CAM uses the single target-class score —
                    # confirm this weighting is intended.
                    y_c = tf.reduce_sum(tf.multiply(net, tf.log(prob)), axis=1)
                    # print('y_c:', y_c)
                    target_conv_layer_grad = tf.gradients(y_c, target_conv_layer)[0]
                    # print('target_conv_layer_grad:', target_conv_layer_grad)

                    # Guided-backprop-style gradient of the cost w.r.t. the input.
                    gb_grad = tf.gradients(cost, images)[0]
                    init = tf.global_variables_initializer()
                    latest_checkpoint = "./inception_resnet/model.ckpt-370132"
                    saver = tf.train.Saver()
            # Initialise then overwrite with checkpoint weights.
            sess.run(init)
            saver.restore(sess, latest_checkpoint)
            return prob, images, cost, target_conv_layer, y_c, target_conv_layer_grad, gb_grad, net;
#SSD net
# Input placeholder.
def SSDpreload(isess,g1):
    """Build the SSD-300 VGG detection graph in *g1* and restore its weights.

    Args:
        isess: tf.Session created on graph g1.
        g1: tf.Graph the SSD model is built into.

    Returns:
        Tuple (ssd_anchors, image_4d, bbox_img, predictions, localisations,
        img_input): anchor boxes, the preprocessed 4-D image tensor, the
        image bbox tensor, the network outputs, and the raw-image
        placeholder to feed.
    """
    with isess.as_default():
        with g1.as_default():
            net_shape = (300, 300)
            data_format = 'NHWC'
            # Raw uint8 HxWx3 image placeholder.
            img_input = tf.placeholder(tf.uint8, shape=(None, None, 3))
            # Evaluation pre-processing: resize to SSD net shape.
            image_pre, labels_pre, bboxes_pre, bbox_img = ssd_vgg_preprocessing.preprocess_for_eval(
                img_input, None, None, net_shape, data_format, resize=ssd_vgg_preprocessing.Resize.WARP_RESIZE)
            image_4d = tf.expand_dims(image_pre, 0)

            # Define the SSD model on a fresh graph, so variables are never
            # reused here. (The original computed `'ssd_net' in locals()`,
            # which is always False at this point in a fresh call, so reuse
            # was unconditionally None anyway.)
            ssd_net = ssd_vgg_300.SSDNet()
            with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
                predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=None)

            # Restore SSD model.
            ckpt_filename = 'checkpoints/ssd_300_vgg.ckpt'
            # ckpt_filename = '../checkpoints/VGG_VOC0712_SSD_300x300_ft_iter_120000.ckpt'
            isess.run(tf.global_variables_initializer())
            saver = tf.train.Saver()
            saver.restore(isess, ckpt_filename)

            # SSD default anchor boxes for the 300x300 input.
            ssd_anchors = ssd_net.anchors(net_shape)
            return ssd_anchors,image_4d,bbox_img,predictions, localisations,img_input

# Main image processing routine.
def process_image(isess,img, image_4d,predictions, localisations,img_input,ssd_anchor,select_threshold=0.5, nms_threshold=.45, net_shape=(300, 300)):
    """Run the SSD detector on *img* and return only 'car' detections.

    Args:
        isess: tf.Session bound to the SSD graph.
        img: HxWx3 uint8 image array.
        image_4d, predictions, localisations, img_input: tensors from SSDpreload().
        ssd_anchor: SSD anchor boxes from SSDpreload(). (The original body
            ignored this parameter and read the module global ``ssd_anchors``;
            it is now honoured — the caller passes the same object.)
        select_threshold: minimum class score to keep a detection.
        nms_threshold: IoU threshold for non-maximum suppression.
        net_shape: SSD input resolution.

    Returns:
        (rclasses, rscores, rbboxes) restricted to VOC class 7 ('car').

    NOTE(review): ``bbox_img`` is still read from module scope since it is
    not in this signature — confirm it belongs to the same graph as
    ``predictions``.
    """
    # Run SSD network.
    rimg, rpredictions, rlocalisations, rbbox_img = isess.run([image_4d, predictions, localisations, bbox_img],
                                                              feed_dict={img_input: img})

    # Get classes and bboxes from the net outputs.
    rclasses, rscores, rbboxes, max_pos = np_methods.ssd_bboxes_select(
        rpredictions, rlocalisations, ssd_anchor,
        select_threshold=select_threshold, img_shape=net_shape, num_classes=21, decode=True)

    rbboxes = np_methods.bboxes_clip(rbbox_img, rbboxes)
    rclasses, rscores, rbboxes = np_methods.bboxes_sort(rclasses, rscores, rbboxes, top_k=400)
    rclasses, rscores, rbboxes = np_methods.bboxes_nms(rclasses, rscores, rbboxes, nms_threshold=nms_threshold)

    # Keep only VOC class 7 ('car'). The original index-and-slice loop
    # mutated the arrays while iterating, which both discarded leading car
    # detections and skipped entries; a boolean mask filters correctly.
    keep = rclasses == 7
    rclasses = rclasses[keep]
    rscores = rscores[keep]
    rbboxes = rbboxes[keep]

    # Resize bboxes to original image shape. Note: useless for Resize.WARP!
    rbboxes = np_methods.bboxes_resize(rbbox_img, rbboxes)
    return rclasses, rscores, rbboxes


def predict(g2,cropimg_list, sess, prob, images):
    """Classify each cropped detection and display its top-1 label.

    Args:
        g2: tf.Graph holding the classifier built by preload().
        cropimg_list: list of HxWx3 uint8 numpy arrays (cropped detections).
        sess: tf.Session bound to g2.
        prob: softmax output tensor returned by preload().
        images: input placeholder returned by preload().

    Side effects: writes tmp<N>.jpg files to the working directory, shows a
    matplotlib window per crop, and runs ``os.system("pause")`` (Windows-only)
    after each one.

    NOTE(review): new decode/preprocess ops are added to g2 on EVERY call,
    so the graph grows without bound across calls — a leak for a long
    interactive session; confirm acceptable.
    NOTE(review): an empty cropimg_list leaves batch_img undefined and
    raises NameError at sess.run — the current caller guards against this.
    """
    #dirlist = image_dir.split(' ')
    with sess.as_default():
        with g2.as_default():
            batch_size = len(cropimg_list)
            print(str(batch_size) + ' files detected')
            counter = 0
            for croparr in cropimg_list:
                # Round-trip each crop through a temporary JPEG so the stock
                # inception preprocessing (which consumes a decoded JPEG
                # tensor) can be reused unchanged.
                cropimg = Image.fromarray(croparr)
                save_dir='tmp'+str(counter)+'.jpg'
                cropimg.save(save_dir)
                img = tf.image.decode_jpeg(tf.read_file(save_dir), channels=3)
                processed_image = inception_preprocessing.preprocess_image(img,
                                                                           image_size,
                                                                           image_size,
                                                                           is_training=False, )
                processed_images = tf.expand_dims(processed_image, 0)
                # New crops are concatenated in FRONT, so batch order is the
                # reverse of cropimg_list (prob_np and img_v stay mutually
                # aligned, so the display below is still consistent).
                if counter == 0:
                    batch_img = processed_images
                else:
                    batch_img  = tf.concat([processed_images, batch_img], 0)
                counter += 1

            img_v = sess.run(batch_img)
            prob_np = sess.run(prob, feed_dict={images: img_v})
            for i in range(batch_size):
                top1,top5=utils.print_prob(prob_np[i], './labels.txt')
                fig = plt.figure("Detect image")  # figure window title
                # Rescale the preprocessed image back into [0, 1] for display.
                img = img_v[i].astype(float)
                img -= np.min(img)
                img /= img.max()
                plt.imshow(img)

                plt.axis('on')  # show axes ('off' would hide them)
                plt.title('top1:'+top1)  # figure title
#demo/_0_1.jpg demo/_0_6.jpg
                plt.show()
                os.system("pause");
            print('finish')

            return;
def checklist(dirllist):
    """Validate that every entry in *dirllist* names an existing .jpg file.

    Prints a diagnostic for the first offending entry and returns False;
    returns True when every path checks out (including an empty list).
    """
    for path in dirllist:
        valid = os.path.isfile(path) and path.endswith('.jpg')
        if valid:
            continue
        print(path + " does not exist or is not a jpg file")
        return False
    return True
if __name__=="__main__":
    # Two independent graphs/sessions: one for the SSD car detector, one
    # for the Inception-ResNet classifier.
    g1 = tf.Graph()  # graph loaded into Session 1 (SSD detector)
    g2 = tf.Graph()  # graph loaded into Session 2 (classifier)

    sess1 = tf.Session(graph=g1)  # Session1
    sess2 = tf.Session(graph=g2)  # Session2
    ssd_anchors,image_4d,bbox_img,predictions, localisations,img_input=SSDpreload(sess1,g1);
    prob, images, cost, target_conv_layer, y_c, target_conv_layer_grad, gb_grad, net = preload(sess2,g2)

    # Interactive loop: read space-separated .jpg paths, detect cars with
    # the SSD net, then classify every crop.
    while (True):
        image_dir = input("please input your file dir or type 'quit':")
        if (image_dir == 'quit'):
            break;
        elif image_dir=='' or image_dir == None:
            continue;

        img_list=(image_dir.split(' '))
        if not checklist(img_list):
            continue;
        while (len(img_list)>0):
            img_dir=img_list.pop()  # pop() processes the paths right-to-left
            img = mpimg.imread(img_dir)
            #fig = plt.figure(figsize=(12, 16))
            rclasses, rscores, rbboxes = process_image(sess1,img, image_4d,predictions, localisations,img_input,ssd_anchors)
            if len(rclasses)==0:
                print("No any car object found in picture " + img_dir+" !!!!!!!!!!!!!!!!!!!!!!!!!!")
                continue;
            # plt_bboxes draws the detections and returns the cropped regions.
            cropimg_list=visualization.plt_bboxes(img, rclasses, rscores, rbboxes)
            os.system("pause");  # Windows-only pause between images
            predict(g2,cropimg_list, sess2, prob, images)





