from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import time
import argparse
from importlib import util
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
import cv2
import os
import sys

# Silence TensorFlow's C++ logging ('3' = print only fatal errors).
os.environ['TF_CPP_MIN_LOG_LEVEL']='3'

def load_graph(model_file):
  """Load a frozen GraphDef protobuf and import it into a fresh Graph.

  Args:
    model_file: path to a frozen ``.pb`` model file.

  Returns:
    A ``tf.Graph`` containing the imported graph. ``import_graph_def``
    prefixes every node name with ``import/`` (see ``get_io_op``).
  """
  graph = tf.Graph()
  # tf.GraphDef was removed from the TF2 namespace; use the compat alias so
  # this stays consistent with the tf.compat.v1.Session usage elsewhere in
  # this file.
  graph_def = tf.compat.v1.GraphDef()

  with open(model_file, "rb") as f:
    graph_def.ParseFromString(f.read())
  with graph.as_default():
    tf.import_graph_def(graph_def)

  return graph


def read_tensor_from_image_file(file_name,
                                input_height=299,
                                input_width=299,
                                input_mean=0,
                                input_std=255):
  """Read an image file and preprocess it for an Inception-style model.

  The file is decoded by extension (.png/.gif/.bmp, JPEG otherwise),
  converted to float32 in [0, 1), center-cropped to 87.5%, resized with
  bilinear interpolation, then rescaled to [-1, 1].

  Args:
    file_name: path to the image file.
    input_height: target height after resize.
    input_width: target width after resize.
    input_mean: unused; kept for backward-compatible interface (the
      current pipeline normalizes to [-1, 1] instead).
    input_std: unused; kept for backward-compatible interface.

  Returns:
    A numpy array of shape (1, input_height, input_width, 3).
  """
  input_name = "file_reader"

  file_reader = tf.read_file(file_name, input_name)
  if file_name.endswith(".png"):
    image_reader = tf.image.decode_png(
        file_reader, channels=3, name="png_reader")
  elif file_name.endswith(".gif"):
    # decode_gif returns (frames, H, W, 3); squeeze drops the frame axis
    # for single-frame GIFs.
    image_reader = tf.squeeze(
        tf.image.decode_gif(file_reader, name="gif_reader"))
  elif file_name.endswith(".bmp"):
    image_reader = tf.image.decode_bmp(file_reader, name="bmp_reader")
  else:
    image_reader = tf.image.decode_jpeg(
        file_reader, channels=3, name="jpeg_reader")

  # convert_image_dtype rescales uint8 [0, 255] -> float32 [0, 1).
  float_caster = tf.image.convert_image_dtype(image_reader, dtype=tf.float32)
  float_caster = tf.image.central_crop(float_caster, central_fraction=0.875)

  image = tf.expand_dims(float_caster, 0)
  image = tf.image.resize_bilinear(image, [input_height, input_width])
  # Rescale [0, 1] -> [-1, 1] (Inception preprocessing).
  image = tf.subtract(image, 0.5)
  image = tf.multiply(image, 2.0)

  # Context manager guarantees the session is closed; the original leaked
  # one Session per call.
  with tf.compat.v1.Session() as sess:
    result = sess.run(image)

  return result


def draw_detections_class(frame, pre):
  """Draw the predicted class label and score onto a frame, in place.

  Args:
    frame: BGR image (numpy array) to annotate.
    pre: (label, score) pair; the label is expected to contain a ':' and
      only the text after the first ':' is shown.

  Returns:
    The same frame object, with text drawn at (50, 100).
  """
  class_name = pre[0].split(':')[1]
  caption = '{0}: {1}'.format(class_name, pre[1])
  cv2.putText(frame, caption, (50, 100),
              cv2.FONT_HERSHEY_SIMPLEX, 1.3, (255, 0, 0), 2)
  return frame


def read_tensor_from_video_file(video_name):
  """Yield (model_input, original_frame) pairs for every frame of a video.

  Each frame is converted BGR -> RGB, resized to 299x299 and reshaped to
  (1, 299, 299, 3) for the classifier; the untouched BGR frame is yielded
  alongside for annotation/output.

  Args:
    video_name: path to the input video file.

  Yields:
    (fra, frame): ``fra`` is a (1, 299, 299, 3) uint8 array, ``frame``
    is the raw BGR frame as read by OpenCV. Yields nothing if the video
    cannot be opened.
  """
  cap = cv2.VideoCapture(video_name)
  try:
    if cap.isOpened():
      success, frame = cap.read()
      while success:
        rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        fra = cv2.resize(rgb, (299, 299))
        fra = fra.reshape([1, 299, 299, 3])
        yield fra, frame
        success, frame = cap.read()
  finally:
    # Release the capture even if the consumer abandons the generator
    # early; the original never released it.
    cap.release()


def write_to_video(video_name, img, path_to_output_video):
  """Write a single annotated frame to an output video file.

  NOTE(review): this opens a fresh VideoWriter on every call, so each call
  truncates ``path_to_output_video`` and the result holds only the last
  frame written. Callers needing a multi-frame video should create the
  writer once (see ``get_vout``) and call ``out.write(...)`` per frame.

  Args:
    video_name: source video, used only to copy width/height/fps.
    img: BGR frame to write.
    path_to_output_video: destination video path.
  """
  cap = cv2.VideoCapture(video_name)
  try:
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cap.get(cv2.CAP_PROP_FPS)
  finally:
    cap.release()  # the original leaked the capture
  fourcc = cv2.VideoWriter_fourcc(*'XVID')
  out = cv2.VideoWriter(path_to_output_video, fourcc, fps,
                        (int(width), int(height)))
  try:
    out.write(img)
  finally:
    out.release()  # flush and close the container (was leaked before)


def load_labels(label_file):
  """Read class labels, one per line, from a text file.

  Args:
    label_file: path to the labels file.

  Returns:
    A list of label strings with trailing whitespace/newlines stripped.
  """
  # tf.gfile was removed in TF2; a plain open() is equivalent for the
  # local label files used by this script, and the context manager closes
  # the handle (GFile was never closed before).
  with open(label_file) as f:
    return [line.rstrip() for line in f]


def preprocess_image(image_buffer):
  """Preprocess a decoded RGB image array for an Inception-style model.

  Converts to float32 in [0, 1), center-crops to 87.5%, resizes to
  299x299 with bilinear interpolation, and rescales to [-1, 1].

  NOTE(review): this adds new ops to the default graph and opens a new
  Session on every call; used per-frame (see ``main``) it is slow and the
  graph grows without bound. Consider building the pipeline once with a
  placeholder and reusing one session.

  Args:
    image_buffer: HxWx3 image array (e.g. uint8 RGB).

  Returns:
    A numpy array of shape (1, 299, 299, 3) with values in [-1, 1].
  """
  image = tf.image.convert_image_dtype(image_buffer, dtype=tf.float32)
  image = tf.image.central_crop(image, central_fraction=0.875)
  image = tf.expand_dims(image, 0)
  image = tf.image.resize_bilinear(
      image, [299, 299], align_corners=False)
  # Rescale [0, 1] -> [-1, 1] (Inception preprocessing).
  image = tf.subtract(image, 0.5)
  image = tf.multiply(image, 2.0)

  # Close the session deterministically; the original leaked one per call.
  with tf.compat.v1.Session() as sess:
    result = sess.run(image)

  return result


def main():
  """Classify every 5th frame of a video and write annotated outputs.

  Reads frames from a (default hard-coded) input video, classifies each
  sampled frame with the frozen model, dumps the sampled frames as JPEGs,
  appends ``index,label,scores`` rows to res.csv, and writes annotated
  frames to ./av.mp4. Command-line flags (see ``method_name``) can
  override the model, labels and layer names.
  """
  video_path = "./test_porn_sj.mp4"

  mn = 171126
  model_file = "./inference_model/porn_%s.pb" % mn

  label_file = "labels.txt"

  input_layer = "input"
  output_layer = "InceptionV4/Logits/Predictions"

  args = method_name()
  # Command-line flags override the defaults above.
  if args.graph:
    model_file = args.graph
  if args.labels:
    label_file = args.labels
  if args.input_layer:
    input_layer = args.input_layer
  if args.output_layer:
    output_layer = args.output_layer

  # 1. Load the frozen graph.
  graph = load_graph(model_file)

  # Raw string avoids Python treating the backslashes as escape sequences.
  dump_path = r"E:\working\TF_Slim_classification-master/test_video/dump/"

  # 2. Run inference frame by frame.
  with tf.compat.v1.Session(graph=graph) as sess:
    cap = cv2.VideoCapture(video_path)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    out = cv2.VideoWriter('./av.mp4', fourcc, 5, (int(width), int(height)))
    input_operation, output_operation = get_io_op(graph, input_layer, output_layer)
    timeF = 5  # classify every 5th frame
    i = 0
    j = 0
    try:
      with open('res.csv', 'w') as f:
        while True:
          succes, img = cap.read()
          if not succes:
            # BUG FIX: the original kept looping after a failed read and
            # crashed in cv2.cvtColor on the None frame at end-of-stream.
            break
          if i % timeF == 0:
            j += 1
            # Model expects RGB; keep the original BGR frame for output.
            # (BGR->RGB->BGR is a lossless channel swap, so writing the
            # untouched frame matches the original behavior.)
            rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            tensor_img = preprocess_image(rgb)
            cv2.imwrite(dump_path + str(j) + '.jpg', img)  # save sampled frame

            results = sess.run(output_operation.outputs[0], {
                input_operation.outputs[0]: tensor_img
            })
            res = make_res(label_file, results)
            f.write("{0},{1},{2}\n".format(j, res[0], res[1]))
            f.flush()
            print(res)

            fram = draw_detections_class(img, res)
            out.write(fram)
          i += 1
    finally:
      # Release OpenCV resources even if inference fails mid-video.
      cap.release()
      out.release()


def make_res(label_file, results):
  """Map raw network output to (top label, raw score vector).

  Args:
    label_file: path to the labels file (re-read on every call).
    results: network output; squeezed to a 1-D score vector.

  Returns:
    (label, scores): the label of the highest-scoring class together with
    the full squeezed score array.
  """
  scores = np.squeeze(results)
  # Last position of the ascending argsort is the top-scoring class.
  top_index = scores.argsort()[-1]
  labels = load_labels(label_file)
  return labels[top_index], scores


def get_io_op(graph, input_layer, output_layer):
  """Look up the input and output operations of an imported graph.

  ``tf.import_graph_def`` prefixes every node with ``import/``, so that
  prefix is prepended to both layer names before the lookup.

  Args:
    graph: a graph produced by ``load_graph``.
    input_layer: name of the input node, without the prefix.
    output_layer: name of the output node, without the prefix.

  Returns:
    (input_operation, output_operation) from the graph.
  """
  prefix = "import/"
  input_op, output_op = (
      graph.get_operation_by_name(prefix + layer)
      for layer in (input_layer, output_layer)
  )
  return input_op, output_op


def get_vout(path_to_output_video, video_path):
  """Create a VideoWriter matching the size and fps of an existing video.

  Args:
    path_to_output_video: destination video path.
    video_path: source video whose width/height/fps are copied.

  Returns:
    An opened ``cv2.VideoWriter`` using the XVID codec; the caller is
    responsible for calling ``release()``.
  """
  cap = cv2.VideoCapture(video_path)
  try:
    width = cap.get(cv2.CAP_PROP_FRAME_WIDTH)
    height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT)
    fps = cap.get(cv2.CAP_PROP_FPS)
  finally:
    cap.release()  # the original leaked the capture
  fourcc = cv2.VideoWriter_fourcc(*'XVID')
  return cv2.VideoWriter(path_to_output_video, fourcc, fps,
                         (int(width), int(height)))


def method_name():
  """Parse the command-line flags for this script.

  Returns:
    argparse.Namespace with ``image``, ``graph``, ``labels``,
    ``input_height``, ``input_width``, ``input_mean``, ``input_std``,
    ``input_layer`` and ``output_layer`` attributes (all optional,
    defaulting to None).
  """
  parser = argparse.ArgumentParser()
  # String-valued flags, registered in display order.
  for flag, text in (
      ("--image", "image to be processed"),
      ("--graph", "graph/model to be executed"),
      ("--labels", "name of file containing labels"),
  ):
    parser.add_argument(flag, help=text)
  # Integer-valued preprocessing flags.
  for flag, text in (
      ("--input_height", "input height"),
      ("--input_width", "input width"),
      ("--input_mean", "input mean"),
      ("--input_std", "input std"),
  ):
    parser.add_argument(flag, type=int, help=text)
  parser.add_argument("--input_layer", help="name of input layer")
  parser.add_argument("--output_layer", help="name of output layer")
  return parser.parse_args()


# Script entry point: run the full video-classification pipeline.
if __name__ == "__main__":
  main()










