# coding:utf-8
'''
https://github.com/TannerGilbert/Tutorials/blob/master/Tensorflow%20Object%20Detection/object_detection_tutorial.ipynb
Design notes: the objectDetector class should accept numpy arrays only!
Its sole responsibility is processing images.
To store results, the caller must supply the file name.
It can save the numpy RGB image
and the numpy marked-up image,
but it never saves the original source image — that is the caller's job.
'''

import sys
import os
from platform import system as platform_system
import threading
import time

print(platform_system())
# Platform-specific setup: add the TensorFlow object-detection API checkout to
# sys.path and pick the test-image directory for this machine.
if platform_system()=='Windows':
  sys.path.append(r'C:\Users\uidt8491\AppData\Local\Continuum\anaconda3\envs\tf15\models\research\object_detection')
  sys.path.append(r'C:\Users\uidt8491\AppData\Local\Continuum\anaconda3\envs\tf15\models\research')
  sys.path.append(r'C:\Users\uidt8491\AppData\Local\Continuum\anaconda3\envs\tf15\models\research\slim')
  sys.path.append(r'.\..')
  PATH_TO_TEST_IMAGES_DIR = r'D:\OneDrive - Continental AG\Dataset\ecu_damper\color\20200722-f05\g1'
  pass
elif platform_system()=='Linux':
  sys.path.append(r'/home/nvidia/models/research/object_detection')
  sys.path.append(r'/home/nvidia/models/models/research')
  sys.path.append(r'/home/nvidia/models/models/research/slim')
  PATH_TO_TEST_IMAGES_DIR = r'/media/0F6F0ADC0F6F0ADC/datasets/ecu_damper/20200609-camera'
  pass
import numpy as np
from os import path as os_path
# pascal_voc_io sits next to this file when run as a script, but inside the
# `cores` package when this module is imported.
if __name__=='__main__':
  import pascal_voc_io
else:
  from cores import pascal_voc_io
import cv2
from PIL.Image import open as Image_open
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image
import tensorflow as tf

from object_detection.utils import ops as utils_ops

from object_detection.utils import label_map_util
#from utils import label_map_util

from object_detection.utils import visualization_utils as vis_util
#====================================================================#
#                                                                    #
#                         Core class                                 #
#                                                                    #
#====================================================================#
class __ObjectDetectionCoreTF1x__(object):
    '''
    Object-detection core for TensorFlow 1.x.

    pbtxt_path: path to the label-map .pbtxt file
    saved_model_dir: directory of the SavedModel (.pb)
    min_score: minimum score a detection must reach to be kept in the
               score-filtered result dict
    '''
    def __init__(self, pbtxt_path, saved_model_dir, min_score=0.2):
        # patch tf1 into `utils.ops`
        utils_ops.tf = tf.compat.v1
        # Patch the location of gfile
        tf.gfile = tf.io.gfile
        self.__minScore__ = min_score
        self.__category_index__ = label_map_util.create_category_index_from_labelmap(pbtxt_path, use_display_name=True)

        self.image_RGB_np = []        # last input image (numpy array, HxWxC)
        self.image_marked_np = []     # copy of the input with detections drawn
        self.image_np_expanded = []   # input with a batch dimension, (1,H,W,C)

        self.image_shape = [0, 0, 0]  # shape of the last input image
        self.xmlRes = []

        self.output_dict_all = {}              # raw session.run() outputs
        self.output_dict_filtered = {}         # batch dim stripped, dtypes fixed
        self.output_dict_filtered_scored = {}  # additionally filtered by min_score

        # ---------- From TF1.15 -----------
        self.__sess__ = tf.compat.v1.Session()
        self.__loadSavedModel__(saved_model_dir)
        self.__getTensorDicts__()
    # ========================== two ways to load a model ======================
    def __loadSavedModel__(self, saved_model_dir):
        '''Load a SavedModel into this instance's session / default graph.'''
        tf.compat.v1.saved_model.loader.load(self.__sess__, [tf.saved_model.SERVING], saved_model_dir)
        self.__detection_graph__ = tf.compat.v1.get_default_graph()

    def loadFrozenModel(self, frozen_model_path):
        '''Load a frozen inference graph (.pb) as the detection graph.'''
        detection_graph = tf.Graph()
        with detection_graph.as_default():
            od_graph_def = tf.compat.v1.GraphDef()
            with tf.io.gfile.GFile(frozen_model_path, 'rb') as fid:
                serialized_graph = fid.read()
                od_graph_def.ParseFromString(serialized_graph)
                tf.import_graph_def(od_graph_def, name='')
        # BUG FIX: the original stored tf.get_default_graph() here, which —
        # evaluated outside the `with` block above — is the *outer* default
        # graph, not the graph the frozen model was just imported into.
        self.__detection_graph__ = detection_graph

    # ========================== parse the model ======================
    def __getTensorDicts__(self):
        '''Collect the output tensors of interest into self.__tensor_dict__.'''
        with self.__detection_graph__.as_default():
            ops = tf.compat.v1.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            self.__tensor_dict__ = {}
            for key in ['num_detections', 'detection_boxes', 'detection_scores', 'detection_classes', 'detection_masks']:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    self.__tensor_dict__[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(tensor_name)
            if 'detection_masks' in self.__tensor_dict__:
                # The following processing is only for a single image.
                detection_boxes = tf.squeeze(self.__tensor_dict__['detection_boxes'], [0])
                detection_masks = tf.squeeze(self.__tensor_dict__['detection_masks'], [0])
                # Reframe is required to translate mask from box coordinates to
                # image coordinates and fit the image size.
                real_num_detection = tf.cast(self.__tensor_dict__['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                # NOTE(review): image_np_expanded is still an empty list when
                # this runs from __init__, so a mask-producing model would fail
                # on `.shape` here — confirm against a mask model before
                # relying on the masks path.
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(detection_masks, detection_boxes, self.image_np_expanded.shape[1], self.image_np_expanded.shape[2])
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension.
                self.__tensor_dict__['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)
            # Use the compat.v1 API consistently (the original mixed in
            # tf.get_default_graph on this line only).
            self.image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')

    # ========================== feed a numpy image ======================
    def putImage_RGB_np(self, image_np):
        '''Set the image (numpy HxWxC array) detection will run on.'''
        if (len(image_np)):
            self.image_RGB_np = image_np
            self.image_np_expanded = np.expand_dims(self.image_RGB_np, axis=0)
            self.image_shape = self.image_RGB_np.shape

    # ========================== run the prediction ======================
    def __run_inference_for_single_image__(self):
        '''Run the session on the current image and fill the output dicts.'''
        if (len(self.image_np_expanded) == 0):
            print('object detection: no image found')
            return
        with self.__detection_graph__.as_default():
            self.output_dict_all = self.__sess__.run(self.__tensor_dict__, feed_dict={self.image_tensor: self.image_np_expanded})
            self.output_dict_filtered = self.output_dict_all.copy()
            # all outputs are float32 numpy arrays, so convert types as appropriate
            self.output_dict_filtered['num_detections'] = int(self.output_dict_filtered['num_detections'][0])
            self.output_dict_filtered['detection_classes'] = self.output_dict_filtered['detection_classes'][0].astype(np.int64)
            self.output_dict_filtered['detection_boxes'] = self.output_dict_filtered['detection_boxes'][0]
            self.output_dict_filtered['detection_scores'] = self.output_dict_filtered['detection_scores'][0]
            if 'detection_masks' in self.output_dict_filtered:
                self.output_dict_filtered['detection_masks'] = self.output_dict_filtered['detection_masks'][0]
            # further filter by the configured minimum score (compute the
            # boolean mask once instead of three times, as the original did)
            keep = self.output_dict_filtered['detection_scores'] > self.__minScore__
            self.output_dict_filtered_scored['detection_classes'] = self.output_dict_filtered['detection_classes'][keep]
            self.output_dict_filtered_scored['detection_boxes'] = self.output_dict_filtered['detection_boxes'][keep]
            self.output_dict_filtered_scored['detection_scores'] = self.output_dict_filtered['detection_scores'][keep]
            self.output_dict_filtered_scored['num_detections'] = len(self.output_dict_filtered_scored['detection_scores'])
            # boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels
            self.output_dict_filtered_scored['boxes_pix'] = self.output_dict_filtered_scored['detection_boxes'] * [self.image_shape[0], self.image_shape[1], self.image_shape[0], self.image_shape[1]]
            self.output_dict_filtered_scored['detection_descs'] = [self.__category_index__[idx]['name']
                                                                   for idx in
                                                                   self.output_dict_filtered_scored['detection_classes']]

    # ========================== mark up the image ======================
    def __getInferenceResult__(self, minScore=None):
        '''Run inference, compute box centers and draw detections on a copy.

        minScore: optional drawing-only threshold; defaults to the threshold
                  given at construction time.
        '''
        self.__run_inference_for_single_image__()
        self.output_dict_filtered_scored['detection_box_centers'] = []
        for box in self.output_dict_filtered_scored['detection_boxes']:
            # NOTE(review): boxes are [ymin, xmin, ymax, xmax], so `x` below is
            # actually the vertical center and `y` the horizontal one — the
            # naming looks swapped; kept as-is for caller compatibility.
            x = (box[0] + box[2]) / 2
            y = (box[1] + box[3]) / 2
            self.output_dict_filtered_scored['detection_box_centers'].append([x, y])
        self.image_marked_np = self.image_RGB_np.copy()
        score = self.__minScore__ if minScore is None else minScore
        vis_util.visualize_boxes_and_labels_on_image_array(
            self.image_marked_np,
            self.output_dict_filtered['detection_boxes'],
            self.output_dict_filtered['detection_classes'],
            self.output_dict_filtered['detection_scores'],
            self.__category_index__,
            instance_masks=self.output_dict_filtered.get('detection_masks'),
            use_normalized_coordinates=True,
            line_thickness=1,
            min_score_thresh=score
            )

    def startInfer(self, image_path='', minScore=None):
        '''Public entry point: run inference on the current image.'''
        self.__getInferenceResult__(minScore)

    def getResultDicts(self):
        '''Return the score-filtered detection results.'''
        return self.output_dict_filtered_scored

#====================================================================#
#                                                                    #
#             _________ Class for external use ___________           #
#                                                                    #
#====================================================================#
class ObjectDetectionExt(__ObjectDetectionCoreTF1x__):
  """Public wrapper around the TF1.x detection core.

  Adds convenience accessors plus image and Pascal-VOC XML persistence.
  """
  def __init__(self, pbtxt_path, saved_model_dir, min_score=0.2):
    super().__init__(pbtxt_path, saved_model_dir, min_score=min_score)
    self.image_path = ''
  #----------------------------------------------------
  def putParams(self, pbtxt_path, saved_model_dir, min_score=0.2):
    # Intentionally a no-op placeholder kept for interface compatibility.
    pass
  #----------------------------------------------------
  def getResultImgAndDicts(self):
    """Return (original image, marked-up image, score-filtered result dict)."""
    return self.image_RGB_np, self.image_marked_np, self.output_dict_filtered_scored
  #----------------------------------------------------
  def saveImageRGB(self, filename):
    """Write the current input image to disk via OpenCV."""
    cv2.imwrite(filename, self.image_RGB_np)

  def saveImageMarkup(self, filename):
    """Write the marked-up (detections drawn) image to disk via OpenCV."""
    cv2.imwrite(filename, self.image_marked_np)

  def saveResultAsXML(self, foldername, image_path):
    """Write the score-filtered detections as a Pascal-VOC XML file.

    The XML file is placed next to image_path, with the extension replaced
    by '.xml'.
    """
    self.xmlRes = pascal_voc_io.PascalVocWriter(foldername, image_path, self.image_shape)
    results = self.output_dict_filtered_scored
    boxes = results['boxes_pix'][:results['num_detections']]
    labels = results['detection_descs'][:results['num_detections']]
    scores = results['detection_scores'][:results['num_detections']]
    for pix_box, label, box_score in zip(boxes, labels, scores):
      ymin, xmin, ymax, xmax = pix_box
      self.xmlRes.addBndBox(int(xmin), int(ymin), int(xmax), int(ymax), label, difficult=0, score=box_score)
    self.xmlRes.appendObjects(self.xmlRes.genXML())
    xml_filename = os_path.normpath(os_path.splitext(image_path)[0] + '.xml')
    self.xmlRes.save(xml_filename)
#====================================================================#
#                                                                    #
#                _____________ Thread class _______________          #
#                                                                    #
#====================================================================#
class clsObjectDetectorThread(threading.Thread):
  """Worker thread that runs inference whenever a new image is submitted.

  name: thread name (also used in the start/exit log messages)
  object_detector: detector exposing putImage_RGB_np()/startInfer()/
                   getResultImgAndDicts() (e.g. ObjectDetectionExt)
  dataFeeder: optional image source (currently unused by the loop)
  """
  def __init__(self, name, object_detector, dataFeeder=None):
    # BUG FIX: the original called threading.Thread.__init__ twice
    # (super().__init__() and an explicit call); once is enough.
    super().__init__()
    self.objectDetector = object_detector
    self.name = name
    self.exitFlag = False                 # set by exit() to stop the loop
    self.numpy_image_RGB = []
    self.__new_image_received__ = False   # producer/consumer handshake flag
    self.threadLocker = threading.Lock()  # guards detector state and the flag
    self.dataFeeder = dataFeeder

  def getResultImgAndDicts(self):
    """Proxy to the wrapped detector's result accessor."""
    return self.objectDetector.getResultImgAndDicts()

  def putImage_RGB_np(self, image_np):
    """Hand a new RGB numpy image to the detector (thread-safe)."""
    with self.threadLocker:
      self.objectDetector.putImage_RGB_np(image_np)
      self.__new_image_received__ = True

  def run(self):
      """
      目标检测线程启动
      """
      print ("开始目标检测子线程：" + self.name)
      self.__thread_inference__(self.name, 0.001)
      print ("退出目标检测子线程：" + self.name)

  def __thread_inference__(self, threadName, delay):
      """Polling loop: run inference each time a new image arrives.

      delay: seconds slept per iteration so the loop yields the CPU.
      """
      while True:
          if self.exitFlag:
              print('self.exitFlat = ', self.exitFlag)
              break
          if self.__new_image_received__:
              # Lock held via `with` so it is released even if startInfer raises
              # (the original bare acquire/release would deadlock on error).
              with self.threadLocker:
                  self.objectDetector.startInfer()
                  self.__new_image_received__ = False
          # BUG FIX: sleep on every iteration — the original slept only after
          # an inference, so the idle loop busy-waited at 100% CPU.
          time.sleep(delay)

  def exit(self):
      """Request the worker loop to stop at its next iteration."""
      self.exitFlag = True
  
  
#====================================================================#
#                                                                    #
#                ______________ Test / demo _____________            #
#                                                                    #
#====================================================================#
if __name__=='__main__':

  def get_imlist(path,ext):
    # Return the full paths of all files in `path` ending with '.<ext>'.
    return [os.path.join(path,f) for f in os.listdir(path) if f.endswith('.'+ext)]

  PBTXT_PATH=r'../model/saved_model.pbtxt'
  FROZEN_GRAPH_FILE = r'ecu_damper_4660/frozen_inference_graph.pb'
  SAVED_MODEL_DIR = r'ecu_damper_4660/saved_model'
  SAVED_MODEL_DIR = r'ecu_damper_4660/rt_model'
  # NOTE: the two SAVED_MODEL_DIR assignments above are immediately
  # overridden; only this absolute path is actually used.
  SAVED_MODEL_DIR =os.path.abspath( r'../model/ecu_damper_789/saved_model')
  print(SAVED_MODEL_DIR)
  #TEST_IMAGE_PATHS = [ os_path.join(PATH_TO_TEST_IMAGES_DIR, '2020060914332{}.jpg'.format(i)) for i in range(1, 9) ]
  #from general_utils import myfunc

  TEST_IMAGE_LIST = get_imlist(PATH_TO_TEST_IMAGES_DIR,'jpg')
  #IMAGE_SIZE = (1024, 768)

  myDetector=ObjectDetectionExt(PBTXT_PATH,SAVED_MODEL_DIR,min_score=0.6)
  
  for image_path in TEST_IMAGE_LIST:
    print(image_path)
    if(os_path.exists(image_path)):
      image=cv2.imread(image_path)
      layers=cv2.split(image)
      #--------- per-channel histogram equalization -----------
      eqHisRGB=[]
      for layer in layers:
        eqHisRGB.append(cv2.equalizeHist(layer))
      imgeq=cv2.merge(eqHisRGB)
      cv2.imshow('eqlzdHist',imgeq)
      #--------- crop the image into regions and detect each -------
      cropLU=imgeq[0:300,0:400,:]    # upper-left crop
      myDetector.putImage_RGB_np(cropLU)
      myDetector.startInfer()
      res_cropLU=myDetector.image_marked_np.copy()
      cv2.imshow('res_cropLU',res_cropLU)
      myDetector.saveImageRGB('cropLU.jpg')
      myDetector.saveResultAsXML(foldername='.',image_path='cropLU.jpg')

      cropLD=imgeq[500:800,0:400,:]  # lower-left crop
      myDetector.putImage_RGB_np(cropLD)
      myDetector.startInfer()
      res_cropLD=myDetector.image_marked_np.copy()
      cv2.imshow('res_cropLD',res_cropLD)

      cropRU=imgeq[0:300,800:,:]     # upper-right crop
      myDetector.putImage_RGB_np(cropRU)
      myDetector.startInfer()
      res_cropRU=myDetector.image_marked_np.copy()
      cv2.imshow('res_cropRU',res_cropRU)

      cropRD=imgeq[500:800,800:,:]   # lower-right crop
      myDetector.putImage_RGB_np(cropRD)
      myDetector.startInfer()
      res_cropRD=myDetector.image_marked_np.copy()
      cv2.imshow('res_cropRD',res_cropRD)

      # whole original image, unprocessed
      myDetector.putImage_RGB_np(image)
      myDetector.startInfer()
      res0=myDetector.image_marked_np.copy()
      cv2.imshow('res0',res0)

      # whole image after histogram equalization
      myDetector.putImage_RGB_np(imgeq)
      myDetector.startInfer()
      res1=myDetector.image_marked_np.copy()
      cv2.imshow('res1',res1)
      #myDetector.__run_inference_for_single_image__()
      #myDetector.__getInferenceResult_()
      
      #cv2.imshow('img_obj',myDetector.image_marked_np)
      #cv2.imshow('img',myDetector.image_RGB_np)
      # 'q' quits; any other key advances to the next image
      keyIn=cv2.waitKey(0)&0xFF
      if keyIn == ord('q'):
        break
  cv2.destroyAllWindows() 
  


        

