# 图片 目标检测
import tensorflow as tf
import os
import warnings
import time
from object_detection.utils import label_map_util, config_util
from object_detection.utils import visualization_utils as vis_util
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from object_detection.builders import model_builder
from absl import app
 
warnings.filterwarnings('ignore')
 
# Configure GPUs for dynamic memory allocation.
# tf.config.experimental.list_physical_devices returns the physical devices
# of a given type (GPU or CPU) visible to this host.
#gpus = tf.config.list_physical_devices('GPU')
gpus = tf.config.experimental.list_physical_devices('GPU')
try:
    for gpu in gpus:
        # Request GPU memory only as needed instead of reserving it all
        # up front (tf.config.experimental.set_memory_growth).
        tf.config.experimental.set_memory_growth(gpu, True)
    # Cap the first GPU at a fixed 5 GB virtual device.
    # NOTE(review): TensorFlow rejects memory growth combined with a virtual
    # device configuration on the same physical GPU, so one of these two
    # settings raises at runtime and is silently dropped below — pick a
    # single memory strategy.
    if gpus:
        tf.config.experimental.set_virtual_device_configuration(
            gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024 * 5)])
except (ValueError, RuntimeError):
    # BUG FIX: was a bare `except:`, which also swallowed KeyboardInterrupt
    # and SystemExit. TF raises ValueError for an invalid device and
    # RuntimeError once devices have already been initialized.
    pass


#MODEL_DATE = '20200711'
#MODEL_NAME = 'centernet_hg104_1024x1024_coco17_tpu-32'
# Files downloaded through keras are stored under "C:\Users\<user>\.keras"
# on Windows and "/root/.keras" on Linux.
#PATH_TO_MODEL_DIR = '/root/.keras/datasets/centernet_hg104_1024x1024_coco17_tpu-32'
#PATH_TO_LABELS = '/root/.keras/datasets/mscoco_label_map.pbtxt'
#PATH_TO_MODEL_DIR = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/pre_trained_models/faster_rcnn_resnet50_v1_800x1333_coco17_gpu-8'
#PATH_TO_LABELS = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/annotations/mscoco_complete_label_map.pbtxt'
#PATH_TO_MODEL_DIR = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/models/v1'
#PATH_TO_LABELS = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/annotations/crack_label_map.pbtxt'
# Active model directory (contains "saved_model/") and its label map.
PATH_TO_MODEL_DIR = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/models'
PATH_TO_LABELS = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/annotations/rdd_label_map.pbtxt'
# Build the label lookup table (class id -> display name); the label map ties
# each category index to a name, e.g. 5 -> airplane in COCO.
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

def _load_detector():
    """Load the SavedModel (full network structure + weights) and report timing."""
    PATH_TO_SAVED_MODEL = PATH_TO_MODEL_DIR + "/saved_model"
    print('载入模型...', end='')
    start_time = time.time()
    detect_fn = tf.saved_model.load(PATH_TO_SAVED_MODEL)
    elapsed_time = time.time() - start_time
    print(f'共花费 {elapsed_time} 秒')
    return detect_fn


def _load_image(path):
    """Open *path*, return (rgb ndarray, width, height, dpi).

    The dpi comes from EXIF tag 282 (XResolution) when present, else 96.
    """
    image = Image.open(path)
    image_np = np.array(image.convert('RGB'))
    width, height = image.size
    # BUG FIX: the original used image._getexif(), a private JPEG-only Pillow
    # API that raises AttributeError for PNG input (the default input here).
    # Image.getexif() is the public, format-agnostic replacement.
    exif_data = image.getexif()
    dpi = exif_data.get(282)
    # BUG FIX: XResolution may be an IFDRational; coerce to float before it
    # is used in division. Also `== None` replaced with a truthiness check
    # (covers both missing tag and a zero value).
    dpi = float(dpi) if dpi else 96
    print('载入图片完成：' + path)
    return image_np, width, height, dpi


def _run_detection(detect_fn, image_np):
    """Run the detector on one RGB image array and return unbatched results."""
    # Convert to a tensor and add a leading batch axis:
    # (height, width, 3) -> (1, height, width, 3).
    input_tensor = tf.convert_to_tensor(image_np)[tf.newaxis, ...]
    print('开始检测...')

    # Detection output includes boxes, classes and scores.
    detections = detect_fn(input_tensor)
    num_detections = int(detections.pop('num_detections'))
    print(f'检测到的物件个数：{num_detections}')
    # Strip the batch axis, keep only valid detections, convert to numpy.
    detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()}
    detections['num_detections'] = num_detections
    detections['detection_classes'] = detections['detection_classes'].astype(np.int64)

    # Print (box, class, score) for every detection.
    print(f'物件资讯 (候选框, 类别, 机率)：')
    for detection_boxes, detection_classes, detection_scores in \
            zip(detections['detection_boxes'], detections['detection_classes'], detections['detection_scores']):
        print(np.around(detection_boxes, 4), detection_classes, round(detection_scores * 100, 2))
    return detections


def _render_and_save(image_np, detections, width, height, dpi, saved_file):
    """Draw detection boxes onto a copy of the image and save it to disk."""
    image_np_with_detections = image_np.copy()

    # Scale line width with image size, with a 2-pixel floor.
    lineWidth = max(int(width / 1000 * 2), 2)
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np_with_detections,
        detections['detection_boxes'],
        detections['detection_classes'],
        detections['detection_scores'],
        category_index,                   # class-id -> name mapping
        use_normalized_coordinates=True,  # boxes are in [0, 1] coordinates
        max_boxes_to_draw=200,            # cap on drawn boxes
        min_score_thresh=.35,             # drop low-probability detections
        agnostic_mode=True,               # single color/label style for all classes
        line_thickness=lineWidth,         # box line width
        skip_scores=True,                 # do not render scores
        skip_labels=False)                # render labels

    # Size the figure so one image pixel maps to one output pixel.
    plt.figure(figsize=(float(width / dpi), float(height / dpi)), dpi=dpi)
    plt.imshow(image_np_with_detections, cmap='viridis')
    # Remove all margins/axes so only the image is saved.
    plt.axis('off')
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, left=0, right=1, hspace=0, wspace=0)
    plt.margins(0, 0)

    # ROBUSTNESS: make sure the output directory exists before writing.
    out_dir = os.path.dirname(saved_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    # Remove any stale output file first.
    if os.path.isfile(saved_file):
        os.remove(saved_file)
    plt.savefig(saved_file)
    # BUG FIX: close the figure to avoid leaking one figure per invocation.
    plt.close()


def main(_, inputImage = None, outputImage = None):
    """Detect objects in one image and save an annotated copy.

    Args:
        _: unused argv placeholder required by absl.app.run.
        inputImage: optional input image path; defaults to
            './production/input/detection.png'.
        outputImage: optional output image path; defaults to
            './production/output/detection.png'.
    """
    # Loading strategy 1: SavedModel (complete network structure and weights).
    detect_fn = _load_detector()

    inputs = inputImage or './production/input/detection.png'
    image_np, width, height, dpi = _load_image(inputs)

    detections = _run_detection(detect_fn, image_np)

    outputs = outputImage or './production/output/detection.png'
    _render_and_save(image_np, detections, width, height, dpi, outputs)

# Script entry point: absl parses command-line flags, then invokes main(argv).
if __name__ == '__main__':
  app.run(main)