# 图片 目标检测
import tensorflow as tf
import os
import warnings
import time
from object_detection.utils import label_map_util, config_util
from object_detection.utils import visualization_utils as viz_utils
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
from object_detection.builders import model_builder
 
warnings.filterwarnings('ignore')

# GPU memory configuration.
# NOTE(review): enabling memory growth and then configuring a virtual device
# with a fixed memory limit on the SAME GPU are mutually exclusive in
# TensorFlow — the second call raises, and the original bare `except:`
# silently swallowed it (along with any other error). Catch only the
# exception types tf.config actually raises here so real bugs surface.
gpus = tf.config.experimental.list_physical_devices('GPU')
try:
    for gpu in gpus:
        # Allocate GPU memory on demand instead of reserving it all upfront.
        tf.config.experimental.set_memory_growth(gpu, True)
    # Cap the first GPU at a fixed 5 GB (conflicts with memory growth above;
    # TF will reject whichever is applied second — see NOTE).
    if gpus:
        tf.config.experimental.set_virtual_device_configuration(gpus[0],
            [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=1024*5)])
except (ValueError, RuntimeError):
    # Invalid device, or virtual devices cannot be modified once initialized.
    pass


# Files downloaded via keras live under "C:\Users\<user>\.keras" on Windows,
# "/root/.keras" on Linux.
#PATH_TO_MODEL_DIR = '/root/.keras/datasets/centernet_hg104_1024x1024_coco17_tpu-32'
#PATH_TO_LABELS = '/root/.keras/datasets/mscoco_label_map.pbtxt'
#PATH_TO_MODEL_DIR = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/pre_trained_models/ssd_mobilenet_v2_320x320_coco17_tpu-8'
#PATH_TO_LABELS = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/pre_trained_models/mscoco_complete_label_map.pbtxt'
# Fixed: the path previously contained a literal backslash ("web\www") from an
# invalid '\w' escape sequence (SyntaxWarning on Python >= 3.12); use forward
# slashes consistently, matching PATH_TO_LABELS below.
PATH_TO_MODEL_DIR = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/models'
PATH_TO_LABELS = 'E:/workspace/web/www/zzd/daelui-tensorflow/crack_detection/annotations/crack_label_map.pbtxt'
# Build the lookup table mapping class ids to display names
# (e.g. 5 -> "airplane" in the COCO label map).
category_index = label_map_util.create_category_index_from_labelmap(PATH_TO_LABELS, use_display_name=True)

"""加载模型方法2：Checkpoint（只包含权重信息，需要重新构建模型。一般是训练过程中临时保存，加载速度相对更快）"""
PATH_TO_CFG = PATH_TO_MODEL_DIR + "/pipeline.config"
PATH_TO_CKPT = PATH_TO_MODEL_DIR + "/checkpoint"
 
# 计时开始
print('载入模型...', end='')
start_time = time.time()
configs = config_util.get_configs_from_pipeline_file(PATH_TO_CFG)
model_config = configs['model']
detection_model = model_builder.build(model_config=model_config, is_training=False)
# 还原模型
ckpt = tf.compat.v2.train.Checkpoint(model=detection_model)
# 恢复变量当在其他地方需要为模型重新载入之前保存的参数时，需要再次实例化一个 checkpoint，同时保持键名的一致。再调用 checkpoint 的 restore 方法。
ckpt.restore(os.path.join(PATH_TO_CKPT, 'ckpt-0')).expect_partial()
elapsed_time = time.time() - start_time
print(f'共花费 ' + str(elapsed_time) + ' 秒')


# Load the test image. convert('RGB') guards against PNGs that carry an
# alpha channel (RGBA would yield 4 channels and break the 3-channel
# detector); it is a no-op for images already in RGB.
image_np = np.array(Image.open('./production/input/detection.png').convert('RGB'))
# Convert to a TensorFlow tensor.
input_tensor = tf.convert_to_tensor(image_np)
# Add a leading batch dimension: (batch, height, width, 3).
input_tensor = input_tensor[tf.newaxis, ...]
print('载入图片完成，开始检测...')


# Although eager execution (the TF2 default) is flexible and easy to debug,
# graph execution is preferred for performance. The @tf.function decorator
# (with AutoGraph) compiles this function into a TensorFlow graph.
@tf.function
def detect_fn(image):
    """Run the detector on a batched image tensor.

    Preprocesses the batch, runs the forward pass, and post-processes the raw
    predictions into boxes / classes / scores.
    """
    preprocessed, true_shapes = detection_model.preprocess(image)
    raw_predictions = detection_model.predict(preprocessed, true_shapes)
    return detection_model.postprocess(raw_predictions, true_shapes)


# Run detection; the result dict contains boxes, classes and scores.
detections = detect_fn(input_tensor)
# Number of valid detections in the (single-image) batch.
num_detections = int(detections.pop('num_detections'))
print(f'检测到的物件个数：{num_detections}')
# Drop the batch dimension, keep only valid detections, convert to numpy.
detections = {key: value[0, :num_detections].numpy() for key, value in detections.items()}
detections['num_detections'] = num_detections
# Class ids must be integers to index into the category map.
detections['detection_classes'] = detections['detection_classes'].astype(np.int64)

# Print each detection: bounding box, class id, probability (%).
# (The original used a placeholder-free f-string here; plain string is
# byte-identical output.)
print('物件资讯 (候选框, 类别, 机率)：')
for detection_boxes, detection_classes, detection_scores in \
        zip(detections['detection_boxes'], detections['detection_classes'], detections['detection_scores']):
    print(np.around(detection_boxes, 4), detection_classes, round(detection_scores*100, 2))

# Draw the detections onto a copy of the original image.
image_np_with_detections = image_np.copy()
viz_utils.visualize_boxes_and_labels_on_image_array(
    image_np_with_detections,
    detections['detection_boxes'],
    detections['detection_classes'],
    detections['detection_scores'],
    category_index,                   # class-id -> display-name mapping
    use_normalized_coordinates=True,  # box coords are normalized to [0, 1]
    max_boxes_to_draw=200,            # upper bound on boxes drawn
    min_score_thresh=.30,             # drop low-confidence detections
    agnostic_mode=True)               # one color/label style for all classes

# Render and save the annotated image.
plt.figure(figsize=(12, 8))
plt.imshow(image_np_with_detections, cmap='viridis')
output_path = './production/output/detection.png'
# Remove any stale result before writing the new one.
if os.path.isfile(output_path):
    os.remove(output_path)
plt.savefig(output_path)