from __future__ import division, print_function, absolute_import

import os
import warnings
import sys
import cv2
import numpy as np
import pyrealsense2 as rs
import matplotlib.pyplot as plt
import tensorflow as tf

from keras import backend as K
from PIL import Image
from yolo import YOLO
from timeit import time
from deep_sort import preprocessing
from deep_sort import nn_matching
from deep_sort.detection import Detection
from deep_sort.tracker import Tracker
from tools import generate_detections as gdet
from deep_sort.detection import Detection as ddet



def main(yolo):
    """Run YOLO detection + Deep SORT tracking on a RealSense RGB-D stream.

    Args:
        yolo: detector exposing ``detect_image(PIL.Image) ->
            (boxes, scores, classes, colors)`` with boxes in tlwh format.

    Side effects: opens a RealSense pipeline, shows an OpenCV window, and
    (when ``writeVideo_flag`` is set) writes ``output.avi`` and
    ``detection.txt``. Press 'Q' (capital) in the window to stop.
    """
    # Deep SORT parameters.
    max_cosine_distance = 0.3   # appearance-distance gate for matching
    nn_budget = None            # unlimited appearance-feature gallery
    nms_max_overlap = 1.0       # 1.0 effectively disables the extra NMS pass

    # Appearance-feature encoder + tracker.
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric(
        "cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True

    # Stream geometry: width, height, frames-per-second.
    w = 848
    h = 480
    f = 60

    pipeline = rs.pipeline()
    config = rs.config()
    config.enable_stream(rs.stream.depth, w, h, rs.format.z16, f)
    config.enable_stream(rs.stream.color, w, h, rs.format.bgr8, f)
    profile = pipeline.start(config)

    # Loop-invariant objects, hoisted out of the capture loop (the original
    # rebuilt the colorizer and align units every frame and re-queried the
    # depth scale once per track).
    colorizer = rs.colorizer()
    align = rs.align(rs.stream.color)   # align depth onto the color viewpoint
    depth_scale = profile.get_device().first_depth_sensor().get_depth_scale()

    if writeVideo_flag:
        # Create the codec and the VideoWriter. The saved frame is color and
        # colorized depth stacked side by side, so the writer must be 2*w
        # wide — the original opened it at (w, h), which silently produced a
        # broken output file.
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter('output.avi', fourcc, 15, (2 * w, h))
        list_file = open('detection.txt', 'w')
        frame_index = -1

    while True:
        start = time.time()

        # Grab exactly one frameset per iteration. The original called
        # wait_for_frames() twice and discarded the first result, halving
        # the effective frame rate.
        frameset = pipeline.wait_for_frames()
        if not frameset.get_depth_frame() or not frameset.get_color_frame():
            break

        # Align depth to color so an (x, y) pixel indexes the same scene
        # point in both images.
        frameset = align.process(frameset)
        color_frame = frameset.get_color_frame()
        aligned_depth_frame = frameset.get_depth_frame()

        frame = np.asanyarray(color_frame.get_data())
        colorized_depth = np.asanyarray(
            colorizer.colorize(aligned_depth_frame).get_data())
        depth_image = np.asanyarray(aligned_depth_frame.get_data())

        image = Image.fromarray(frame[..., ::-1])   # BGR -> RGB for YOLO
        boxs, scores, classes, color = yolo.detect_image(image)
        features = encoder(frame, boxs)

        # Confidence is fixed at 1.0: YOLO has already applied its own
        # score threshold.
        detections = [Detection(bbox, 1.0, feature)
                      for bbox, feature in zip(boxs, features)]

        # Run non-maximum suppression over the detections.
        boxes = np.array([d.tlwh for d in detections])
        confidences = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(
            boxes, nms_max_overlap, confidences)
        detections = [detections[i] for i in indices]

        tracker.predict()
        tracker.update(detections)

        # Confirmed, recently-updated tracks drawn in white with their IDs.
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            x = int((int(bbox[0]) + int(bbox[2])) / 2)
            y = int((int(bbox[1]) + int(bbox[3])) / 2)

            # Clamp the box center into the image: a predicted track can
            # drift outside the frame, which would raise an IndexError on
            # the depth array.
            xc = min(max(x, 0), w - 1)
            yc = min(max(y, 0), h - 1)
            depth_data = depth_image[yc, xc].astype(float) * depth_scale

            print("ID:", track.track_id, "BOX:", bbox,
                  "CENTER: ", x, " | ", y, "DEPTH:", depth_data)
            for canvas in (frame, colorized_depth):
                cv2.circle(canvas, (x, y), 1, (255, 255, 255), 3)
                cv2.rectangle(canvas, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])),
                              (255, 255, 255), 2)
                cv2.putText(canvas, str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 150,
                            (0, 255, 0), 2)

        # Raw (untracked) detections drawn in blue for comparison.
        for det in detections:
            bbox = det.to_tlbr()
            x = int((int(bbox[0]) + int(bbox[2])) / 2)
            y = int((int(bbox[1]) + int(bbox[3])) / 2)
            for canvas in (frame, colorized_depth):
                cv2.circle(canvas, (x, y), 1, (255, 0, 0), 3)
                cv2.rectangle(canvas, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
        print("-----------------------------------")

        end = time.time()
        font = cv2.FONT_HERSHEY_SIMPLEX
        # Guard against a zero interval on a very fast iteration.
        fps = "FPS:" + str(round(1.0 / max(end - start, 1e-6), 2))
        frame = cv2.putText(frame, fps, (20, 20), font, 0.5, (0, 0, 0), 2)
        frame = np.hstack((frame, colorized_depth))
        cv2.namedWindow('Realsense-YOLOv3-DeepSort', cv2.WINDOW_AUTOSIZE)
        cv2.imshow('Realsense-YOLOv3-DeepSort', frame)

        if writeVideo_flag:
            # Save the stacked frame and log the raw YOLO boxes.
            out.write(frame)
            frame_index = frame_index + 1
            list_file.write(str(frame_index) + ' ')
            for box in boxs:
                list_file.write(str(box[0]) + ' ' + str(box[1]) + ' '
                                + str(box[2]) + ' ' + str(box[3]) + ' ')
            list_file.write('\n')

        # Press 'Q' to stop.
        if cv2.waitKey(1) & 0xFF == ord('Q'):
            break

    pipeline.stop()

    if writeVideo_flag:
        out.release()
        list_file.close()
    cv2.destroyAllWindows()

if __name__ == '__main__':
    # Allow this process the full GPU memory pool, register the session
    # with Keras, then build the detector and start the capture loop.
    tf_config = tf.ConfigProto()
    tf_config.gpu_options.per_process_gpu_memory_fraction = 1
    K.set_session(tf.Session(config=tf_config))
    main(YOLO())
