'''
Load a RefineDet (Caffe) detector, continuously read frames from a stereo
RTSP camera, detect marine objects in the left image, estimate the target's
3-D offset via an external C ranging library, and stream the result to a
vehicle controller over TCP.
'''

import argparse
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as io
import cv2

import ctypes 
from ctypes import *

# Make sure that caffe is on the python path:
caffe_root = '/home/deepmind/RefineDet/'
os.chdir(caffe_root)
sys.path.insert(0, os.path.join(caffe_root, 'python'))
import caffe

from google.protobuf import text_format
from caffe.proto import caffe_pb2

# multil threading and serial port
import threading
from socket import *
#from transfer import SerialData
import time

# import necessary packages for object tracking
from imutils.video import VideoStream
from imutils.video import FPS
import argparse
import imutils
from datetime import datetime

def video_read_left():
    """Continuously read frames from the left RTSP camera into module globals.

    Intended to run as a thread target: stores the most recent frame in the
    global ``frame1`` and publishes the stream's frame rate and size via the
    globals ``fps1`` and ``size1``.  Loops until the stream ends or a read
    fails, then releases the capture handle.
    """
    # where the video comes from
    #cap = cv2.VideoCapture("online_test.avi")
    cap1 = cv2.VideoCapture("rtsp://admin:zhifan518@192.168.1.120:554/cam/realmonitor?channel=1&subtype=2")

    global frame1
    global fps1
    global size1
    fps1 = cap1.get(cv2.CAP_PROP_FPS)
    size1 = (int(cap1.get(cv2.CAP_PROP_FRAME_WIDTH)),
             int(cap1.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    ok1, frame1 = cap1.read()  # prime the first frame before the loop

    # Bug fix: the original wrapped a bare `pass` in try/except
    # KeyboardInterrupt inside the loop, which could never trigger, so the
    # camera was never released.  try/finally guarantees release on any exit
    # path (stream end, read failure, or an exception such as Ctrl-C).
    try:
        while cap1.isOpened():
            if not ok1:
                break
            ok1, frame1 = cap1.read()  # read a frame
    finally:
        cap1.release()


def get_labelname(labelmap, labels):
    """Map numeric label id(s) to their display name(s).

    Parameters
    ----------
    labelmap : caffe_pb2.LabelMap
        Protobuf message whose ``item`` entries carry a numeric ``label``
        id and a ``display_name`` string.
    labels : int or list of int
        A single label id or a list of label ids.

    Returns
    -------
    list of str
        Display names in the same order as ``labels``.

    Raises
    ------
    AssertionError
        If any requested id is not present in the labelmap.
    """
    if type(labels) is not list:
        labels = [labels]
    labelnames = []
    for label in labels:
        # Iterate the items directly instead of indexing with the
        # Python-2-only `xrange`, which breaks under Python 3.
        found = False
        for item in labelmap.item:
            if label == item.label:
                found = True
                labelnames.append(item.display_name)
                break
        assert found, 'label %s not found in labelmap' % label
    return labelnames

def DetectResults(img, results, labelmap, threshold=0.6):
    """Draw detections above `threshold` on `img` and choose a tracking box.

    Parameters
    ----------
    img : numpy.ndarray
        BGR frame; boxes and labels are drawn on it in place.
    results : numpy.ndarray
        One row per detection: [xmin, ymin, xmax, ymax, score, label]
        (pixel coordinates).
    labelmap : caffe_pb2.LabelMap
        Used to resolve class ids to display names.
    threshold : float
        Minimum score for a detection to be drawn / considered for tracking.

    Returns
    -------
    (budbox, flag) : (tuple, bool)
        `budbox` is (x, y, w, h) of the highest-priority detection whose
        center lies in the central tracking area, or () if none; `flag`
        is True when a trackable box was found.
    """
    # reasonable tracking area: the central 70% of the frame in each axis
    budxmin = img.shape[1] * 0.15
    budxmax = img.shape[1] * 0.85
    budymin = img.shape[0] * 0.15
    budymax = img.shape[0] * 0.85

    weights = [0, 2, 1, 1, 0]    # per-class priority weights for marine organisms
    accounts = []  # weighted score per detection row; 0 => not a track candidate
    for i in range(0, results.shape[0]):
        score = results[i, -2]
        if threshold and score < threshold:
            accounts.append(0)
            continue

        label = int(results[i, -1])
        name = get_labelname(labelmap, label)[0]
        xmin = int(round(results[i, 0]))
        ymin = int(round(results[i, 1]))
        xmax = int(round(results[i, 2]))
        ymax = int(round(results[i, 3]))
        # account for track priority
        accounts.append(weights[label] * score)

        # single-argument print is identical on Python 2 and Python 3
        print('{} {} {} {} {} {}'.format(score, name, xmin, ymin, xmax, ymax))
        # bounding box of object
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
        # label information
        text = 'p({}|box)={:.1f}'.format(name, score)
        cv2.putText(img, text, (int(xmin), int(ymin) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)

    budbox = ()  # initial track bounding box
    flag = False  # whether to track the next frame
    # Bug fix: the original called accounts.remove(value) and then reused
    # accounts.index(value) as a row index into `results`; after the first
    # removal the indices shift, so later iterations selected the WRONG
    # detection's coordinates.  Sorting row indices by weighted score
    # (descending, stable so ties keep the original first-match order)
    # preserves the index<->row correspondence without mutation.
    for key in sorted(range(len(accounts)), key=lambda j: accounts[j], reverse=True):
        if accounts[key] <= 0:
            break  # remaining candidates all have zero priority
        x_mi = int(round(results[key, 0]))
        y_mi = int(round(results[key, 1]))
        x_ma = int(round(results[key, 2]))
        y_ma = int(round(results[key, 3]))
        # integer centers (// keeps Python-2 semantics under Python 3 too)
        center_x = (x_mi + x_ma) // 2
        center_y = (y_mi + y_ma) // 2
        if budxmin <= center_x <= budxmax and budymin <= center_y <= budymax:  # object in the tracking area
            flag = True
            budbox = (x_mi, y_mi, x_ma - x_mi, y_ma - y_mi)
            break

    return budbox, flag

# Clock used by the tic()/toc() stopwatch pair.  time.clock() was removed in
# Python 3.8; perf_counter (available since 3.3) is the monotonic
# replacement, with a fallback to time.clock for the Python 2 runtime this
# script targets.  (The `or` keeps time.clock unevaluated on Python 3.)
_clock = getattr(time, 'perf_counter', None) or time.clock

def tic():
    """Start the stopwatch: record the current clock in global ``tt``."""
    globals()['tt'] = _clock()

def toc():
    """Return seconds elapsed since the last tic()."""
    return _clock() - globals()['tt']

# Frame counter; only consumed by the commented-out cv2.imwrite snapshot
# calls inside the main loop below.
e=2

if __name__ == '__main__':
    # Command line: --gpu_id selects the CUDA device (>= 0 enables GPU mode).
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_id', type=int, default=0)
    args = parser.parse_args()

    # TCP link to the controller that consumes the fixed-width
    # 'A%05dB%05dC%05dD1' position messages sent in the main loop.
    host = '192.168.1.16'
    port = 1995
    s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)    # import socket module
    s.connect((host, port))
    

    # ++++++++++++++++ open the camera ++++++++++++++++++ #
    # set read image thread, read image first, then detecting and tracking object.
    image1 = None
    image2 = None
    t1 = threading.Thread(target=video_read_left, name='read_video_thread1')
    t1.start()
    tic()
    # Busy-wait ~10 s so the capture thread can open the RTSP stream and
    # publish the global frame1 before it is dereferenced below.
    while toc() < 10.0:
        continue
    height=len(frame1)
    width=len(frame1[0])
    # The camera delivers a side-by-side stereo frame; only the left half is
    # fed to the detector.  NOTE(review): `width/2` relies on Python 2
    # integer division — under Python 3 it becomes a float index and raises.
    image1 = frame1[0:height,0: width/2]
    #image2 = frame1[0:height, width/2: width]
   
    # +++++++++++++++++++++++++++++++++++++++++++++++++++ #



    # ++++++++++++++ detecting preparation ++++++++++++++ #
    # gpu preparation
    if args.gpu_id >= 0:
        caffe.set_device(args.gpu_id)
        caffe.set_mode_gpu()

    # load labelmap (maps numeric class ids to display names)
    labelmap_file = 'data/VOC0712/labelmap_voc.prototxt'
    file = open(labelmap_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(file.read()), labelmap)
    
    # load model (RefineDet VGG16; weights from iteration 110000)
    model_def = 'models/VGGNet/VOC0712/refinedet_vgg16_320x320/deploy.prototxt'
    model_weights = 'models/VGGNet/VOC0712/refinedet_vgg16_320x320/VOC0712_refinedet_vgg16_320x320_iter_110000.caffemodel'
    net = caffe.Net(model_def, model_weights, caffe.TEST)
    
    # image preprocessing: input resolution inferred from the prototxt name
    if '320' in model_def:
        img_resize = 320
    else:
        img_resize = 512
    net.blobs['data'].reshape(1, 3, img_resize, img_resize)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_mean('data', np.array([104, 117, 123]))  # mean pixel
    transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
    transformer.set_channel_swap('data', (2, 1, 0))  # the reference model has channels in BGR order instead of RGB
    # +++++++++++++++++++++++++++++++++++++++++++++++++++ #
    

    # ++++++++++++++ ranging preparation ++++++++++++++++ #
    # Native stereo-ranging library; its convert_left / rang / test entry
    # points are called in the main loop.  The C-side semantics are not
    # visible from this file — see the library's source for the contract.
    ll = ctypes.cdll.LoadLibrary  
    lib = ll("/home/deepmind/URPC2018/libpycallclass.so")  
    
    # ctypes mirror of the C struct {float A, B, C;} returned by lib.test —
    # presumably the (x, y, z) offsets of the ranged target; verify in C source.
    class testdll(Structure):_fields_=[('A',c_float),('B',c_float),('C',c_float)] 
    # +++++++++++++++++++++++++++++++++++++++++++++++++++ #

    IsDetecting = False
    #filename1 = '/home/deepmind/URPC2018//video_left.avi'
   
    #video1 = cv2.VideoWriter(filename1, cv2.VideoWriter_fourcc('X', '2', '6', '4'), fps1, size1, 1)#('M', 'P', '4', '2'),('I', '4', '2', '0')  ('M', 'J', 'P', 'G') ('D', 'I', 'V', 'X') ('X', '2', '6', '4')('X', 'V', 'I', 'D')
   
    # Main detect -> range -> report loop; runs until 'q' is pressed.
    while True:
       
        # Take the newest left half-frame published by the capture thread.
        image1 = frame1[0:height,0: width/2]
        #image2 = frame1[0:height, width/2: width]
        print "+++++++++++++++++++++++++++"
        # The Caffe transformer is configured for RGB input in [0, 1]
        # (it rescales to [0, 255] and swaps channels back itself).
        temp = image1
        temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
        temp = temp / 255.
        transformed_image = transformer.preprocess('data', temp)
        net.blobs['data'].data[...] = transformed_image
        
        # detection_out rows: [image_id, label, conf, xmin, ymin, xmax, ymax]
        # with coordinates normalized to [0, 1]; scale back to pixels here.
        detections = net.forward()['detection_out']
        det_label = detections[0, 0, :, 1]
        det_conf = detections[0, 0, :, 2]
        det_xmin = detections[0, 0, :, 3] * image1.shape[1]
        det_ymin = detections[0, 0, :, 4] * image1.shape[0]
        det_xmax = detections[0, 0, :, 5] * image1.shape[1]
        det_ymax = detections[0, 0, :, 6] * image1.shape[0]
        result = np.column_stack([det_xmin, det_ymin, det_xmax, det_ymax, det_conf, det_label])
        # get initial bounding box for tracking and decide whether to track or not.
        box, IsDetecting = DetectResults(image1, result, labelmap, 0.5)
               
        #cv2.imwrite("./L%d.jpg"%e,image1)
        #cv2.imwrite("./R%d.png"%e,image2)

        if IsDetecting:
            #i=i+1
            IsDetecting = False
            # grab the frame dimensions
            (H, W) = image1.shape[:2]
            # rectangle
            (x, y, w, h) = [int(v) for v in box]
           
            # Hand the full stereo frame to the native library — presumably
            # it splits the halves itself for stereo matching; TODO confirm.
            temp_l = frame1
            #temp_r = image_2

            lib.convert_left.restype = ctypes.c_float
            #lib.convert_right.restype = ctypes.c_float
            
            img1 = np.asarray(temp_l, dtype=np.uint8)
            #img2 = np.asarray(temp_r, dtype=np.uint8)

            # Raw pixel buffer passed as char*; the ndarray must stay alive
            # for the duration of the native call.
            img1 = img1.ctypes.data_as(ctypes.c_char_p)
            #img2 = img2.ctypes.data_as(ctypes.c_char_p)

            value1 = lib.convert_left(img1)
            #value2 = lib.convert_right(img2)

            #print value1, value2
            print value1

            a = c_float(0)
            b = c_float(0)            

            # ranging: pass the tracked box corners to the C library
            lib.rang.restype=c_float
            lib.rang.argtypes = [c_float,c_float,c_float,c_float,c_float,c_float]
            num=lib.rang(a,b, x, y, x+w, y+h)            

            #print num  
            A=c_float()
            B=c_float() 
            C=c_float()

            # Fetch the result struct computed on the C side.
            # NOTE(review): the t.A/t.B/t.C assignments below store fresh
            # zero-valued c_floats; only the struct RETURNED by lib.test(t)
            # is actually used.
            t=testdll()
            t.A=A
            t.B=B
            t.C=C
            lib.test.restype=testdll
 
            t=lib.test(t)
            print t.A, t.B, t.C
            dx = t.A
            dy = t.B
            dz = t.C

            # the set of information we'll be displaying on the frame
            info = [
                ("x: ", "{:.2f}".format(dx)),
                ("y: ", "{:.2f}".format(dy)),
                ("z: ", "{:.2f}".format(dz)),
            ]
            # loop over the info tuples and draw them on our frame
            cv2.rectangle(image1, (x, y), (x+w, y+h), (0, 255, 0), 2)
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(image1, text, (int(x), int(y+h) + (3-i) * 15),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
            

            # Report the target offset to the controller as fixed-width ints.
            s.send('A{:05d}B{:05d}C{:05d}D1'.format(int(dx), int(dy), int(dz)))
            print 'A{:05d}B{:05d}C{:05d}D1'.format(int(dx), int(dy), int(dz))
            print "+++++++++++++++++++++++++++"
            e=e+1
        
        else:
            # no trackable target this frame: report zeros
            print 'A00000B00000C00000D1'
            s.send('A00000B00000C00000D1')

        
        #video1.write(frame1)  # write left video
        cv2.namedWindow("Frame",0)
        cv2.imshow("Frame", image1)   # visualize detect or track results
       
       
        key = cv2.waitKey(1) & 0xFF
        # if press 'q' key, jump out of the loop
        if key == ord("q"):
            break
    # NOTE(review): s.close is referenced but never CALLED — missing
    # parentheses; should be s.close(), so the socket leaks on exit.
    s.close
                

        
