'''
Detect marine objects in a video stream with a RefineDet (Caffe) model,
hand the highest-priority detection to a pysot SiamRPN tracker, range the
tracked box through a native stereo library (ctypes), and emit per-frame
control commands of the form A<dy>B<dx>C1.
'''

import argparse
import os
import sys
import numpy as np
import matplotlib.pyplot as plt
import skimage.io as io
import time
import cv2

# Make sure that caffe is on the python path:
caffe_root = '/home/wuhan/RefineDet/'
os.chdir(caffe_root)
sys.path.insert(0, os.path.join(caffe_root, 'python'))

import time
import caffe
import torch
import argparse
import threading
import ctypes
from ctypes import *
from socket import *
from queue import Queue
from google.protobuf import text_format
from caffe.proto import caffe_pb2
from pysot.core.config import cfg
from pysot.models.model_builder import ModelBuilder
from pysot.tracker.tracker_builder import build_tracker


def video_read(frame_q):
    """Capture frames from the video source and push them onto *frame_q*.

    Intended to run as a daemon thread. Each queue item is a dict with
    keys 'frame', 'frame2' (same ndarray), 'fps' and 'size' so the
    consumer gets the stream metadata alongside every frame.

    Parameters
    ----------
    frame_q : queue.Queue
        Bounded queue shared with the main detect/track loop.
    """
    # where the video comes from (file / camera index / RTSP url)
    #cap = cv2.VideoCapture("1.avi")
    cap = cv2.VideoCapture("/home/wuhan/forward03.avi")
    #cap = cv2.VideoCapture("rtsp://192.168.1.8:554/user=admin&password=&channel=1&stream=1.sdp?real_stream")
    fps = cap.get(cv2.CAP_PROP_FPS)
    size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
            int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))

    try:
        while cap.isOpened():
            # Throttle instead of busy-spinning when the consumer lags;
            # the original `continue` pegged a CPU core.
            if frame_q.qsize() >= 5:
                time.sleep(0.005)
                continue

            ok, frame = cap.read()  # read a frame
            # End of stream / read failure: stop. The original code both
            # discarded the very first frame (it was overwritten before
            # the first put) and enqueued a final None frame; reading
            # exactly once per put fixes both.
            if not ok or frame is None:
                break

            frame_q.put(
                {
                    'frame': frame,
                    'frame2': frame,
                    'fps': fps,
                    'size': size
                }
            )
    finally:
        # always release the capture device, even on interrupt/error
        cap.release()

def get_labelname(labelmap, labels):
    """Translate numeric label id(s) into display name(s).

    `labels` may be a single id or a list of ids; a list of the matching
    `display_name` strings is returned in the same order. An id absent
    from *labelmap* raises AssertionError.
    """
    if type(labels) is not list:
        labels = [labels]
    names = []
    for wanted in labels:
        match = None
        for entry in labelmap.item:
            if entry.label == wanted:
                match = entry.display_name
                break
        assert match is not None
        names.append(match)
    return names

def DetectResults(img, results, labelmap, height, width, threshold=0.6):
    """Draw detections on *img* and choose the best box to initialize tracking.

    Parameters
    ----------
    img : ndarray
        BGR frame; boxes and labels are drawn on it in place.
    results : ndarray
        One row per detection: [xmin, ymin, xmax, ymax, score, label]
        (pixel coordinates; see the column_stack in the caller).
    labelmap : caffe LabelMap
        Used to resolve display names via get_labelname.
    height, width : int
        Frame dimensions used to clamp boxes to the screen.
    threshold : float
        Minimum score for a detection to be drawn / considered.

    Returns
    -------
    (budbox, flag) : tuple
        budbox is (x, y, w, h) for the tracker, or () when flag is False.
    """
    # reasonable tracking area (fractions of the frame)
    budxmin = img.shape[1] * 0.0001
    budxmax = img.shape[1] * 0.8
    budymin = img.shape[0] * 0.2
    budymax = img.shape[0] * 0.8

    # per-class priority weights for the marine organisms; class ids are
    # assumed to fall in range(len(weights)) -- TODO confirm labelmap size
    weights = [0, 1, 1, 1, 0]
    accounts = []  # weighted score per detection row (0 => never tracked)
    for i in range(0, results.shape[0]):
        score = results[i, -2]
        if threshold and score < threshold:
            accounts.append(0)
            continue

        label = int(results[i, -1])
        name = get_labelname(labelmap, label)[0]
        xmin = int(round(results[i, 0]))
        ymin = int(round(results[i, 1]))
        xmax = int(round(results[i, 2]))
        ymax = int(round(results[i, 3]))
        # account for track priority
        accounts.append(weights[label] * score)

        print(score, name, xmin, ymin, xmax, ymax)
        # bounding box of object
        cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
        # label information
        text = 'p({}|box)={:.1f}'.format(name, score)
        cv2.putText(img, text, (int(xmin), int(ymin) - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)

    budbox = ()  # initial track bounding box
    flag = False  # whether to track the next frame
    # Visit candidates in descending weighted-score order. BUG FIX: the
    # original did accounts.remove(value), which shifted list indices so
    # results[key] pointed at the wrong detection row after the first
    # removal; zeroing the used slot keeps accounts aligned with results.
    for _ in range(len(accounts)):
        value = max(accounts)  # best remaining score
        if value <= 0:
            break  # nothing trackable left
        key = accounts.index(value)  # row index of that score
        accounts[key] = 0  # mark as consumed without disturbing indices

        x_mi = int(round(results[key, 0]))
        y_mi = int(round(results[key, 1]))
        x_ma = int(round(results[key, 2]))
        y_ma = int(round(results[key, 3]))
        # clamp the box so the object stays on screen
        if x_mi < 0:
            x_mi = 0
        if y_mi < 0:
            y_mi = 0
        if y_ma > height:
            y_ma = height
        if x_ma > width:
            x_ma = width
        center_x = (x_mi + x_ma) / 2
        center_y = (y_mi + y_ma) / 2
        # accept only objects whose center lies in the tracking area
        if budxmin <= center_x <= budxmax and budymin <= center_y <= budymax:
            flag = True
            budbox = (x_mi, y_mi, x_ma - x_mi, y_ma - y_mi)
            break

    return budbox, flag

def tic():  # start of timing
    """Start an interval timer, stored in the module-level global 'tt'."""
    # time.clock() was removed in Python 3.8; perf_counter() is the
    # documented monotonic replacement for measuring elapsed intervals.
    globals()['tt'] = time.perf_counter()

def toc():  # end of timing
    """Return seconds elapsed since the most recent tic()."""
    return time.perf_counter() - globals()['tt']


if __name__ == '__main__':
    # ---- command-line arguments ----
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu_id', type=int, default=0)
    args = vars(parser.parse_args())
    time_start = time.time()

    # control-board endpoint (TCP connection currently disabled below)
    host = '192.168.1.16'
    port = 1995

    s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP)    # TCP socket for sending commands
    #s.connect((host, port))

    # bounded queue shared with the capture thread (at most 6 pending frames)
    frame_q = Queue(6)

    # ++++++++++++++++ open the camera ++++++++++++++++++ #
    # set read image thread, read image first, then detecting and tracking object.
    image = None
    t = threading.Thread(target=video_read, name='read_video_thread', args=[frame_q,])
    t.setDaemon(True)
    t.start()
    tic()
    # busy-wait ~8 s so the capture thread has frames buffered before we start
    while toc() < 8.0:
        continue
    img2 = cv2.imread("2.jpg")  # overlay image (only used by the commented-out cv2.add calls)


    #image1 = None
    #image2 = None
    #t1 = threading.Thread(target=video_read_left, name='read_video_thread1')
    #t1.start()
    #tic()
    #while toc() < 10.0:
    #    continue
    #height=len(frame1)
    #width=len(frame1[0])
    #image1 = frame1[0:height,0: width/2]


    #cv2.imwrite('1.jpg', image)
    # +++++++++++++++++++++++++++++++++++++++++++++++++++ #


    # ++++++++++++++ detecting preparation ++++++++++++++ #
    # gpu preparation
    if args["gpu_id"] >= 0:
        caffe.set_device(args["gpu_id"])
        caffe.set_mode_gpu()

    # load labelmap (class id -> display name, Caffe LabelMap protobuf)
    labelmap_file = 'data/VOC0712/labelmap_voc.prototxt'
    file = open(labelmap_file, 'r')
    labelmap = caffe_pb2.LabelMap()
    text_format.Merge(str(file.read()), labelmap)

    # load the RefineDet detection model
    model_def = 'models/VGGNet/VOC0712/refinedet_vgg16_320x320/deploy.prototxt'
    model_weights = 'models/VGGNet/VOC0712/refinedet_vgg16_320x320/VOC0712_refinedet_vgg16_320x320_iter_120000.caffemodel'
    net = caffe.Net(model_def, model_weights, caffe.TEST)

    time_end = time.time()
    print('time cost', time_end - time_start, 's')
    # image preprocessing: input resolution is inferred from the deploy file name
    if '320' in model_def:
        img_resize = 320
    else:
        img_resize = 512
    net.blobs['data'].reshape(1, 3, img_resize, img_resize)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_mean('data', np.array([104, 117, 123]))  # mean pixel
    transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0,255] range instead of [0,1]
    transformer.set_channel_swap('data', (2, 1, 0))  # the reference model has channels in BGR order instead of RGB
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++ #

    # +++++++++++++++++ tracking preparation ++++++++++++++ #
    # load pysot SiamRPN config
    cfg.merge_from_file("/home/wuhan/pysot-master/experiments/siamrpn_r50_l234_dwxcorr/config.yaml")
    cfg.CUDA = torch.cuda.is_available() and cfg.CUDA
    device = torch.device('cuda' if cfg.CUDA else 'cpu')

    # create model
    model = ModelBuilder()

    # load tracker weights (mapped to CPU first, then moved to `device`)
    #model.load_state_dict(torch.load("/home/wuhan/pysot-master/experiments/siamrpn_r50_l234_dwxcorr/model.pth",
    #                                 map_location=lambda storage, loc: storage.cpu())['state_dict'])
    model.load_state_dict(torch.load("/home/wuhan/pysot-master/experiments/siamrpn_r50_l234_dwxcorr/model.pth",
                                     map_location=lambda storage, loc: storage.cpu()))
    model.eval().to(device)

    # build tracker (deferred: a fresh tracker is built per detection below)
    # tracker = build_tracker(model)
    # +++++++++++++++++++++++++++++++++++++++++++++++++++++ #

    # ++++++++++++++ ranging preparation ++++++++++++++++ #
    # native stereo-ranging library loaded via ctypes
    ll = ctypes.cdll.LoadLibrary
    lib = ll("/home/deepmind/URPC2018/libpycallclass.so")


    class testdll(Structure):
        # mirrors the C struct returned by lib.test: three float fields
        _fields_ = [('A', c_float), ('B', c_float), ('C', c_float)]

    # +++++++++++++++++++++++++++++++++++++++++++++++++++ #

    # first frame bundle from the capture thread
    frame_info = frame_q.get()
    frame = frame_info['frame']
    frame2 = frame_info['frame']
    fps = frame_info['fps']
    size = frame_info['size']

    IsTracking = False  # False: run the detector; True: run the tracker
    filename = '/home/wuhan/video.avi'
    video = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size)
    #video2 = cv2.VideoWriter(filename2, cv2.VideoWriter_fourcc('I', '4', '2', '0'), fps, size, 1)
    while True:
        # if frame is None:
        #     print('$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$')
        #     continue

        image = frame
        #image = cv2.add(frame, img2)

        # image acquisition
        image_1 = frame2
        # video2.write(image_1)
        #image_tmp = cv2.add(frame, img2)
        image_tmp = frame
        (H, W) = image_tmp.shape[:2]
        # print("+++++++++++++++++++++++++++")
        if not IsTracking:
            # ---- detection branch: BGR->RGB, scale to [0,1], run RefineDet ----
            temp = image
            temp = cv2.cvtColor(temp, cv2.COLOR_BGR2RGB)
            temp = temp / 255.
            transformed_image = transformer.preprocess('data', temp)
            net.blobs['data'].data[...] = transformed_image

            detections = net.forward()['detection_out']
            # detection_out columns: [img_id, label, conf, xmin, ymin, xmax, ymax]
            # with normalized coords, scaled here to pixels
            det_label = detections[0, 0, :, 1]
            det_conf = detections[0, 0, :, 2]
            det_xmin = detections[0, 0, :, 3] * image.shape[1]
            det_ymin = detections[0, 0, :, 4] * image.shape[0]
            det_xmax = detections[0, 0, :, 5] * image.shape[1]
            det_ymax = detections[0, 0, :, 6] * image.shape[0]
            result = np.column_stack([det_xmin, det_ymin, det_xmax, det_ymax, det_conf, det_label])
            #(H, W) = image.shape[:2]
            # get initial bounding box for tracking and decide whether to track or not.
            box, IsTracking = DetectResults(image, result, labelmap, H, W, 0.5)

            if IsTracking:
                tracker = build_tracker(model)

                tracker.init(image, box)   # tracker initialization
                # pixel offset -> actuator command; constants (364/350 centers,
                # 2.23/1.27 gains) look like a hand calibration -- TODO confirm. 2019.3.23
                dx = int(-((364.0 - box[0]-box[2]/2) * 2.23-5-119))
                dy = int(-((box[1]+box[3]/2 - 350) * 1.27+29-115))

                count = 0  # init count num
                init_x = box[0]+box[2]/2  # init coordinate x
                init_y = box[1]+box[3]/2  # init coordinate y

                # control command format A<dy>B<dx>C1 (also sent via socket when enabled)
                print ('A{:05d}B{:05d}C1'.format(dy, dx))



        else:
            # ---- tracking branch ----
            # grab the frame dimensions
            #(H, W) = image.shape[:2]
            # track
            outputs = tracker.track(image)
            box = outputs['bbox']
            score = outputs['best_score']

            if score <= 0.9:    # threshold test: tracking is considered lost below 0.9
                IsTracking = False

            # rectangle
            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(image, (x, y), (x+w, y+h), (0, 0, 255), 2) # tracking box

            # --------------- stereo ranging via the native library ---------------
            lib.convert_left.restype = ctypes.c_float
            img1 = np.asarray(image, dtype=np.uint8)
            # img2 = np.asarray(temp_r, dtype=np.uint8)

            img1 = img1.ctypes.data_as(ctypes.c_char_p)
            # img2 = img2.ctypes.data_as(ctypes.c_char_p)

            value1 = lib.convert_left(img1)
            # value2 = lib.convert_right(img2)

            # print value1, value2
            print(value1)

            a = c_float(0)
            b = c_float(0)

            # ranging
            lib.rang.restype = c_float
            lib.rang.argtypes = [c_float, c_float, c_float, c_float, c_float, c_float]
            num = lib.rang(a, b, x, y, x + w, y + h)

            # print num
            A = c_float()
            B = c_float()
            C = c_float()

            # NOTE(review): `t` here shadows the capture-thread handle created above
            t = testdll()
            t.A = A
            t.B = B
            t.C = C
            lib.test.restype = testdll

            t = lib.test(t)
            print(t.A, t.B, t.C)
            dx = t.A
            dy = t.B
            dz = t.C

            # -----------------------------------------------

            # the set of information we'll be displaying on the frame
            info = [
                ("x: ", "{:.2f}".format(dx)),
                ("y: ", "{:.2f}".format(dy)),
                ("z: ", "{:.2f}".format(dz)),
                ("IsTracking", IsTracking),
                ("FPS", "{:.2f}".format(fps)),
                ("BestScore", score)
            ]
            # loop over the info tuples and draw them on our frame
            for (i, (k, v)) in enumerate(info):
                text = "{}: {}".format(k, v)
                cv2.putText(image, text, (10, H - ((i * 20) + 20)),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            # output controlled quantity (same calibration as the detection branch)
            #dx = int(-((375 - box[0]-box[2]/2) * 2.23-5-119))
            #dy = int(-((box[1]+box[3]/2 - 306) * 1.27+29-115))
            #-12-95 near the root 2019.2.23#
            dx = int(-((364 - box[0]-box[2]/2) * 2.23-5-119))
            dy = int(-((box[1]+box[3]/2 - 350) * 1.27+29-115))
            #dx = int(-((436 - box[0]-box[2]/2) * 2.23-5-119))
            #dy = int(-((box[1]+box[3]/2 - 345) * 1.27+29-115))

            #s.send('A{:05d}B{:05d}C1'.format(dy, dx))
            print ('A{:05d}B{:05d}C1'.format(dy, dx))

            #print ("+++++++++++++++++++++++++++")
            #print ("****************")
            #print (fps)
            #print ("****************")
            # count consecutive frames the target has stayed near its anchor position
            count = count + 1
            cx = abs(x + w/2 - init_x)  # difference between the init_x and current x
            cy = abs(y + h/2 - init_y)  # difference between the init_y and current y
            if cx > 0.05*W or cy > 0.05*H: # target moved: reset counter and anchor
                count = 0
                init_x = x + w/2
                init_y = y + h/2
            if count == 300: # stationary for 300 frames: drop back to detection
                IsTracking = False
                del(tracker) # delete tracker

        video.write(image)  # write video
        cv2.imshow("Frame", image)   # visualize detect or track results
        key = cv2.waitKey(1) & 0xFF
        # if press 'q' key, jump out of the loop
        if key == ord("q"):
            break

        # next frame bundle from the capture thread
        frame_info = frame_q.get()
        frame = frame_info['frame']
        frame2 = frame_info['frame']
        fps = frame_info['fps']
        size = frame_info['size']

    cv2.destroyAllWindows()
    # s.close()

        
