# -*- coding: utf_8 -*-

# Run this program with Python 3
import pyrealsense2 as rs
import socket
import pickle
import threading
from collections import deque,Counter
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn import metrics
import cv2

class VisionServer:
    """TCP server that detects a colored circular target with a RealSense
    camera and replies to clients with the target's pose as a pickled 4x4
    homogeneous transform in the robot (extrinsic) frame.

    Two threads cooperate: a display/detection thread that continuously
    updates ``pickPointDeque`` and ``depthFrame``, and one socket thread per
    client that clusters recent detections and sends poses on request.
    """

    def __init__(self, addr, port, colorRange, radiusRange, distanceRange):
        """Start the RealSense pipeline and listen on (addr, port).

        Args:
            addr: IP address to bind the TCP server to.
            port: TCP port to bind.
            colorRange: (lower, upper) HSV bounds for the target color.
            radiusRange: (min, max) valid target radius in pixels.
            distanceRange: (min, max) valid camera-Z distance in meters.
        """
        config = rs.config()
        config.enable_stream(rs.stream.color, 1920, 1080, rs.format.bgr8, 15)
        config.enable_stream(rs.stream.depth, 1280, 720, rs.format.z16, 15)

        self.pipeline = rs.pipeline()
        self.pipeline.start(config)
        # Align depth onto the color stream so pixel coordinates match.
        self.align = rs.align(rs.stream.color)

        colorStreamProfile = rs.video_stream_profile(
            self.pipeline.get_active_profile().get_stream(rs.stream.color))
        self.intrinsics = colorStreamProfile.as_video_stream_profile().get_intrinsics()
        # Camera-to-robot homogeneous transform from the calibration file.
        self.extrinsics = self.read_yaml('config.yaml')

        self.s = socket.socket()
        self.s.bind((addr, port))
        self.s.listen()

        # Pre-pickled sentinel replies (protocol 0 keeps them ASCII-safe):
        # all zeros  -> target not detected; all -1 -> illegal client input.
        self.CONST_NotDetected = pickle.dumps(np.zeros((4, 4), dtype=int), protocol=0)
        self.CONST_IllegalInput = pickle.dumps(np.full((4, 4), -1, dtype=int), protocol=0)

        self.colorRange = colorRange
        self.radiusRange = radiusRange
        self.distanceRange = distanceRange
        # Recent pick-point pixels; clustered in sendPose() to reject outliers.
        # NOTE(review): written by the display thread and read by socket
        # threads without a lock. deque appends are GIL-atomic, but the
        # np.array() snapshot in sendPose() assumes no concurrent resize.
        self.pickPointDeque = deque(maxlen=20)
        self.depthFrame = None  # latest aligned depth frame (set by display thread)

    def read_yaml(self, path):
        """Load the 'rotate' (3x3) and 'translation' (3x1) calibration
        matrices from an OpenCV YAML file and return them assembled into a
        4x4 homogeneous transform."""
        infile = cv2.FileStorage(path, cv2.FILE_STORAGE_READ)
        try:
            rotate = infile.getNode("rotate").mat()
            translation = infile.getNode("translation").mat()
        finally:
            # Bug fix: the FileStorage handle was never released.
            infile.release()
        upper = np.concatenate([rotate, translation], 1)
        bottom_row = np.array([[0, 0, 0, 1]])
        return np.concatenate([upper, bottom_row], 0)

    def showHSVValue(self, event, x, y, flags, param):
        """Mouse callback: on left click, show the clicked pixel's HSV value
        in the video window title (param is the current HSV image)."""
        hsvImage = param
        if event == cv2.EVENT_LBUTTONDOWN:
            hsvString = str(hsvImage[y, x])
            cv2.setWindowTitle('video', 'video  hsv: ' + hsvString)

    def get_frame(self):
        """Block for the next frame set and return (color, aligned depth)."""
        frames = self.pipeline.wait_for_frames()
        aligned_frames = self.align.process(frames)
        aligned_depth_frame = aligned_frames.get_depth_frame()
        color_frame = aligned_frames.get_color_frame()
        return color_frame, aligned_depth_frame

    def imageShowAndDetect(self):
        """Continuously grab frames, detect the largest in-range circle of
        the target color, record its pixel center, and display the feed.

        Runs until ESC is pressed in the video window.
        """
        cv2.namedWindow('video', 0)
        cv2.resizeWindow('video', 960, 540)
        while True:
            color_frame, depth_frame = self.get_frame()
            # Bug fix: validate the frames BEFORE dereferencing them. The
            # original called get_data() first, so the not-None check (and
            # its contradictory assert) could never run in time.
            if not color_frame or not depth_frame:
                continue
            self.depthFrame = depth_frame
            color_image = np.asanyarray(color_frame.get_data())

            hsv_image = cv2.cvtColor(color_image, cv2.COLOR_BGR2HSV)
            mask = cv2.inRange(hsv_image, self.colorRange[0], self.colorRange[1])
            erode_image = cv2.erode(mask, None, iterations=3)
            canny_image = cv2.Canny(erode_image, 50, 100, L2gradient=True)
            # [-2] selects the contour list under both OpenCV 3.x
            # (image, contours, hierarchy) and 4.x (contours, hierarchy).
            contours = cv2.findContours(canny_image, cv2.RETR_EXTERNAL,
                                        cv2.CHAIN_APPROX_SIMPLE)[-2]

            # Keep the largest enclosing circle whose radius is in range.
            pickPoint = (0, 0)
            maxRadius = 0
            for c in contours:
                (x, y), radius = cv2.minEnclosingCircle(c)
                cv2.circle(color_image, (int(x), int(y)), int(radius), (0, 0, 255), 3)
                if self.radiusRange[0] < radius < self.radiusRange[1] and radius > maxRadius:
                    maxRadius = int(radius)
                    pickPoint = (int(x), int(y))
            if maxRadius != 0:
                # Highlight the chosen target in green.
                cv2.circle(color_image, pickPoint, maxRadius, (0, 255, 0), 3)
            # (0, 0) is appended when nothing was found; sendPose() filters
            # that out via its x > 5 and y > 5 guard.
            self.pickPointDeque.append(list(pickPoint))

            cv2.imshow('video', color_image)
            # Re-register every frame so the callback sees the latest HSV image.
            cv2.setMouseCallback("video", self.showHSVValue, hsv_image)
            if cv2.waitKey(10) == 27:  # ESC quits the display loop
                cv2.destroyAllWindows()
                break

    def get_aim(self, cx, cy, target_depth):
        """Deproject pixel (cx, cy) at target_depth to a 3D camera-frame
        point and return its pose transformed into the extrinsic frame as a
        4x4 homogeneous matrix (identity rotation, deprojected translation)."""
        target_xyz_true = rs.rs2_deproject_pixel_to_point(
            self.intrinsics, [int(cx), int(cy)], target_depth)
        aim = np.concatenate([np.eye(3, 3), np.array(target_xyz_true).reshape(3, 1)], 1)
        aim = np.concatenate([aim, np.array([0, 0, 0, 1]).reshape(1, 4)], 0)
        output = self.extrinsics.dot(aim)
        print(output)
        return output

    def sendPose(self, conn):
        """Cluster the recent pick points, take the densest cluster's mean
        pixel, and send the deprojected pose (or the not-detected sentinel)
        to the client.

        Raises if the deque is empty or no depth frame exists yet; the
        caller (socketCmdProcess) handles that by sending the sentinel.
        """
        data = np.array(self.pickPointDeque)
        # DBSCAN rejects transient mis-detections: use only the largest cluster.
        db = DBSCAN(eps=15, min_samples=1).fit(data)
        print(db.labels_)
        clusterIndex = Counter(db.labels_).most_common(1)[0][0]
        members = data[db.labels_ == clusterIndex]
        # Floor-divide to integer pixel coordinates; int() also converts
        # numpy scalars, which get_distance() may reject.
        x = int(members[:, 0].sum() // len(members))
        y = int(members[:, 1].sum() // len(members))

        target_depth = self.depthFrame.get_distance(x, y)
        # x, y > 5 filters the (0, 0) "nothing found" entries near the origin.
        if x > 5 and y > 5 and self.distanceRange[0] <= target_depth <= self.distanceRange[1]:
            output = self.get_aim(x, y, target_depth)
            output = pickle.dumps(output, protocol=0)
            # sendall guarantees the whole pickled payload is transmitted.
            conn.sendall(output)
        else:
            print("--This target was not detected--")
            conn.sendall(self.CONST_NotDetected)
            print('已发送')

    def socketCmdProcess(self, conn):
        """Per-client command loop.

        Protocol: client first sends "start"; then repeatedly sends '1' to
        request a pose or 'stop' to end the session. Anything else gets the
        illegal-input sentinel. The connection is always closed on exit.
        """
        try:
            msg = conn.recv(1024).decode('utf-8')
            if msg == "start":
                conn.sendall('start successed'.encode())
                print('detection start')
                while True:
                    aim = conn.recv(1024).decode('utf-8')
                    print("this:{}".format(aim))

                    if aim == '1':
                        try:
                            self.sendPose(conn)
                        except Exception:
                            # Bug fix: was a bare except (also caught
                            # KeyboardInterrupt/SystemExit). Any detection
                            # failure degrades to the sentinel reply.
                            print("--This target was not detected--")
                            conn.sendall(self.CONST_NotDetected)
                            continue
                    elif aim == 'stop':
                        try:
                            conn.sendall('stop successed'.encode())
                        except Exception:
                            conn.sendall('stop failed'.encode())
                        break
                    elif not aim:
                        # Empty read: the peer closed the connection.
                        print('Disconnect')
                        break
                    else:
                        print('input erro,please again')
                        conn.sendall(self.CONST_IllegalInput)
                        continue
            else:
                conn.sendall('start failed'.encode())
        finally:
            # Bug fix: the client socket was never closed.
            conn.close()

    def start(self):
        """Launch the detection/display thread, then accept clients forever,
        handling each on its own thread."""
        imageShowAndDetectThread = threading.Thread(target=self.imageShowAndDetect)
        # show.setDaemon(True)
        imageShowAndDetectThread.start()
        while True:
            conn, addr = self.s.accept()
            print("connected：", addr)
            print('please input："start" keep the working state')

            socketThread = threading.Thread(target=self.socketCmdProcess, args=(conn,))
            socketThread.start()

if __name__ == '__main__':
    # Address and port the vision server listens on.
    addr, port = '127.0.0.1', 18080
    # HSV (lower, upper) bounds for the target color. Left-click any pixel
    # in the video window to read its HSV value from the window title bar.
    colorRange = (np.array([90, 170, 90]), np.array([125, 255, 255]))
    # Acceptable target radius in pixels: (minimum, maximum).
    radiusRange = (3, 200)
    # Acceptable camera-frame Z distance in meters: (minimum, maximum).
    distanceRange = (0.2, 1.0)

    server = VisionServer(addr, port, colorRange, radiusRange, distanceRange)
    server.start()
