#!/usr/bin/env python
# coding: utf-8

# In[1]:


import cv2
import numpy as np
#import socket
#import socketserver

from pynput.mouse import Button, Controller
import time


# In[2]:


cap=cv2.VideoCapture(0)#Camera capture of cv (device index 0 = default webcam)
#Haar Cascade classifier for face and eyes
#NOTE(review): absolute Anaconda install paths — cv2.data.haarcascades would be
#portable, but the paths are left untouched here.
face_cascade = cv2.CascadeClassifier('D:\\ProgramData\\Anaconda3\\Lib\\site-packages\\cv2\\data\\haarcascade_frontalface_default.xml')
eye_cascade = cv2.CascadeClassifier('D:\\ProgramData\\Anaconda3\\Lib\\site-packages\\cv2\\data\\haarcascade_eye_tree_eyeglasses.xml')
font = cv2.FONT_HERSHEY_SIMPLEX  # font for all on-frame overlay text

# Disabled TCP configuration for the socket-server experiment further below.
'''
host = '192.168.3.27'#host server ip
port = 18888
'''

mouse = Controller()  # pynput mouse controller driven by the judgement module


# In[3]:


# Legacy TCP transmission experiment, deliberately disabled (kept verbatim
# inside a string literal); superseded by the Bluetooth module further below.
'''
#Data Transmitting Module(In work)
#Because Raspberry Pi cannot use SID directly to work as keyboard,I try to transmit data to computer so as to move mouse on computer.
#In this way,Pi works as server and computer works as client,this .py file works as server program.
#TCP protocal requires that both pi and computer are in the same LAN and their IP addresses shall be known.
#Perhaps Bluetooth connection is a better solution.
class Myserver(socketserver.BaseRequestHandler):
    def handle(self):
        while(True):
            client = self.request
            print('Client connected')
            #buf = client.recv(1024)
            try:
                client.send("11")
            except:
                print('socket.error')
                return
server = socketserver.ThreadingTCPServer((host,port),Myserver)
server.serve_forever()
'''


# In[ ]:


#Enhanced data transmission module using Bluetooth
# Scans nearby devices for the laptop by display name, then opens an RFCOMM
# connection on channel 1. The main loop later sends "LOrR,UpOrDown" direction
# strings through this socket.
import bluetooth
target_name = 'LAPTOP-CEVGGQPF'  # Bluetooth display name of the receiving computer
target_address = None
# NOTE(review): with lookup_names=True, PyBluez's discover_devices returns
# (address, name) pairs, so `bdaddr` below may be a tuple rather than a bare
# address — verify that lookup_name() accepts it.
nearby_devices = bluetooth.discover_devices(lookup_names=True)
for bdaddr in nearby_devices:
    if target_name == bluetooth.lookup_name( bdaddr ):
        target_address = bdaddr
        break
if target_address is not None:
    print("found target bluetooth device with address ", target_address)
else:
    print("could not find target bluetooth device nearby")
    
sock=bluetooth.BluetoothSocket( bluetooth.RFCOMM )
# Best-effort connect: if target_address is still None or the peer refuses,
# the exception is reported and the socket closed; the script continues.
try:
    sock.connect((target_address,1))
    print("connection successful")
except Exception as e:
    print("connection fail\n",e)
    sock.close()


# In[4]:


# Shared mutable state for the main loop (all boxes are [x, y, w, h]).
facePos = np.array([0,0,0,0])  # last detected face box (frame coordinates)
eyePos = np.array([[0,0,0,0],[10,0,0,0]])  # last eye boxes, relative to the face box
leyePos = np.array([0,0,0,0])  # last left-eye box (detection with x < face width / 2)
staticPos = np.array([0,0,0,0])  # face box frozen when static mode is entered
staticEyePos = np.array([0,0,0,0])  # eye boxes frozen when static mode is entered
isStatic = False  # toggled by the 's' key; gates freezing and the judgement module
settled = False  # True once the static reference data has been frozen this session
mouseMov = False  # mouse-movement flag (see the 'm' key handler in the main loop)
def sharpen(image):
    """Emphasize edges in *image* with a Laplacian-style 3x3 filter.

    The classic sharpening kernel is [[0,-1,0],[-1,5,-1],[0,-1,0]]; the
    centre weight of 4.7 used here degrades the overall picture but makes
    eyes and pupils stand out for the later Canny step. Smaller centre
    weights keep fewer details; ~4.4-4.7 works well.
    """
    laplace_kernel = np.array(
        [[0, -1, 0],
         [-1, 4.7, -1],
         [0, -1, 0]],
        np.float32,
    )
    return cv2.filter2D(image, -1, kernel=laplace_kernel)


# In[ ]:


# Main processing loop: one iteration per camera frame.
#  1. Detect the face and eyes (Haar cascades) and draw their boxes.
#  2. In static mode ('s' key) freeze one frame's face/eye data as the
#     reference pose.
#  3. Find pupil-candidate contours (sharpen -> blur -> Canny -> moments).
#  4. Translate the eye's displacement from the reference pose into pynput
#     mouse moves and send the direction over the Bluetooth socket.
while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break  # camera read failed: stop instead of crashing on a None frame
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.equalizeHist(gray)  # normalize lighting for the cascades
    faces = face_cascade.detectMultiScale(gray, 1.3, 5)
    # cv2.Canny() does not apply its own Gaussian blur, so sharpen and then
    # blur first to suppress unnecessary edge detail.
    shp = sharpen(frame)
    shp = cv2.GaussianBlur(shp, (7, 7), 0)
    gshp = cv2.cvtColor(shp, cv2.COLOR_BGR2GRAY)
    # 45/100 are the hysteresis thresholds; bigger values keep fewer details.
    canny = cv2.Canny(gshp, 45, 100)

    # ---- Face & eye detection ----
    for (x, y, w, h) in faces:
        frame = cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
        # Eyes are only searched in the upper half of the face box.
        roi_gray = gray[y:int(y + h / 2), x:x + w]
        roi_color = frame[y:int(y + h / 2), x:x + w]
        # Caution: eye coordinates are relative to the face ROI.
        eyes = eye_cascade.detectMultiScale(roi_gray)
        try:
            # BUGFIX: the original condition `len(eyes)>=1 or eyes!=None` made
            # the else branch unreachable (the second operand is truthy for any
            # tuple/array); test emptiness alone so the fallback is reachable.
            if len(eyes) >= 1:
                eyePos = list(eyes)
            else:
                eyePos = [[0, 0, 0, 0], [10, 0, 0, 0]]
            if eyes[0][0] < w / 2:
                # A detection in the left half of the face = left eye
                # (works less well when the head turns left).
                leyePos = eyes[0]
            facePos = faces[0]  # single-face program: track the first face only
        except IndexError:
            pass  # no eye detections this frame: keep the previous data
        for (ex, ey, ew, eh) in eyes:
            # Green dot = eye-box centre (pupil approximation); green box = eye area.
            cv2.circle(roi_color, (int(ex + ew / 2), int(ey + eh / 2)), 2, (0, 255, 0), -1)
            cv2.rectangle(roi_color, (ex, ey), (ex + ew, ey + eh), (0, 255, 0), 2)

    # ---- Static-state module ----
    # staticPos/staticEyePos freeze one frame's face and eye boxes; mouse
    # movement is only driven while isStatic is True and the frozen first eye
    # lies in the left half of the face area.
    i = 0
    try:
        if not isStatic:
            settled = False  # allow the reference data to be re-frozen later
        for (eex, eey, eew, eeh) in eyePos:
            cv2.putText(frame, f'posX={eex},posY={eey},w={eew},h={eeh}', (10, 300 - i * 30), font, 0.8, (255, 255, 255), 2)
            i = i + 1
    except (TypeError, ValueError):
        pass  # eyePos rows may be malformed right after a detection gap
    i = 1
    if isStatic:
        if not settled:
            settled = True  # freeze the reference pose exactly once per session
            staticEyePos = eyePos
            staticPos = facePos
        cv2.putText(frame, 'Static', (10, 330), font, 0.8, (255, 255, 255), 2)
        for (eex, eey, eew, eeh) in staticEyePos:
            cv2.putText(frame, f'Static Eye Pos(Rlt to Face),posX={eex},posY={eey},w={eew},h={eeh}', (10, 330 + i * 30), font, 0.8, (255, 255, 255), 2)
            i = i + 1
        cv2.putText(frame, f'Static Face Position,posX={staticPos[0]},posY={staticPos[1]}', (10, 330 + i * 30), font, 0.8, (255, 255, 255), 2)

    # ---- Pupil detection via contours ----
    # Estimate each pupil as the average gravity centre (image moments) of all
    # Canny contours lying strictly inside that eye's box.
    ret, thresh = cv2.threshold(canny, 127, 255, 0)
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    count = [0, 0]              # accepted-contour count per eye
    grvsum = [[0, 0], [0, 0]]   # summed gravity-centre coords per eye: [eye][x, y]

    for index, contr in enumerate(contours):
        M = cv2.moments(contr)
        if M['m00'] == 0:
            continue  # zero-area contour has no gravity centre
        cx = int(M['m10'] / M['m00'])
        cy = int(M['m01'] / M['m00'])  # gravity-centre coordinates
        x, y, w, h = cv2.boundingRect(contr)
        for ind, po in enumerate(eyePos):  # usually ind 0 = left eye, 1 = right eye
            frame = cv2.rectangle(frame, (po[0] + facePos[0], po[1] + facePos[1]), (po[0] + po[2] + facePos[0], po[1] + po[3] + facePos[1]), (0, 0, 255), 2)
            # Only contours strictly inside the eye box count; the 14/10 px
            # margins reject the box border and eyelid edges.
            if (x > po[0] + facePos[0] + 14 and y > po[1] + facePos[1] + 10
                    and x + w < po[0] + po[2] + facePos[0] - 14
                    and y + h < po[1] + po[3] + facePos[1] - 10):
                frame = cv2.drawContours(frame, contours, index, (0, 0, 255), 3)
                cv2.putText(frame, f'Contour{index}:cx={cx},cy={cy}', (10, 50 + ind * 20), font, 0.6, (255, 255, 255), 2)
                if ind < 2:
                    grvsum[ind][0] += cx
                    grvsum[ind][1] += cy
                    count[ind] += 1
    if count[0] != 0 and count[1] != 0:
        # BUGFIX: the original mixed the divisors up (left-eye y averaged by
        # count[1], right-eye x by count[0]); each eye's average must use its
        # own contour count.
        cv2.putText(frame, f'Contour center of grv avg:xl={int(grvsum[0][0]/count[0])},yl={int(grvsum[0][1]/count[0])},xr={int(grvsum[1][0]/count[1])},yr={int(grvsum[1][1]/count[1])}', (10, 110), font, 0.8, (255, 255, 255), 2)

    cv2.imshow('frame', frame)

    # ---- Judgement module ----
    # Only active in static mode and only when staticEyePos[0] really holds
    # the left eye. LOrR / UpOrDown encode the detected direction (1/-1/0)
    # and are also sent to the Bluetooth client.
    LOrR = 0
    UpOrDown = 0
    if isStatic:
        try:
            if staticEyePos[0][0] + staticEyePos[0][2] / 2 < staticPos[2] / 2:
                lpos = staticEyePos[0]
            else:
                # NOTE(review): this exits the whole capture loop when the
                # frozen eye is not the left one — confirm that quitting
                # (rather than skipping the judgement) is really intended.
                break
            # Eye moved left past the 20 px dead zone (120 px sanity cap):
            # move the mouse left with speed v = (|eyeX-staticX|-10)**0.6 + 15.
            if facePos[0] + leyePos[0] > staticPos[0] + lpos[0] + 20 and facePos[0] + leyePos[0] < staticPos[0] + lpos[0] + 120:
                mouse.move(-int((facePos[0] + leyePos[0] - staticEyePos[0][0] - staticPos[0] - 10) ** 0.6 + 15), 0)
                LOrR = 1
            elif facePos[0] + leyePos[0] < staticPos[0] + lpos[0] - 20 and facePos[0] + leyePos[0] > staticPos[0] + lpos[0] - 120:
                # Eye moved right: mirror of the branch above.
                mouse.move(int((staticPos[0] + staticEyePos[0][0] - facePos[0] - leyePos[0] - 10) ** 0.6 + 15), 0)
                LOrR = -1
            else:
                LOrR = 0
            # Vertical axis: same scheme with a 12 px dead zone.
            if facePos[1] + leyePos[1] > staticPos[1] + lpos[1] + 12 and facePos[1] + leyePos[1] < staticPos[1] + lpos[1] + 80:
                mouse.move(0, int((facePos[1] + leyePos[1] - staticEyePos[0][1] - staticPos[1] - 12) ** 0.6 + 15))
                UpOrDown = 1
            elif facePos[1] + leyePos[1] < staticPos[1] + lpos[1] - 12 and facePos[1] + leyePos[1] > staticPos[1] + lpos[1] - 120:
                mouse.move(0, -int((staticPos[1] + staticEyePos[0][1] - facePos[1] - leyePos[1] - 12) ** 0.6 + 15))
                UpOrDown = -1
            else:
                UpOrDown = 0
            print(f'{LOrR},{UpOrDown}')
            sock.send(f'{LOrR},{UpOrDown}')
        except Exception:
            pass  # missing detection data or a socket error: skip this frame

    # BUGFIX: the original called cv2.waitKey(10) three separate times, so each
    # keypress was consumed by only one of the three checks and could be missed
    # entirely. Read the key once per frame and compare against each command.
    key = cv2.waitKey(10) & 0xFF
    if key == ord('s'):
        isStatic = not isStatic
    elif key == ord('m'):
        # BUGFIX: the original wrote `isStatic = bool(1-mouseMov)`, which
        # overwrote isStatic and never toggled mouseMov itself.
        mouseMov = not mouseMov
    elif key == ord('q'):
        break


# In[ ]:


# Release the camera handle and close every OpenCV window on shutdown.
cap.release()
cv2.destroyAllWindows()

