# -*- coding: utf-8 -*-
"""
Created on Sat Jul 27 16:11:02 2019

@author: LEN
"""

# import the necessary packages
from imutils.video import VideoStream
import argparse
import datetime
import imutils
import time
import cv2
import numpy as np
import asyncio
import websockets
from socket import gaierror
import ssl


# TLS context that skips certificate verification (the alarm server
# presumably uses a self-signed certificate — TODO confirm).  Built with
# the public ssl API instead of the private ssl._create_unverified_context()
# helper the original used.
# NOTE(review): disabling verification permits man-in-the-middle attacks;
# prefer pinning the server certificate via context.load_verify_locations().
context = ssl.create_default_context()
context.check_hostname = False  # must be disabled before verify_mode
context.verify_mode = ssl.CERT_NONE


async def alarm():
    """Send the string "Alarm" to the alert websocket server and print
    the server's reply.

    Opens a fresh wss connection per call; propagates whatever
    websockets.connect raises on network or handshake failure.
    """
    uri = "wss://nussh.happydoudou.xyz:8000"
    async with websockets.connect(uri, ssl=context) as websocket:
        await websocket.send("Alarm")
        print("Alarm")
        reply = await websocket.recv()
        print(f"< {reply}")

# construct the argument parser and parse the arguments
# Command-line interface: an optional video path and the minimum contour
# area (in pixels) treated as motion.  `args` is consumed as a plain dict
# by the detection loop below.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-v", "--video",
    help="path to the video file")
parser.add_argument(
    "-a", "--min-area",
    type=int,
    default=500,
    help="minimum area size")
args = vars(parser.parse_args())

# if the video argument is None, then we are reading from webcam
#if args.get("video", None) is None:
#	vs = VideoStream(src=0).start()
#	time.sleep(2.0)
 
# read directly from the webcam with OpenCV (device 0)

# open the default webcam (device 0); the --video file path is not wired
# up to the capture source in this script
vs = cv2.VideoCapture(0)
if vs.isOpened():
    print('cam is open')

# grayscale version of the previously processed frame; None until the
# first frame has been read and used to seed the comparison
firstFrame = None

# reuse one event loop for every alarm rather than looking it up per event
loop = asyncio.get_event_loop()

# loop over the frames of the video
while True:
    # grab the current frame; on end-of-stream / camera failure the
    # frame comes back as None
    (grabbed, frame) = vs.read()
    if frame is None:
        break

    text = "Unoccupied"

    # resize the frame, convert it to grayscale, and blur it to suppress
    # high-frequency noise before differencing
    frame = imutils.resize(frame, width=500)
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    gray = cv2.GaussianBlur(gray, (21, 21), 0)

    # the very first frame only seeds the reference image
    if firstFrame is None:
        firstFrame = gray
        continue

    # frame differencing: absolute difference between the previous frame
    # and the current one, thresholded and dilated into motion blobs
    frameDelta = cv2.absdiff(firstFrame, gray)
    thresh = cv2.threshold(frameDelta, 25, 255, cv2.THRESH_BINARY)[1]
    thresh = cv2.dilate(thresh, None, iterations=2)

    # next iteration compares against the current frame
    firstFrame = gray

    # find contours of the motion blobs; grab_contours() handles the
    # differing return signatures of OpenCV 2/3/4 (BUG FIX: the old
    # `cnts[0] if is_cv2() else cnts[1]` selection breaks on OpenCV 4,
    # which returns a 2-tuple again)
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    cnts = imutils.grab_contours(cnts)

    # loop over the contours
    for c in cnts:
        # ignore contours smaller than the configured minimum area
        # (BUG FIX: was a hard-coded 100, ignoring the --min-area flag)
        if cv2.contourArea(c) > args["min_area"]:
            (x, y, w, h) = cv2.boundingRect(c)
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
            text = "Occupied"

    # notify the alarm server at most once per frame (BUG FIX: the alarm
    # previously fired — and blocked the loop — once per qualifying contour)
    if text == "Occupied":
        loop.run_until_complete(alarm())

    # draw the room status and timestamp on the frame, once per frame
    # (BUG FIX: the status text was previously drawn inside the contour
    # loop, so it was redrawn per contour and absent on quiet frames)
    cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
        cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
    cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
        (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)

    # show the frame and record if the user presses a key
    cv2.imshow("Security Feed", frame)
    cv2.imshow("Thresh", thresh)
    cv2.imshow("Frame Delta", frameDelta)
    key = cv2.waitKey(1) & 0xFF

    # if the `q` key is pressed, break from the loop
    if key == ord("q"):
        break

# cleanup the camera and close any open windows
vs.release()
cv2.destroyAllWindows()