from django.utils import timezone

from intrusionDect.models import alarmRecord

# camera1='摄像头1'
# camera2='摄像头2'
# camera3='摄像头3'
# camera4='摄像头4'
#
# area1='南门'
# # rec=alarmRecord(cameraId=camera1,region=area1,intrusionTime=timezone.now(),photo=)



# -*- coding: utf-8 -*-  # NOTE: coding declarations only take effect on line 1 or 2 of a file; here it is inert (UTF-8 is the Python 3 default anyway)
"""
Spyder Editor

This is a temporary script file.
"""

# import cv2
# import time
# import datetime
#
# cap = cv2.VideoCapture(0)
#
# avg = None
# lastUploaded = datetime.datetime.now()
# motionCounter = 0
# time.sleep(10)
# while(True):
#     # 逐帧获取图像
#     timestamp = datetime.datetime.now()  # fixed typo ("tiestamp") — referenced below as "timestamp"
#     ret, frame = cap.read()
#     text = "unoccupied"
#     if not ret:
#         break
#
#
#     # 对每帧图像进行操作
#     gray = cv2.resize(frame, (500, int(frame.shape[0] * 500 / frame.shape[1])))  # resize to width 500; cv2.resize expects a dsize tuple, not width= (that is imutils.resize)
#     gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)#变成灰色图像
#     gray = cv2.GaussianBlur(gray,(21,21),0)#高斯滤波
#     if avg is None:
#         avg = gray.copy().astype("float")
#         continue
#     cv2.accumulateWeighted(gray,avg,0.5)
#     # 显示处理后的图像
#     cv2.imshow('frame',gray)
#     #计算当前帧与第一帧的区别
#     frameDelta = cv2.absdiff(gray,cv2.convertScaleAbs(avg))
#    # cv2.imshow('first2',frameDelta)
#    #填充孔洞
#     thresh = cv2.threshold(frameDelta, 45, 255, cv2.THRESH_BINARY)[1]
#     thresh = cv2.dilate(thresh, None, iterations=2)
#     cv2.imshow('thresh',thresh)
#     #查找轮廓
#     contours, hierarchy = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)[-2:]
#   #  cv2.imshow('thresh2',thresh.copy())
#     for c in contours:
#         # if the contour is too small, ignore it
#         if cv2.contourArea(c) < 500:
#            continue
#
#         # 计算轮廓的边界框，在当前帧中画出该框
#         (x, y, w, h) = cv2.boundingRect(c)
#         cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
#         cv2.imshow('found',frame)
#         text = "Occupied"
#         cv2.putText(frame, "Room Status: {}".format(text), (10, 20),
#         cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)
#         cv2.putText(frame, datetime.datetime.now().strftime("%A %d %B %Y %I:%M:%S%p"),
#         (10, frame.shape[0] - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.35, (0, 0, 255), 1)
#     #if text == "Occupied":
#       #  if (timestamp - lastUploaded).seconds >= 2:  # timedelta exposes .seconds, not .second
#            # motionCounter+=1
#             #if motionCounter>=23:
#
#
#     if cv2.waitKey(1) & 0xFF == ord('q'):
#         break
#
# # When everything done, release the capture
# cap.release()
# cv2.destroyAllWindows()





#   识别
# def gen_display(addr, cno):
#     face_video = 0
#     face_count = 0
#     # fourcc = cv2.VideoWriter_fourcc(*"XVID")  # 定义视频解码器
#     fourcc = cv2.VideoWriter_fourcc('H', '2', '6', '4')
#     # 人脸识别模型，提取128D的特征矢量
#     # face recognition model, the object maps human faces into 128D vectors
#     # Refer this tutorial: http://dlib.net/python/index.html#dlib.face_recognition_model_v1
#     # facerec = dlib.face_recognition_model_v1(
#     #     "D:/pythonProject/vedioplay/dlibfile/dlib_face_recognition_resnet_model_v1.dat")
#     # Initialize Buttons
#     button = Buttons()
#     button.set_object('person')
#     colors = button.colors
#
#     # Opencv DNN
#     net = cv2.dnn.readNet("dnn_model/yolov4-tiny.weights", "dnn_model/yolov4-tiny.cfg")
#     model = cv2.dnn_DetectionModel(net)
#     model.setInputParams(size=(320, 320), scale=1 / 255)
#
#     # Load class lists
#     classes = []
#     with open("dnn_model/classes.txt", "r") as file_object:
#         for class_name in file_object.readlines():
#             class_name = class_name.strip()
#             classes.append(class_name)
#
#     print("Objects list")
#     print(classes)
#
#     # 创建 cv2 摄像头对象
#     # cv2.VideoCapture(0) to use the default camera of PC,
#     # and you can use local video name by use cv2.VideoCapture(filename)
#
#     # cap = cv2.VideoCapture(addr)
#
#     # cap.set(propId, value)
#     # 设置视频参数，propId 设置的视频参数，value 设置的参数值
#     # cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1200)
#     # cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 600)
#
#     while True:
#         # Get frames
#         ret, original_img = cap.read()
#         size = original_img.shape
#         w = size[1]
#         h = size[0]
#         gx = 0
#         gy = 0
#         gw = w
#         gh = h
#         if cno == 1:
#             gx = gol.get_value('xpos1') * w
#             gy = gol.get_value('ypos1') * h
#             gw = gol.get_value('width1') * w
#             gh = gol.get_value('height1') * h
#         elif cno == 2:
#             gx = gol.get_value('xpos2') * w
#             gy = gol.get_value('ypos2') * h
#             gw = gol.get_value('width2') * w
#             gh = gol.get_value('height2') * h
#         elif cno == 3:
#             gx = gol.get_value('xpos3') * w
#             gy = gol.get_value('ypos3') * h
#             gw = gol.get_value('width3') * w
#             gh = gol.get_value('height3') * h
#         elif cno == 4:
#             gx = gol.get_value('xpos4') * w
#             gy = gol.get_value('ypos4') * h
#             gw = gol.get_value('width4') * w
#             gh = gol.get_value('height4') * h
#         gx = int(gx)
#         gy = int(gy)
#         gw = int(gw)
#         gh = int(gh)
#         frame = original_img[gy:(gy + gh), gx:(gx + gw)]
#         cv2.rectangle(original_img, (gx, gy), (gx + gw, gy + gh), (0, 255, 255), 5)
#         # Get active buttons list
#         active_buttons = button.active_buttons_list()
#         # print("Active buttons", active_buttons)
#
#         # Object Detection
#         (class_ids, scores, bboxes) = model.detect(frame, confThreshold=0.3, nmsThreshold=.4)
#         for class_id, score, bbox in zip(class_ids, scores, bboxes):
#             (x, y, w, h) = bbox
#             class_name = classes[class_id]
#             color = colors[class_id]
#
#             if class_name in active_buttons:
#                 face_video = True
#                 cv2.putText(frame, class_name, (x, y - 10), cv2.FONT_HERSHEY_PLAIN, 2, color, 2)
#                 cv2.rectangle(frame, (x, y), (x + w, y + h), color, 5)
#
#         # Display buttons
#         button.display_buttons(frame)
#
#         # cv2.imshow("Frame", frame)
#         key = cv2.waitKey(1)
#         if key == 27:
#             break
#
#         if (face_video == True):
#             if (face_count == 0):
#                 now_time = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
#                 path = 'F:/Python work/IntrusionDetectionSystem/media/videos/' + str(now_time) + '.mp4'
#                 face_video_out = cv2.VideoWriter(path, fourcc, 10, (1000, 600))
#                 ###############################################################################
#                 # 写入数据库
#                 ###############################################################################
#                 video = videos(startTime=now_time, videoName=now_time + '.mp4')
#                 video.save()
#             if (face_count == 100):
#                 face_video = False
#                 face_count = 0
#             else:
#                 # img = frame
#                 img = original_img
#                 img = cv2.resize(img, (1000, 600))  # 一定要记得对获取帧进行尺寸变换，等于定义的写入帧的尺寸
#                 face_video_out.write(img)
#                 face_count += 1
#         else:
#             face_count = 0
#
#         frame = original_img
#         if ret:
#             # 将图片进行解码
#             ret, frame = cv2.imencode('.jpeg', frame)
#             if ret:
#                 # 转换为byte类型的，存储在迭代器中
#                 yield (b'--frame\r\n'
#                        b'Content-Type: image/jpeg\r\n\r\n' + frame.tobytes() + b'\r\n')
















