# -*- coding: UTF-8 -*-
import cv2
import time
import os
import datetime
import numpy as np
import tools.cameraTools as tools
from PIL import Image

# Haar-cascade face detector (frontal faces); the "alt" model trades some
# speed for fewer false positives vs. the "default" cascade kept below.
# faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
faceCascade = cv2.CascadeClassifier('haarcascade_frontalface_alt.xml')
# Font used to stamp timestamps onto displayed/recorded frames.
font = cv2.FONT_HERSHEY_SIMPLEX


class Camera(object):
    """Webcam helper: face-sample capture, LBPH recognizer training, and
    motion-triggered video recording driven by multiprocessing queues."""

    def __init__(self):
        pass

    # Collect face samples from the webcam, then train the recognizer.
    def InterceptFace(self, face_id=None, face_name=None):
        """Capture face crops from camera 0 and train the model.

        face_id: numeric label embedded in the sample filenames
            (``face_data/User-<id>-<n>.jpg``).  When None, only a live
            preview is shown (press ESC to exit).
        face_name: currently unused; kept for interface compatibility.
        """
        cap = cv2.VideoCapture(0)
        count = 0
        while True:
            # ok reports whether a frame was actually grabbed; skip bad reads
            # instead of crashing on a None frame.
            ok, img = cap.read()
            if not ok:
                continue
            img, faces = tools.GetFace(img)
            cv2.imshow('face', img)
            # ESC exits early — previously there was no way out of the loop
            # when face_id was None.
            if cv2.waitKey(1) == 27:
                break
            # len() avoids the ambiguous truth value of a numpy detection array.
            if face_id is not None and len(faces) != 0:
                # One grayscale conversion per frame (was once per face).
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                for (x, y, w, h) in faces:
                    cv2.imwrite("face_data/User-" + str(face_id) + '-' + str(count) + '.jpg',
                                gray[y: y + h, x: x + w])
                    count += 1
                    print(count)
            if count > 1000:
                break

        cap.release()
        cv2.destroyAllWindows()
        self.TrainFace()

    # Train the LBPH face-recognition model from saved samples.
    @staticmethod
    def TrainFace():
        """Train an LBPH recognizer on every image in ``face_data/`` and
        write the result to ``trainer.yml``.

        The numeric label is parsed from the second dash-separated field of
        each filename (``User-<id>-<n>.jpg`` as produced by InterceptFace).
        """
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        detector = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
        path = 'face_data'

        # Load the locally captured training images as (crop, label) pairs.
        def getImagesAndLabels(path):
            imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
            faceSamples = []
            ids = []
            for imagePath in imagePaths:
                PIL_img = Image.open(imagePath).convert('L')  # force grayscale
                img_numpy = np.array(PIL_img, 'uint8')
                # Avoid shadowing the builtin ``id``.
                face_id = int(os.path.split(imagePath)[-1].split("-")[1])
                faces = detector.detectMultiScale(img_numpy)
                for (x, y, w, h) in faces:
                    faceSamples.append(img_numpy[y:y + h, x: x + w])
                    ids.append(face_id)
            return faceSamples, ids

        print('Training faces. It will take a few seconds. Wait ...')
        faces, ids = getImagesAndLabels(path)
        recognizer.train(faces, np.array(ids))

        recognizer.write(r'trainer.yml')
        print('Train Done')

    # Producer: grab frames, feed the processing/recording queues, show live view.
    @staticmethod
    def GetPicture(rec_queue, img_queue, result_queue):
        """Capture frames from camera 0 until ESC is pressed.

        rec_queue:    fed (faces, move_time, frame) tuples for RecordVideo.
        img_queue:    fed raw frames for ProcessingPictures.
        result_queue: consumed for processed frames to display.
        """
        cap = cv2.VideoCapture(0)
        n = 0
        while True:
            state, img = cap.read()
            if not state:
                # Failed grab — don't crash on img.copy() of None.
                continue
            temp_img = img.copy()
            now_time = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
            if n == 0:
                # Prime both queues once so the consumers start up.
                img_queue.put(temp_img)
                rec_queue.put(((), None, temp_img))
                n += 1
            if not result_queue.empty():
                faces, cnts, shape, move_time, r_img = result_queue.get(False)
                img_queue.put(temp_img)
                if rec_queue.empty():
                    rec_queue.put((faces, move_time, temp_img))
                r_img = cv2.putText(r_img, now_time, (0, 10), font, 0.4, (255, 255, 255), 1)
                cv2.imshow('Processing', r_img)
            elif rec_queue.empty():
                # Keep the recorder fed even when processing lags behind.
                rec_queue.put(((), None, temp_img))

            show_img = temp_img.copy()
            show_img = cv2.putText(show_img, now_time, (0, 10), font, 0.4, (255, 255, 255), 1)
            cv2.imshow('now', show_img)

            k = cv2.waitKey(1)
            if k == 27:  # ESC
                break
        cap.release()
        cv2.destroyAllWindows()

    # Worker: face detection + frame-difference motion detection.
    @staticmethod
    def ProcessingPictures(img_queue, result_queue):
        """Consume frames from img_queue, annotate detections, and emit
        (faces, cnts, shape, move_time, annotated_frame) on result_queue."""
        background_img = None
        while True:
            img = img_queue.get()
            temp_img = img.copy()

            # GetFace returns (annotated_img, faces); the previous code kept
            # the whole tuple as ``faces`` so len(faces) was always 2 and
            # DrawRectangel received the wrong argument.
            img, faces = tools.GetFace(img)

            if background_img is None:
                background_img = img

            cnts, shape, move_time = tools.GetMove(background_img, img)

            # Next iteration diffs against the unannotated current frame.
            background_img = temp_img

            if cnts:
                cv2.drawContours(img, cnts, -1, (0, 0, 255), 2)
            if len(faces) != 0:
                img = tools.DrawRectangel(img, faces)

            data = (faces, cnts, shape, move_time, img)
            result_queue.put(data)

    # Consumer: record motion events to ./Vedio/, keep only long-enough clips.
    @staticmethod
    def RecordVideo(rec_queue):
        """Write frames to an AVI while motion was seen within the last 5 s.

        Clips shorter than 100 frames are deleted; longer ones are renamed to
        include the peak face count and frame count.
        """
        frame_num = 0
        start_time = 0
        fourcc = cv2.VideoWriter_fourcc(*"DIVX")
        out = None
        file_path = ''
        file_name = ''
        faces_max = 0
        finish = False

        while True:
            now_time = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
            faces, move_time, img = rec_queue.get()
            faces_n = len(faces)
            if move_time:
                # Motion seen — restart the 5-second keep-recording window.
                start_time = time.time()

            if faces_n != 0:
                faces_max = max(faces_n, faces_max)

            end_time = time.time()
            last_time = int(end_time - start_time)

            if last_time < 5:
                frame_num += 1
                if frame_num == 1 and move_time is not None:
                    print(f'get someone move at {move_time}')
                    # NOTE: directory name "Vedio" kept as-is — it must match
                    # the folder the rest of the project uses.
                    file_path = f'./Vedio/{now_time}.avi'
                    file_name = file_path[file_path.rfind(r'/') + 1:]
                    out = cv2.VideoWriter(file_path, fourcc, 25, (640, 480))
                elif frame_num > 1 and out is not None:
                    # Guard: a window can open without a writer when the first
                    # counted frame carried no move_time.
                    img = cv2.putText(img, now_time, (0, 10), font, 0.4, (255, 255, 255), 1)
                    out.write(img)
            elif out is not None:
                # Close the writer BEFORE remove/rename: deleting or renaming
                # a still-open file raises PermissionError on Windows.
                out.release()
                out = None
                if frame_num < 100:
                    print(f'too short! remove{file_name}')
                    os.remove(file_path)
                else:
                    new_name = f'./Vedio/({faces_max})faces-({frame_num})-{file_name}'
                    os.rename(file_path, new_name)
                    print(new_name[new_name.rfind('/') + 1:], 'Save Successfully!')
                finish = True
            else:
                finish = True

            # Reset per-clip state.
            if finish:
                frame_num = 0
                faces_max = 0
                finish = False

    # Dump a pre-collected frame group to a single AVI file.
    @staticmethod
    def SaveVideo(save_queue):
        """Consume lists of frames from save_queue and write each list as
        ``./Vedio/<count>-<timestamp>.avi`` at 25 fps, 640x480."""
        while True:
            now_time = datetime.datetime.now().strftime('%Y-%m-%d %H-%M-%S')
            frame_group = save_queue.get()
            print('start re')
            fourcc = cv2.VideoWriter_fourcc(*"DIVX")
            print(f'start recording... ({len(frame_group)})')
            out = cv2.VideoWriter(f'./Vedio/{len(frame_group)}-{now_time}.avi', fourcc, 25, (640, 480))
            for frame in frame_group:
                out.write(frame)
            print(f'{now_time}.avi save!')
            frame_group.clear()
            out.release()