import os
import cv2
import re
import sys
from PIL import Image
import numpy as np
import urllib
import urllib.request
import hashlib
from flask import Flask, render_template, jsonify,make_response,json,request
from flask_cors import *
app= Flask(__name__)  # module-level Flask app shared by all route handlers below
app.config['JSON_AS_ASCII']=False  # allow non-ASCII (e.g. Chinese) text in JSON responses

CORS(app, supports_credentials=True)     # enable cross-origin requests (with credentials) for the API

def getImageAndLabels(path):
    """Collect grayscale face crops and integer labels from a photo directory.

    Each file in *path* is expected to be named ``<int_label>.<ext>``; the
    integer prefix before the first '.' becomes the training label. Photos in
    which the Haar cascade detects no face contribute no samples (the inner
    loop simply never runs), which guards against face-less images.

    :param path: directory containing the training photos
    :return: ``(facesSamples, ids)`` — parallel lists of uint8 numpy face
             crops and their integer labels
    """
    facesSamples = []
    ids = []
    imagePaths = [os.path.join(path, f) for f in os.listdir(path)]
    # Haar cascade face detector shipped with the project
    face_detector = cv2.CascadeClassifier('./model/haarcascade_frontalface_default.xml')
    print('数据排列：', imagePaths)
    for imagePath in imagePaths:
        # open as 8-bit grayscale ('L') and convert to a uint8 array
        PIL_img = Image.open(imagePath).convert('L')
        img_numpy = np.array(PIL_img, 'uint8')
        # detect all faces in the photo
        faces = face_detector.detectMultiScale(img_numpy)
        # label = integer filename prefix, e.g. '3.jpg' -> 3
        # (renamed from `id`, which shadowed the builtin)
        label = int(os.path.split(imagePath)[1].split('.')[0])
        # one (crop, label) pair per detected face
        for x, y, w, h in faces:
            ids.append(label)
            facesSamples.append(img_numpy[y:y + h, x:x + w])
        print('id:', label)
    print('fs:', facesSamples)
    return facesSamples, ids

@app.route('/index')
def getCheck():
    """Train the LBPH face recognizer on ./data/photos and persist the model.

    Reads every photo via getImageAndLabels, trains a fresh LBPH model and
    writes it to trainer/trainer.yml.

    :return: the literal string "successTrained" on completion
    """
    path = './data/photos/'
    # face crops and their integer labels (parallel lists)
    faces, ids = getImageAndLabels(path)
    recognizer = cv2.face.LBPHFaceRecognizer_create()
    # LBPH expects the labels as a numpy integer array
    recognizer.train(faces, np.array(ids))
    # ensure the output directory exists — recognizer.write() does not create it
    os.makedirs('trainer', exist_ok=True)
    recognizer.write('trainer/trainer.yml')
    return "successTrained"

# Module-level state shared between the route handlers and face_detect_demo.
# NOTE: "recogizer" is a typo for "recognizer" but is used consistently below.
recogizer=cv2.face.LBPHFaceRecognizer_create()  # LBPH model; re-created and re-read per /verifyFace call
names=[]  # filename prefixes of the training photos, filled by name()
warningtime = 0  # counter of low-confidence detections, mutated by face_detect_demo
nameId=""  # id of the user currently being verified, set by /verifyFace
numId = 0  # NOTE(review): appears unused in this file — confirm before removing
def md5(text):
    """Return the hexadecimal MD5 digest of *text* encoded as UTF-8.

    The parameter was renamed from ``str`` (which shadowed the builtin);
    positional callers are unaffected. The redundant function-local
    ``import hashlib`` was removed — hashlib is imported at module top.

    :param text: string to hash
    :return: 32-char lowercase hex digest
    """
    m = hashlib.md5()
    m.update(text.encode("utf8"))
    return m.hexdigest()

# SMS-gateway status code -> human-readable message (Chinese), for reporting
# the result of sending a warning SMS. Keys are the gateway's string codes.
statusStr = {
    '0': '短信发送成功',
    '-1': '参数不全',
    '-2': '服务器空间不支持,请确认支持curl或者fsocket,联系您的空间商解决或者更换空间',
    '30': '密码错误',
    '40': '账号不存在',
    '41': '余额不足',
    '42': '账户已过期',
    '43': 'IP地址限制',
    '50': '内容含有敏感词'
}

def warning():
    """Alert hook fired after repeated low-confidence detections.

    Currently a stub: performs no I/O and simply returns the marker string.

    :return: the literal string "warn!"
    """
    return "warn!"

# Recognize faces in one frame against the trained LBPH model.
def face_detect_demo(img):
    """Detect faces in *img* and try to match each against the trained model.

    Reads module globals ``recogizer`` (trained LBPH model) and ``nameId``
    (expected label, as a string), and mutates ``warningtime``.

    :param img: BGR frame (numpy array) as produced by cv2.VideoCapture
    :return: True as soon as a detected face is predicted with confidence
             <= 90 whose predicted label equals ``nameId``; False otherwise
             (including when no face is found at all).
    """
    gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)  # convert to grayscale for detection/recognition
    face_detector=cv2.CascadeClassifier('./model/haarcascade_frontalface_default.xml')
    face=face_detector.detectMultiScale(gray)
    for x,y,w,h in face:
        # draw a box and circle on the frame (debug visualisation only)
        cv2.rectangle(img,(x,y),(x+w,y+h),color=(0,0,255),thickness=2)
        cv2.circle(img,center=(x+w//2,y+h//2),radius=w//2,color=(0,255,0),thickness=1)
        # LBPH prediction: lower confidence value means a better match
        ids, confidence = recogizer.predict(gray[y:y + h, x:x + w])
        print('标签id:',ids,'置信评分：', confidence)
        print(names)
        if confidence > 90:
            # poor match: count it, and fire warning() once per 100 misses
            global warningtime
            warningtime += 1
            if warningtime > 100:
               warning()
               warningtime = 0
            # cv2.putText(img, 'unkonw', (x + 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 1)
            print("it's unknow!!!")
        else:
            print("predict id: "+str(ids))
            print("verify id: "+nameId)
            # cv2.putText(img,str(names[ids-1]), (x + 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 255, 0), 1)
            if(str(ids)==nameId):
                return True
    return False
    # On headless Linux servers imshow is unnecessary (no display to draw on)
    # and would raise, so it stays disabled:
    #cv2.imshow('result',img)
def name():
    """Fill the module-level ``names`` list with one entry per training photo.

    For every file in ./data/photos/ the text before the first '.' in its
    filename is appended to the global ``names`` (in os.listdir order).
    """
    photo_dir = './data/photos/'
    for entry in os.listdir(photo_dir):
        full_path = os.path.join(photo_dir, entry)
        prefix = str(os.path.split(full_path)[1].split('.', 2)[0])
        names.append(prefix)

@app.route('/verifyFace',methods=['post'])
def verifyFace():
    """Verify that the face in an uploaded video matches the trained id.

    Expects form field ``id`` and an uploaded file ``video``. The video is
    saved under ./videos/, then up to 60 frames are scanned with
    face_detect_demo against the persisted LBPH model.

    :return: "1" if a frame contains a face predicted as ``id``, else "0"
    """
    global recogizer
    global names
    global warningtime
    global nameId

    nameId = request.values.to_dict().get('id')
    video = request.files.get("video")
    video_byte = video.stream.read()
    videoPath = "./videos/"
    # ensure the upload directory exists before writing
    os.makedirs(videoPath, exist_ok=True)
    # NOTE(review): nameId and video.filename are client-controlled and are
    # joined into a filesystem path unsanitized (path-traversal risk) — confirm
    # upstream validation or sanitize here.
    savedPath = os.path.join(videoPath, nameId + video.filename[-4:])
    with open(savedPath, 'wb') as f:
        f.write(video_byte)

    # reload the trained model and the label names for this verification run
    recogizer = cv2.face.LBPHFaceRecognizer_create()
    recogizer.read('trainer/trainer.yml')
    names = []
    warningtime = 0
    # BUG FIX: previously opened "videos/<id>.mp4" regardless of the uploaded
    # file's real extension; reuse the exact path the upload was saved to.
    cap = cv2.VideoCapture(savedPath)
    name()
    judgeFace = False
    frameCount = 0
    try:
        while True:
            frameCount += 1
            flag, frame = cap.read()
            if not flag:
                # end of video
                break
            if face_detect_demo(frame):
                judgeFace = True
                break
            if ord(' ') == cv2.waitKey(10):
                break
            if frameCount > 60:
                # cap the scan at 60 frames
                break
    finally:
        # always release the capture, even if a frame raises
        cv2.destroyAllWindows()
        cap.release()
    print(str(judgeFace))
    return "1" if judgeFace else "0"


@app.route('/saveImg',methods=['post'])
def savaImg():
    """Save an uploaded training photo and retrain the recognizer.

    Expects form field ``id`` and uploaded file ``image``. The photo is stored
    as ./data/photos/<id><ext>, where <ext> is the last 4 characters of the
    client filename (assumed extension), then getCheck() retrains the model.

    :return: getCheck()'s response ("successTrained") on success
    """
    # renamed local from `id` (shadowed the builtin)
    userId = request.values.to_dict().get('id')
    image = request.files.get("image")
    image_byte = image.stream.read()
    imagePath = "./data/photos/"
    # ensure the photo directory exists before writing
    os.makedirs(imagePath, exist_ok=True)
    # NOTE(review): userId and image.filename are client-controlled and used
    # in a filesystem path unsanitized (path-traversal risk) — confirm.
    with open(os.path.join(imagePath, userId + image.filename[-4:]), 'wb') as f:
        f.write(image_byte)
    return getCheck()
