from __future__ import print_function

import argparse
import json
import time
import urllib.request
from concurrent.futures import ProcessPoolExecutor

import cv2
import imutils
import numpy as np
import pandas as pd
import pymysql
from imutils import paths
from imutils.object_detection import non_max_suppression

interval = 1
nextTime = int(time.time())
postUrl = "http://192.168.2.13:82/index.php/home/Api/ReceiveCameraLocation"
def WeightFilter(rects, weights, threshold = 0.7):
	if len(rects) <= 0:
		#print("未检测到人体，", rects)
		return rects
	recW = np.hstack((rects, weights))
	#print(rects, weights)
	dfRecW = pd.DataFrame(recW, columns=('xA', 'yA', 'xB', 'yB', 'weights'))
	dfRecW = dfRecW.sort_values(by='weights', ascending=False)
	pick = np.int32(dfRecW[dfRecW.weights > threshold])
	if pick.size >= 1:
		return tuple(pick[:,:4])
	else :
		return []
def trans(bef, M):
	bef = np.append(bef, [1])#补齐位数，应用变换矩阵
	bef = bef.reshape(3,1)#变换结构 3行1列
	befm = np.matrix(bef)

	#src(x,y),计算dst(x1,y1)的公式：
	#dst(x1,y1) = ((M11x+M12y+M13)/(M31x+M32y+M33),(M21x+M22y+M23)/(M31x+M32y+M33))
	#矩阵运算 M(3*3) befm(3*1)
	aftm = M * befm
	#变换后的坐标
	aftm = (aftm / aftm[2,0])[:2, 0]
	return np.array(aftm).reshape(1,2)
def postData(picks):
	global nextTime,interval,postUrl
	nt = int(time.time())
	if nt > nextTime:
		nextTime += interval
	else:
		return 0 

	print(nt,nextTime)
	if len(picks) <= 0:
		#print("未检测到人体，", rects)
		return picks
	dic = {}
	temp = []
	for xA, yA, xB, yB in picks:
		bef = np.int32([[(xB - xA) / 2 + xA, yB]])
		aft = trans(bef, M)
		#转换后坐标系原点
		#radt = trans(rad, M)
		#坐标系中坐标
		out = np.abs(aft - radt)
		temp += [[out[0,0], out[0,1]]]
	pData = json.dumps(dict(zip(range(0, len(temp)), temp)))
	#print(pData)
	
	response = requests.post(postUrl, data=pData)
	#print(response.text)
	return 1

db = pymysql.connect("192.168.2.13","root","root","ibeacon_kc",use_unicode=True, charset="utf8")
cursor = db.cursor()
sql = "SELECT * FROM tf_blts_camera"

cursor.execute(sql)
# 获取所有记录列表
names = locals()
results = cursor.fetchall()
#cap = range(0,len(results))
db.close()

radt = np.array([[0,0]])

for key in range(0,len(results)):
	row = results[key]
	names['M%s' % key] = cv2.getPerspectiveTransform(np.float32(json.loads(row[5])),np.float32(json.loads(row[6])))
	names['cap%s' % key] = cv2.VideoCapture(row[3])


hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
currentFrame = 0
while True:
    filt = "WF"
    threshold = 0.7
    for key in range(0,len(results)):
        #image = ''
        row = results[key]
        names['ret%s' % key],names['frame%s' % key] = names['cap%s' % key].read()
        image = names['frame%s' % key]
        if(currentFrame % 15 ==0):
            #print("open key "+str(key))
            orig = image.copy() 
            (rects, weights) = hog.detectMultiScale(orig, winStride=(8, 8), padding=(16,16), scale=1.03)
            rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
    			#print("检测到人体：", len(rects))
            if filt == "NMS":
                pick = NMSFilter(rects)
            elif filt == "WF":
            	pick = WeightFilter(rects, weights, threshold)
            else:
	            pick = rects
            temp = []
            if len(pick) > 0: 
	            for (xA, yA, xB, yB) in pick:
	                cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)#对角线画法
	                bef = np.int32([[(xB - xA) / 2 + xA, yB]])
	                aft = trans(bef, names['M%s' % key])
	                out = np.abs(aft - radt)
	                temp += [[out[0,0], out[0,1]]]
				#绘制坐标
	                cv2.circle(image, tuple(bef[0]), 3, (0, 0, 255), -1)
	                cv2.putText(image, '%.2f,%.2f' % (out[0,0], out[0,1]),tuple(bef[0]),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1)
	            pData = dict(zip([row[0]],zip(range(0, len(temp)), temp)))
	            print(pData)
        cv2.imshow("frame"+str(key),image)
    currentFrame += 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cv2.waitKey(0)




