from __future__ import print_function
from imutils.object_detection import non_max_suppression
from imutils import paths
from concurrent.futures import ProcessPoolExecutor
import numpy as np
import argparse
import pandas as pd
import imutils
import cv2
import requests
import json
import time

#cap = cv2.VideoCapture("rtsp://admin:admin12345@192.168.1.230:554/h264/ch43/sub/av_stream")
cap = cv2.VideoCapture("rtsp://admin:admin123@192.168.1.164:554/h264/ch38/sub/av_stream")

print (cap.isOpened())
postUrl = "http://192.168.2.13:82/index.php/home/Api/ReceiveCameraLocation"

# HOG person detector with OpenCV's pre-trained default people SVM
hog = cv2.HOGDescriptor()
hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
# Build the base coordinate system and compute the transform matrix M

# Scene 1 (older calibration, kept for reference)
# pts1 = np.float32([[200,144],[243,156],[180,177],[229,192]])
# pts2 = np.float32([[228,149],[228,179],[198,149],[198,179]])
#rad = np.float32([[162,52]])# origin of the target coordinate system
# Calibration for the exhibition-hall scene ("正本展厅")
pts1 = np.float32([[106.7,185],[168.85,206],[63.25,230],[129.25,261]])  # 4 reference points in image pixels
pts2 = np.float32([[330,164],[398,155],[328,75],[395,90]])              # same 4 points in floor-plan coords
radt = np.array([[0,0]])  # origin offset subtracted from transformed points

M = cv2.getPerspectiveTransform(pts1,pts2)

def humDet(filt = "NMS", threshold = 0):
	"""Continuously grab frames, run HOG person detection, post and draw results.

	filt: "NMS" applies non-max suppression; "WF" keeps detections whose SVM
	weight exceeds `threshold`; any other value keeps the raw boxes.
	Loops until the global capture closes. Shows one annotated window per
	filter mode ("humanDetect" + filt).
	"""
	while cap.isOpened():
		ret, frame = cap.read()
		# Fix: a dropped RTSP frame yields ret=False / frame=None and would
		# crash on frame.copy() — skip it instead.
		if not ret or frame is None:
			continue
		image = frame
		# Detect on a grayscale copy to speed up detection
		orig = cv2.cvtColor(image.copy(), cv2.COLOR_BGR2GRAY)
		rects, weights = hog.detectMultiScale(orig, winStride=(4, 4), padding=(8, 8), scale=1.25)
		# Convert (x, y, w, h) boxes to corner form (xA, yA, xB, yB)
		rects = np.array([[x, y, x + w, y + h] for (x, y, w, h) in rects])
		if filt == "NMS":
			pick = NMSFilter(rects)
		elif filt == "WF":
			pick = WeightFilter(rects, weights, threshold)
		else:
			pick = rects

		# Send the (rate-limited) detections to the remote endpoint
		postData(pick)

		for (xA, yA, xB, yB) in pick:
			# Draw the box by its two opposite corners
			cv2.rectangle(image, (xA, yA), (xB, yB), (0, 255, 0), 2)
			# Bottom-center of the box — used as the person's ground point
			bef = np.int32([[(xB - xA) / 2 + xA, yB]])
			aft = trans(bef, M)
			# Position relative to the coordinate-system origin offset
			out = np.abs(aft - radt)
			# Draw the ground point and its mapped coordinates
			cv2.circle(image, tuple(bef[0]), 3, (0, 0, 255), -1)
			cv2.putText(image, '%.2f,%.2f' % (out[0,0], out[0,1]),tuple(bef[0]),cv2.FONT_HERSHEY_SIMPLEX,1,(0,0,255),1)
		cv2.imshow("humanDetect" + filt,image)
		cv2.waitKey(1)

def realtimeVideo():
	"""Display the raw camera stream until the global capture closes."""
	while cap.isOpened():
		ret, frame = cap.read()
		# Fix: guard against dropped frames — cv2.imshow(..., None) raises.
		if not ret or frame is None:
			continue
		cv2.imshow("video", frame)
		cv2.waitKey(1)

def NMSFilter(rects):
	"""Merge overlapping detection boxes via non-maximum suppression."""
	return non_max_suppression(rects, probs=None, overlapThresh=0.65)

def WeightFilter(rects, weights, threshold = 0.7):
	"""Keep only detections whose SVM weight exceeds `threshold`.

	rects: (n, 4) corner-form boxes; weights: (n, 1) detector scores.
	Returns a tuple of int32 boxes sorted by descending weight, an empty
	list if nothing passes, or `rects` unchanged when it is empty.
	"""
	if not len(rects):
		return rects
	# Join boxes with their scores into one table for sorting/filtering
	table = pd.DataFrame(np.hstack((rects, weights)),
	                     columns=('xA', 'yA', 'xB', 'yB', 'weights'))
	ordered = table.sort_values(by='weights', ascending=False)
	kept = np.array(ordered[ordered.weights > threshold])
	if not kept.size:
		return []
	# Drop the weight column, keep only the box corners
	return tuple(np.int32(kept[:, :4]))

def trans(bef, M):
	"""Map one pixel coordinate through the 3x3 perspective matrix M.

	bef: array-like of shape (1, 2) holding a single (x, y) point.
	Returns an ndarray of shape (1, 2) with the transformed point.

	dst(x1,y1) = ((M11x+M12y+M13)/(M31x+M32y+M33),
	              (M21x+M22y+M23)/(M31x+M32y+M33))
	"""
	# Append 1 to get homogeneous coordinates, as a 3x1 column vector
	vec = np.append(bef, [1]).reshape(3, 1)
	# np.matrix is deprecated in NumPy — use a plain dot product instead
	dst = np.dot(np.asarray(M, dtype=float), vec)
	# Perspective divide by the homogeneous component
	dst = dst / dst[2, 0]
	return dst[:2, 0].reshape(1, 2)

def postData(picks):
	"""Rate-limited push of transformed detection coordinates to postUrl.

	picks: iterable of (xA, yA, xB, yB) boxes. Returns 0 when throttled,
	`picks` when empty, 1 after building (and optionally posting) the payload.

	NOTE(review): relies on globals `nextTime` and `interval`, which are only
	assigned under the __main__ guard — importing this module and calling
	postData directly would raise NameError.
	"""
	global nextTime,interval
	nt = int(time.time())
	# Throttle: only proceed once per `interval` seconds
	if nt > nextTime:
		nextTime += interval
	else:
		return 0 

	if len(picks) <= 0:
		# nothing detected, nothing to send
		return picks
	dic = {}
	temp = []
	for xA, yA, xB, yB in picks:
		# Bottom-center of each box — the person's ground point
		bef = np.int32([[(xB - xA) / 2 + xA, yB]])
		aft = trans(bef, M)
		# Position relative to the coordinate-system origin offset
		out = np.abs(aft - radt)
		temp += [[out[0,0], out[0,1]]]
	# NOTE(review): zip with the single-element list [1] truncates the payload
	# to at most one entry, e.g. {"1": [0, [x, y]]} — presumably the format the
	# receiving API expects; confirm before changing.
	pData = json.dumps(dict(zip([1],zip(range(0, len(temp)), temp))))
	print(pData)
	
	# Actual POST is disabled; payload is only printed for now.
	#response = requests.post(postUrl, data=pData)
	#print(response.text)
	return 1


if __name__ == '__main__':
	# Throttling state read by postData() via `global`.
	interval = 1
	nextTime = int(time.time())
	# Run weight-filtered detection (score threshold 0.9) in the foreground.
	humDet("WF", 0.9)

