# -*-coding:utf-8 -*-
import tkinter as tk
from MusicPlay import MusicPlay
from tkinter import StringVar
from PIL import Image,ImageTk
import cv2
import threading
import argparse
import time
import numpy as np 
import random
import os
from blueData import *
from blueAPI import blueAPI
from blueRunner import *
from GeneralKit import *
from selfRunner import startImageTrain
import copy
from tkinter.filedialog import askdirectory
import pickle
import tkinter.messagebox as msg
from keras.models import load_model

# Default filenames for the two trained Keras models.
defaultModName = "mod.h5"		# image-gesture classifier
defaultModName2 = "blueMod.h5"	# bluetooth/sensor classifier

# Module-level shared state. NOTE: `global` at module scope is a no-op, so
# the original never actually created these names; `conf` in particular
# raised NameError inside InitialConfFile() on a fresh install. Initialize
# them explicitly instead.
model = None	# image model, set by load_current_model()/startDataTrain()
model2 = None	# sensor model, set by load_current_model()
gestDict = get_dict()	# gesture id -> gesture name mapping (from blueData)
conf = {}	# per-gesture sample counters: {gesture_id: [image_count, sensor_count]}

def InitialConfFile():
	"""Create or load the per-gesture sample-counter file ``configure``.

	On first run, writes a zeroed counter table ``{gesture_id:
	[image_count, sensor_count]}`` to disk; otherwise loads the pickled
	table into the module-global ``conf``.
	"""
	global conf
	if not os.path.exists("configure"):
		# BUG FIX: `conf` was never initialized before being indexed,
		# raising NameError on a fresh install.
		conf = {i: [0, 0] for i in range(len(gestDict))}
		# BUG FIX: the original called f.seek(3) before dumping, which
		# left three NUL bytes at the start of the file and made the
		# pickle.load() in the else-branch fail on every later start.
		with open("configure", "wb") as f:
			pickle.dump(conf, f)
	else:
		with open("configure", "rb") as f:
			conf = pickle.load(f)

def load_current_model():
	"""Load the trained Keras models from disk into the module globals.

	Sets ``model`` (image classifier) and ``model2`` (sensor classifier)
	to the loaded network, or to None when the corresponding model file
	is missing. Each loaded model runs one dummy prediction to warm up.
	"""
	global model
	global model2
	# Image-data model.
	model = load_model(defaultModName) if os.path.exists(defaultModName) else None
	if model is not None:
		model.predict_classes(np.zeros((1, 24, 24, 1)))
		print("[INFO] 图像学习模型加载完成")
	# Sensor-data model.
	model2 = load_model(defaultModName2) if os.path.exists(defaultModName2) else None
	if model2 is not None:
		model2.predict_classes(np.zeros((1, 1, 3)))
		print("[INFO] 传感学习模型加载完成")

# Import-time side effects: load any previously trained models, then create
# or load the sample-counter file, before the GUI below is constructed.
load_current_model()
InitialConfFile()

def staticImage(img):
	"""Segment the hand region out of the left half of a BGR frame.

	Returns ``(patch, top_left, bottom_right)`` where ``patch`` is the
	skin-masked crop of the hand and the two points are its bounding box
	in half-frame coordinates, or ``None`` when no contour is found.
	"""
	frame = copy.deepcopy(img)
	# Work on the left half of the image only.
	frame = frame[:, :frame.shape[1] // 2, :]
	# Smooth, then move to HSV for skin-colour thresholding.
	blur = cv2.blur(frame, (3, 3))
	hsv = cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)
	# Skin-tone range mask.
	mask2 = cv2.inRange(hsv, np.array([2, 50, 50]), np.array([15, 255, 255]))
	# Morphological clean-up of the mask.
	kernel_square = np.ones((11, 11), np.uint8)
	kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
	dilation = cv2.dilate(mask2, kernel_ellipse, iterations=1)
	erosion = cv2.erode(dilation, kernel_square, iterations=1)
	dilation2 = cv2.dilate(erosion, kernel_ellipse, iterations=1)
	filtered = cv2.medianBlur(dilation2, 5)
	kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (8, 8))
	dilation2 = cv2.dilate(filtered, kernel_ellipse, iterations=1)
	kernel_ellipse = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
	dilation3 = cv2.dilate(dilation2, kernel_ellipse, iterations=1)
	median = cv2.medianBlur(dilation3, 5)

	ret, thresh = cv2.threshold(median, 127, 255, 0)
	# BUG FIX: OpenCV 3 returns (image, contours, hierarchy) but OpenCV 4
	# returns (contours, hierarchy); take the last two so both versions work.
	contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)[-2:]
	if len(contours) == 0:
		cv2.destroyAllWindows()
		return None
	# Pick the largest contour above the minimum area; falls back to the
	# first contour when none exceeds it (same as the original behaviour).
	max_area = 100
	ci = 0
	for i, cnt in enumerate(contours):
		area = cv2.contourArea(cnt)
		if area > max_area:
			max_area = area
			ci = i
	cnts = contours[ci]

	x, y, w, h = cv2.boundingRect(cnts)
	# Zero out non-skin pixels inside the bounding box. Vectorized: the
	# original per-pixel Python loop was O(w*h) per frame and very slow.
	roi = frame[y:y + h, x:x + w]
	roi[mask2[y:y + h, x:x + w] == 0] = 0
	return (roi, (x, y), (x + w, y + h))

def preProcessImage(img, size):
	"""Resize a BGR frame, mirror it horizontally and wrap it for Tk.

	Returns an ``ImageTk.PhotoImage`` ready to display in a tkinter widget.
	"""
	# cv2.resize allocates a new array and never mutates its input, so the
	# defensive deepcopy the original made first was pure overhead.
	frame = cv2.resize(img, size, interpolation=cv2.INTER_CUBIC)
	image = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)	# PIL expects RGB channel order
	image = Image.fromarray(image).transpose(0)	# 0 = horizontal flip (mirror for the user)
	image = ImageTk.PhotoImage(image)	# wrap as a Tk-displayable photo
	return image

def show_panel_image(panel, canv, img, place):
	"""Display *img* in *panel*, creating the Label inside *canv* on first use.

	A reference to the PhotoImage is stored on the widget (``panel.image``)
	so it is not garbage-collected while displayed. Returns the Label.
	"""
	if panel is not None:
		# Widget already exists: just swap the displayed image.
		panel.configure(image=img, highlightthickness=5, borderwidth=5)
	else:
		# First call: create and pack the image container.
		panel = tk.Label(canv, image=img)
		panel.pack(side=place, padx=5, pady=5)
	panel.image = img
	return panel

class App:
	"""Main application window.

	Collects gesture samples from the webcam and a bluetooth sensor,
	triggers model training, and runs live recognition. Shares the module
	globals ``model``, ``model2`` and ``conf`` with the loader functions.
	"""

	def __init__(self, root):
		# ---- gesture-collection state ----
		self.voicePlay = MusicPlay()
		self.blueStatusVar = StringVar()	# bluetooth status text
		self.blueStatusVar.set("未连接")
		self.valRes = StringVar()		# recognition result text
		self.valRes.set("无")
		self.gest = StringVar()		# gesture-number entry text
		self.index = StringVar()		# index variable
		self.imgPath = StringVar()		# image storage directory
		self.bluePath = StringVar()		# bluetooth storage directory
		self.imgPath.set("new_data2")
		self.bluePath.set("blue_data")

		self.valFlag = False	# one-shot: recognize the next frame
		self.validFlag = False	# the entered gesture number is valid
		self.staticFlag = False	# image collection is running
		self.confirmFlag = False	# self-verification is running
		self.ValidateFlag = [False, False]	# flipped by blueRunner's stop() after a delay
		self.stopEvent = None	# window-close event (unused placeholder)
		self.flag = True	# False once the window is closing
		self.thread = None	# video loop worker thread
		self.isValidIndex = False	# verification entry holds a usable index
		self.vs = None	# cv2.VideoCapture, opened inside videoloop
		self.blue = blueAPI()
		# ---- build the UI ----
		self.root = root
		self.panel = None	# Label showing the main video image
		self.minipanelA = None
		self.minipanelB = None
		self.root.title('珞樱V1.0')
		self.root.resizable(False, False)
		#self.root.iconbitmap('logo.ico')
		self.mainframe = tk.LabelFrame(self.root, text='数据构建')
		self.mainframe.pack(padx=5, pady=10)
		# video frame container
		self.videoFrame = tk.LabelFrame(self.mainframe)
		self.videoFrame.pack(side=tk.LEFT, padx=5, pady=5)
		self.miniFrame = tk.LabelFrame(self.mainframe, text='缩略图')
		self.miniFrame.pack(side=tk.LEFT, padx=5, pady=5)

		# BUG FIX: this widget used to be stored as self.frame, which
		# videoloop() overwrites with the camera frame on every read.
		self.statusFrame = tk.LabelFrame(self.mainframe, text='状态')
		self.statusFrame.pack(side=tk.RIGHT, padx=5, pady=5)

		self.canv = tk.Canvas(self.videoFrame)
		self.canv.configure(background='black', relief='ridge', width=600, height=450, highlightthickness=5, borderwidth=5)
		self.canv.pack(side=tk.LEFT, padx=5, pady=5)
		self.miniCanvA = tk.Canvas(self.miniFrame)
		self.miniCanvA.configure(background='black', relief='ridge', width=240, height=240, highlightthickness=2, borderwidth=2)
		self.miniCanvA.pack(side=tk.TOP, padx=5, pady=5)
		self.miniCanvB = tk.Canvas(self.miniFrame)
		self.miniCanvB.configure(background='black', relief='ridge', width=240, height=240, highlightthickness=2, borderwidth=2)
		self.miniCanvB.pack(side=tk.TOP, padx=5, pady=5)

		# gesture-number entry widgets
		self.labelGestFrame = tk.Frame(self.statusFrame)
		self.labelGestFrame.pack(side=tk.TOP, padx=5, pady=2)
		self.labelGest = tk.Label(self.labelGestFrame, text='姿势编号:')
		self.labelGest.pack(side=tk.LEFT, padx=5, pady=2)
		self.labelGestEntry = tk.Entry(self.labelGestFrame, width=10, textvariable=self.gest)
		self.labelGestEntry.pack(side=tk.LEFT, padx=5, pady=2)
		self.labelGestEntry.bind('<FocusOut>', self.refreshLabel)
		# current gesture display
		self.labelGestShowFrame = tk.Frame(self.statusFrame)
		self.labelGestShowFrame.pack(side=tk.TOP, padx=5, pady=2)
		self.labelGestShow = tk.Label(self.labelGestShowFrame, text='当前姿势为:')
		self.labelGestShow.pack(side=tk.LEFT, pady=2)
		self.labelGestShowInfo = tk.Label(self.labelGestShowFrame, text='')
		self.labelGestShowInfo.pack(side=tk.LEFT, padx=15, pady=5)
		# bluetooth connection status widgets
		self.bluetoothFrame = tk.Frame(self.statusFrame)
		self.bluetoothFrame.pack(side=tk.TOP, padx=5, pady=3)
		self.bluetoothLabel = tk.Label(self.bluetoothFrame, text='蓝牙连接状态:')
		self.bluetoothLabel.pack(side=tk.LEFT, padx=5, pady=2)
		self.bluetoothStatus = tk.Label(self.bluetoothFrame, textvariable=self.blueStatusVar)
		self.bluetoothStatus.pack(side=tk.LEFT, padx=5, pady=2)
		self.bTConnectButton = tk.Button(self.bluetoothFrame, text="连接蓝牙")
		self.bTConnectButton.bind('<ButtonRelease-1>', self.blueToothConnect)
		self.bTConnectButton.pack(side=tk.LEFT, padx=5, pady=2)
		# button area
		self.buttonFrame = tk.Frame(self.statusFrame)
		self.buttonFrame.pack(side=tk.BOTTOM, padx=5, pady=2)
		# image storage path
		self.pathFrame = tk.LabelFrame(self.statusFrame)
		self.pathFrame.pack(side=tk.TOP, padx=5, pady=5)
		self.pathLabel = tk.Label(self.pathFrame, text='图像存储路径:')
		self.pathLabel.pack(side=tk.LEFT, padx=5, pady=5)
		self.pathEntry = tk.Entry(self.pathFrame, width=25, textvariable=self.imgPath, state='readonly')
		self.pathEntry.pack(side=tk.LEFT, padx=5, pady=5)
		self.pathDir = tk.Button(self.pathFrame, text='选择路径', command=self.openImgDir)
		self.pathDir.pack(side=tk.LEFT, padx=5, pady=5)
		# bluetooth storage path
		self.pathbFrame = tk.LabelFrame(self.statusFrame)
		self.pathbFrame.pack(side=tk.TOP, padx=5, pady=5)
		self.pathbLabel = tk.Label(self.pathbFrame, text='蓝牙存储路径:')
		self.pathbLabel.pack(side=tk.LEFT, padx=5, pady=5)
		self.pathbEntry = tk.Entry(self.pathbFrame, width=25, textvariable=self.bluePath, state='readonly')
		self.pathbEntry.pack(side=tk.LEFT, padx=5, pady=5)
		self.pathbDir = tk.Button(self.pathbFrame, text='选择路径', command=self.openBlueDir)
		self.pathbDir.pack(side=tk.LEFT, padx=5, pady=5)

		# collection controls
		self.collectFrame = tk.LabelFrame(self.statusFrame)
		self.collectFrame.pack(side=tk.TOP, padx=5, pady=2)
		self.selFrame = tk.Frame(self.collectFrame)
		self.selFrame.pack(side=tk.LEFT)
		self.v = tk.IntVar()	# 1 = image data, 2 = bluetooth data
		self.selection1 = tk.Radiobutton(self.selFrame, text="图像数据", variable=self.v, value=1)
		self.selection1.pack(side=tk.TOP)
		self.selection2 = tk.Radiobutton(self.selFrame, text="蓝牙数据", variable=self.v, value=2)
		self.selection2.pack(side=tk.TOP)
		self.startButton = tk.Button(self.collectFrame, text='开始采集', command=self.start_col)
		self.startButton.pack(side=tk.LEFT, padx=5, pady=2)
		self.stopButton = tk.Button(self.collectFrame, text='停止采集', command=self.stop_col)
		self.stopButton.pack(side=tk.RIGHT, padx=5, pady=2)
		# training controls
		self.TrainFrame = tk.LabelFrame(self.buttonFrame)
		self.TrainFrame.pack(side=tk.TOP, padx=5, pady=2)
		self.CB1 = tk.IntVar()	# checkbox: train the image model
		self.CB2 = tk.IntVar()	# checkbox: train the sensor model
		self.CBFrame = tk.Frame(self.TrainFrame)
		self.CBFrame.pack(side=tk.LEFT)
		self.TrainCB1 = tk.Checkbutton(self.CBFrame, text="图像数据", variable=self.CB1)
		self.TrainCB1.pack(side=tk.TOP)
		self.TrainCB2 = tk.Checkbutton(self.CBFrame, text="蓝牙数据", variable=self.CB2)
		self.TrainCB2.pack(side=tk.TOP)
		self.trainButton = tk.Button(self.TrainFrame, text='训练数据', command=self.startDataTrain)
		self.trainButton.pack(side=tk.LEFT, padx=5, pady=2)
		# verification controls
		self.confirmFrame = tk.LabelFrame(self.buttonFrame)
		self.confirmFrame.pack(side=tk.TOP, padx=5, pady=2)

		self.confirmSeq = tk.Frame(self.confirmFrame)
		self.confirmSeq.pack(side=tk.TOP, padx=5, pady=2)
		self.confirmText = tk.Label(self.confirmSeq, text='自我验证编号')
		self.confirmText.pack(side=tk.LEFT, padx=5, pady=2)
		self.confirmEntry = tk.Entry(self.confirmSeq, width=10)
		self.confirmEntry.pack(side=tk.RIGHT, padx=5, pady=2)
		self.confirmEntry.bind('<Leave>', self.validate)
		self.startConfirm = tk.Button(self.confirmFrame, text='开始验证', command=self.startPreConfirm)
		self.startConfirm.pack(side=tk.TOP, padx=5, pady=2)

		self.confirmResult = tk.Frame(self.confirmFrame)
		self.confirmResult.pack(side=tk.TOP, padx=5, pady=2)
		self.ResultText = tk.Label(self.confirmResult, text='识别结果')
		self.ResultText.pack(side=tk.LEFT, padx=5, pady=2)
		self.ResultStatus = tk.Label(self.confirmResult, textvariable=self.valRes)
		self.ResultStatus.pack(side=tk.RIGHT, padx=5, pady=2)
		# recognition button
		self.identify = tk.Button(self.confirmFrame, text='开始识别', command=self.startIdentify)
		self.identify.pack(side=tk.BOTTOM, padx=5, pady=2)

		# ---- worker threads ----
		self.blueToothSignal = threading.Event()	# set when the connect button fires
		self.data_train = threading.Event()
		# camera / display loop runs on a daemon thread
		self.thread = threading.Thread(target=self.videoloop, args=())
		self.thread.daemon = True
		self.thread.start()
		# bluetooth connection monitor thread
		TBlue = threading.Thread(target=self.blueToothMonitor, args=())
		TBlue.start()

		self.root.wm_title("珞樱V1.0")
		self.root.wm_protocol('WM_DELETE_WINDOW', self.onClose)	# route window close through onClose

	def validate(self, Event):
		"""<Leave> handler for the verification entry: record whether the
		typed text is a known gesture index."""
		key = self.confirmEntry.get()
		self.isValidIndex = key.isdigit() and int(key) < len(gestDict)
		if self.isValidIndex:
			return True

	def refreshLabel(self, Event):
		"""<FocusOut> handler: validate the gesture number in the entry and
		display the matching gesture name.

		NOTE(review): non-digit input leaves validFlag unchanged — this
		matches the original behaviour; confirm it is intentional.
		"""
		if self.gest.get():
			key = self.gest.get()
			if key.isdigit():
				key = int(key)
				if key < len(gestDict):
					self.validFlag = True
					gest = gestDict[key].strip('\n')
					self.labelGestShowInfo.config(text=gest)
				else:
					self.validFlag = False

	def blueToothConnect(self, Event):
		"""Connect-button handler: wake the bluetooth monitor thread."""
		self.blueToothSignal.set()

	def blueToothMonitor(self):
		"""Worker thread: wait for the connect button, then try to
		establish the bluetooth link; starts a keep-alive thread on success."""
		while self.flag:
			if self.blue.status == False:
				self.bTConnectButton.enable = False
				self.blueToothSignal.clear()
				self.blueToothSignal.wait()	# block until the connect button fires
				print("[INFO] BlueTooth Connect:尝试连接蓝牙...")
				self.blue.connect()
				try:
					if self.blue.sock.recv(35):
						self.blueStatusVar.set("已连接")
						self.blue.status = True
						break
					else:
						self.blueStatusVar.set("未连接")
						self.blue.status = False
				except IOError:
					print("[INFO] BlueTooth Data Receive:接收数据失败")
					self.bTConnectButton.enable = True
					# BUG FIX: the original recursed into blueToothMonitor()
					# here, growing the stack on every failed attempt; loop
					# around and wait for the next button press instead.
					continue
		if self.blue.status:
			# keep-alive worker once connected
			t = threading.Thread(target=self.blue.keepConnect, args=())
			t.start()

	def videoloop(self):
		"""Camera worker thread: grabs frames, drives sample collection,
		one-shot recognition and self-verification, and refreshes the
		video panels."""
		global model
		global model2
		global conf
		print("[INFO] warming up WebCamera...")
		self.vs = cv2.VideoCapture(0)
		time.sleep(1.0)	# give the camera time to start up
		gestStr = None
		index = None
		try:
			while self.flag:
				if self.validFlag and index is None:
					gestStr = self.gest.get()
					index = conf[int(gestStr)][0]	# resume numbering from the saved count
				time.sleep(0.04)
				gestStr = self.gest.get()
				ret, self.frame = self.vs.read()
				# BUG FIX: was `self.frame.all() != None`, which raises
				# AttributeError when the camera read fails (frame is None).
				if self.validFlag and self.staticFlag and index is not None and self.frame is not None:
					# ---- collect one image sample ----
					# Reuse a single segmentation result; the original ran
					# staticImage() four times on the same frame.
					res = staticImage(self.frame)
					if res is None:
						continue
					miniFrameA, a, b = res
					try:
						savingData = imageProcess(res[0], (24, 24))
					except TypeError:
						print("[INFO] 未检测到图像边缘，请确认摄像头位置！")
						# BUG FIX: must skip the rest of the iteration,
						# otherwise savingData is undefined below.
						continue
					miniFrameA = preProcessImage(miniFrameA, (240, 240))

					_, miniFrameB = cv2.threshold(res[0], 30, 255, 0)
					miniFrameB = preProcessImage(miniFrameB, (240, 240))

					self.minipanelA = show_panel_image(self.minipanelA, self.miniCanvA, miniFrameA, "top")
					self.minipanelB = show_panel_image(self.minipanelB, self.miniCanvB, miniFrameB, "top")

					cv2.rectangle(self.frame, a, b, (0, 0, 255), 2)
					cv2.imwrite(self.imgPath.get() + "/" + gestStr + "_" + str(index) + ".jpg", savingData)
					index += 1
					if index % 25 == 0:
						# checkpoint the sample counter every 25 images
						conf[int(gestStr)][0] = index
						with open("configure", "wb") as f:
							pickle.dump(conf, f)
						print("[INFO] Image Data Collect:图像数据已存档!")
				elif self.valFlag:
					# ---- recognize the current frame once ----
					self.valFlag = False
					res = staticImage(self.frame)
					try:
						# res[0] raises TypeError when segmentation found nothing
						savingData = imageProcess(res[0], (24, 24))
					except TypeError:
						print("[INFO] 未检测到图像边缘，请确认摄像头位置！")
						continue
					data = np.reshape(savingData, (24, 24, 1))
					batch = np.array([data])
					print(model.predict_classes(batch))
					output = gestDict[model.predict_classes(batch)[0]].strip('\n')
					self.valRes.set(output)
				elif self.confirmFlag and self.isValidIndex:
					# ---- self-verification / self-augmentation ----
					sourcedir = self.imgPath.get()
					Index = int(self.confirmEntry.get())
					templeImage = []
					try:
						# edge check only; TypeError when staticImage returns None
						image = imageProcess(staticImage(self.frame)[0], (24, 24))
					except TypeError:
						print("\r [INFO] 未检测到图像边缘，请确认摄像头位置！", end="")
						continue
					# NOTE(review): classifies the WHOLE frame here rather
					# than the segmented hand patch — confirm intentional.
					data = imageProcess(self.frame, (24, 24))
					data = np.reshape(data, (24, 24, 1))
					templeImage.append(data)
					if self.ValidateFlag[1]:
						temple = np.array(templeImage)
						res = model.predict_classes(temple)[0]
						if res != Index:
							# misclassified: save the frame and augment it
							# as extra training data for the expected class
							print("MMP, 开始自我增殖")
							name = str(Index) + "_e_" + str(time.time()).replace(".", "") + ".jpg"
							data = imageProcess(self.frame, (24, 24))
							cv2.imwrite(sourcedir + name, data)
							augument(data, str(Index), sourcedir, 20)
						else:
							print("当前收集的手语姿势为", gestDict[res])
						self.confirmFlag = False

				# refresh the main video panel
				image = preProcessImage(self.frame, (600, 450))
				self.panel = show_panel_image(self.panel, self.canv, image, "left")

		except RuntimeError:
			print("[INFO] caught a RuntimeError!")
			# BUG FIX: cv2.VideoCapture has no .stop(); release() is correct.
			self.vs.release()
		print("[INFO] WebCamera has been shut down!")

	def startPreConfirm(self):
		"""Start a self-verification pass for the entered gesture index."""
		# BUG FIX: was the unqualified name `isValidIndex` -> NameError.
		if self.isValidIndex == False:
			msg.showwarning("[INFO] 请输入有效的姿势编号！")
		else:
			self.confirmFlag = True
			# stop() (from blueRunner) flips ValidateFlag[1] after a delay
			t = threading.Thread(target=stop, args=(self.ValidateFlag, 1,))
			t.start()

	def start_col(self):
		"""Start collecting either image (radio=1) or bluetooth (radio=2) data."""
		self.refreshLabel(1)	# re-validate the gesture number first
		gest = self.labelGestEntry.get()
		if self.validFlag and self.v.get() == 1:
			self.labelGestEntry.config(state='disabled')	# lock the number while collecting
			print("[INFO] Image Data Collect:开始收集数据...")
			self.staticFlag = True
		elif self.v.get() == 2:
			t4 = threading.Thread(target=self.blueData, args=())
			t4.start()
			print("[INFO] Sensor Data :当前手势已收集", conf[int(gest)][1], "个样本")
		else:
			msg.showwarning("姿态数据为空", "请输入合法的姿态编号")

	def blueData(self):
		"""Collect one bluetooth sensor sample and checkpoint the counter file."""
		global conf
		geststr = self.labelGestEntry.get()
		geststr = geststr.strip()
		try:
			index = int(geststr)
		except Exception:
			msg.showwarning("提示", "输入的编号有误！")
			return 0
		if dyncollect(self.blue, geststr, conf[index][1]):
			conf[index][1] += 1
			with open("configure", "wb") as f:
				pickle.dump(conf, f)
			print("[INFO] Sensor Data Collect:行动数据已存档!")

	def stop_col(self):
		"""Stop image-sample collection and unlock the gesture-number entry."""
		self.labelGestEntry.config(state='normal')
		print("[INFO] Image Data Collect:停止收集")
		self.staticFlag = False

	def startIdentify(self):
		"""Trigger a single recognition pass in videoloop (image model only)."""
		global model
		# (A commented-out sensor-fusion prototype using model2 was removed
		# from here as dead code.)
		if model is not None:
			self.valFlag = True	# videoloop picks this up and classifies one frame
		else:
			msg.showwarning("数据未训练","请先完成数据的训练")

	def openImgDir(self):
		"""Choose the directory where collected images are stored."""
		path_ = askdirectory()
		self.imgPath.set(path_)

	def startDataTrain(self):
		"""Train the selected model(s) and reload the image model."""
		# BUG FIX: `model` was assigned to a function-local name, so the
		# freshly trained model was never visible to videoloop/startIdentify.
		global model
		if self.CB1.get() == 1:
			startImageTrain(self.imgPath.get() + "/", defaultModName)
			model = load_model(defaultModName)
		if self.CB2.get() == 1:
			startBlueTrain(self.bluePath.get() + "/", defaultModName2)
		msg.showwarning("提示","数据训练已完成！")
		self.valFlag = True

	def openBlueDir(self):
		"""Choose the directory where bluetooth samples are stored."""
		path_ = askdirectory()
		self.bluePath.set(path_)

	def onClose(self):
		"""Window-close handler: stop worker threads and release resources."""
		self.flag = False	# stop the video / bluetooth loops first
		if self.vs is not None:	# camera may never have been opened
			self.vs.release()
		self.blueToothSignal.set()	# unblock the bluetooth monitor thread
		self.root.destroy()
		self.root.quit()
		self.blue.cutDown = True
		cv2.destroyAllWindows()

	def __del__(self):
		# Avoid a double shutdown when onClose already ran (it sets flag False).
		if getattr(self, "flag", False):
			self.onClose()

				
# Application entry point: build the Tk root window, construct the App
# (which spawns the camera and bluetooth worker threads), then run the
# tkinter event loop until the window is closed.
root = tk.Tk()				# create the root window
app = App(root)

app.root.mainloop()