from dataAccess import getData
from immediatelyJudge import *
from HereticJudger import layerClassification,iterateClassification,optimizeClassification
import matplotlib
# Without this line (the "Agg" backend) the saved figures come out blank.
matplotlib.use("Agg")
from plotUtil import roc 
import matplotlib.pyplot as plt
FILE_PATH_HJ="C:\\Users\\LENOVO\\Desktop\\machineLearning\\Code\\testSite\\myDetector\\detectorTools\\HereticJudger\\records\\"
badData = []
# This file contains the functions used to manually kick off model training.
def prepareBadData(target):
	# Build the module-level abnormal data set used later to evaluate
	# how well a trained model performs.
	global badData
	generalBad = getData.getBad(where = "limit 300,360")
	targetBad = getData.getBad("unnormalparams", target, "limit 0,60")
	badData = generalBad + targetBad

def trainAndSaveAll():
	# Train and persist both model kinds (default and charModel) for every
	# parameter column; the first column is skipped (it is not a parameter).
	for column in getData.getAllColumnName("normalparams")[1:]:
		offlineTrainingAndSave(column)
		offlineTrainingAndSave(column, modelType="charModel")

def generatePicOfModels(targetFld = "email",date = "20180204",threshold=0.05):
	"""Load a saved cm model for *targetFld* and save its ROC curve as a PNG.

	targetFld: parameter/column name whose model is plotted.
	date: date string used when the model file was saved (TODO: the way
	      this is produced is known to be wrong and should be fixed).
	threshold: threshold passed to the model's predict() call.

	Requires prepareBadData() to have been called first so that the
	module-level badData list holds abnormal samples to label against.
	"""
	global badData
	modelType = "cm"
	fileName = getFileName(targetFld, date, modelType)
	setIfReadWithTrainingSets(True)
	cmModel = readModelFromFile(fileName, obj_hook = cmFromFile)
	# Model loaded; take the first 100 training samples as the "normal" set.
	goodData = cmModel.trainingSet[0:100]
	data = goodData + badData
	# Ground-truth labels: 1 = normal sample, 0 = abnormal sample.
	real = [1] * len(goodData) + [0] * len(badData)
	# predict() is called for its side effect: the model's positiveRate
	# is produced during prediction (its return value is not needed here).
	cmModel.predict(data, real, threshold=threshold)
	tprandfpr = roc.generateScore((data, real), cmModel.predict, cmModel.getPositiveRate()[0:20])
	# Each entry of tprandfpr is a (tpr, fpr) pair.
	tpr = [point[0] for point in tprandfpr]
	fpr = [point[1] for point in tprandfpr]
	print("tpr:", tpr)
	print("fpr:", fpr)
	plt.xlim([0, 1])
	plt.ylim([0, 1])
	plt.plot(fpr, tpr, 'b')
	plt.title(targetFld+"_"+modelType+"_"+date)
	plt.xlabel("fpr")
	plt.ylabel("tpr")
	# NOTE(review): FILE_PATH is not defined in this file (only FILE_PATH_HJ
	# is) — presumably it comes from the `immediatelyJudge` star import;
	# confirm, otherwise this raises NameError at runtime.
	plt.savefig(FILE_PATH+"pics\\"+targetFld+"_"+date+"_"+modelType)
	plt.close()
	# plt.show()
	
def distinguishValue(tb='mydefensor_argvaluesubmit',fld='value',argName='',dateStr="20180210"):
	"""Classify the unconfirmed values of *argName* and write a suggestion file.

	Runs the three-stage classification pipeline (layer -> iterate ->
	optimize) over the fetched values and stores the two resulting groups
	as JSON at FILE_PATH_HJ/<argName>.txt for later reading.
	Skips (with a message) when fewer than 7 samples are available.
	"""
	# NOTE(review): the WHERE clause is built by string concatenation; if
	# argName can ever contain untrusted input this is SQL-injectable —
	# prefer a parameterized query in getData.getGood.
	goodData = getData.getGood(tb, fld, 'where arg_name="'+argName+'" and confirmed="false"')
	if len(goodData) > 6:
		# Run the classification pipeline.
		res1 = layerClassification.process(goodData)
		res2 = iterateClassification.process(res1)
		res3 = optimizeClassification.process(res2)
		rst = {"0": res3[0], "1": res3[1]}
		path = FILE_PATH_HJ + argName
		# NOTE(review): `json` is not imported at the top of this file —
		# presumably it arrives via the `immediatelyJudge` star import; verify.
		with open(path+".txt","w") as f:
			f.write(json.dumps(rst))
	else:
		print("the size of data is too small!stop~")



if __name__ == '__main__':
	print("main")	
	# Run value classification for every parameter column; the first
	# column is skipped (it is not a parameter name).
	columns = getData.getAllColumnName("normalparams")[1:]
	for column in columns:
		distinguishValue(argName = column)
		# Alternative manual runs:
		# prepareBadData(column)
		# generatePicOfModels(targetFld = column)
