from sklearn.metrics import precision_score ,recall_score,accuracy_score,f1_score,classification_report
from visualFunction.plot_report import plot_classification_report 
# NOTE: sklearn already implements these metrics — no need to hand-roll them.
# # real and predict must each contain both positive and negative samples
# # # turns out this is still useful when computing fpr/tpr
def basicData(real,predict):
	"""
	Count confusion-matrix cells for a binary classification result.

	real:    ground-truth labels (0 = negative, 1 = positive)
	predict: labels produced by the model, aligned with ``real``

	Returns (tp, tn, fp, fn):
		tp -- true positives  (real 1, predicted 1)
		tn -- true negatives  (real 0, predicted 0)
		fp -- false positives (real 0, predicted 1)
		fn -- false negatives (real 1, predicted 0)

	Raises ValueError if the two sequences differ in length.
	(The previous version called os._exit(0) here, but ``os`` was never
	imported, so a mismatch crashed with NameError instead.)
	"""
	if len(real)!=len(predict):
		raise ValueError("there's difference in length of arguments")

	tp=0
	tn=0
	fp=0
	fn=0
	for r,p in zip(real,predict):
		if r==p:
			# correct prediction: split by the predicted class
			if p==1:
				tp+=1
			else:
				tn+=1
		else:
			# wrong prediction: split by the predicted class
			if p==1:
				fp+=1
			else:
				fn+=1
	return tp,tn,fp,fn

def getFpr(fp,n):
	"""Return the false positive rate fp / n.

	fp -- number of false positives
	n  -- total negatives (falsePositive + trueNegative)
	"""
	rate = fp / n
	return float(rate)
def getTpr(tp,p):
	"""Return the true positive rate tp / p.

	tp -- number of true positives
	p  -- total positives (truePositive + falseNegative)
	"""
	rate = tp / p
	return float(rate)

# def precision(real,predict):
# 	tp,tn,fp,fn=basicData(real,predict)
# 	return float(tp/(tp+fp))
# def recall(real,predict):
# 	tp,tn,fp,fn=basicData(real,predict)
# 	return float(tp/(tp+fn))
# def accuracy(real,predict):
# 	tp,tn,fp,fn=basicData(real,predict)
# 	return float((tp+tn)/(tp+tn+fp+fn))
# def f1():
# 	pass
# The following four functions compute classification quality metrics
def precision(real,predict):
	"""Return the precision of *predict* against the ground truth *real*.

	Thin wrapper around sklearn's precision_score.
	"""
	return precision_score(y_true=real, y_pred=predict)
def f1(real,predict):
	"""Return the F1 score of *predict* against the ground truth *real*.

	Thin wrapper around sklearn's f1_score.
	"""
	return f1_score(y_true=real, y_pred=predict)
def accuracy(real,predict):
	"""Return the accuracy of *predict* against the ground truth *real*.

	Thin wrapper around sklearn's accuracy_score.
	"""
	return accuracy_score(y_true=real, y_pred=predict)
def recall(real,predict):
	"""Return the recall of *predict* against the ground truth *real*.

	Thin wrapper around sklearn's recall_score.
	"""
	return recall_score(y_true=real, y_pred=predict)

def plotClassificationReport(real,predict,title=None):
	"""Build sklearn's text classification report and hand it to the plotter.

	real    -- ground-truth labels
	predict -- labels produced by the model
	title   -- optional title forwarded to the plotting helper
	"""
	report = classification_report(real, predict)
	plot_classification_report(report, title=title)


def generateScore(material,callableFunc,scoreList):
	"""
	Sweep a list of thresholds and collect (tpr, fpr) pairs, e.g. for an
	ROC curve. (Marked "unfinished" by the original author but usable.)

	material:     tuple (samples, realLabels) — a sample set to classify and
	              the ground-truth label for each sample
	callableFunc: callable that classifies the whole sample set in one call;
	              invoked as callableFunc(samples, realLabels, threshold=x)
	scoreList:    thresholds to sweep (processed in descending order)
	"""
	samples, realLabels = material[0], material[1]
	# one prediction vector per threshold, highest threshold first
	predictions = [
		callableFunc(samples, realLabels, threshold=threshold)
		for threshold in sorted(scoreList, reverse=True)
	]
	tprAndFpr = []
	for predicted in predictions:
		tp, tn, fp, fn = basicData(realLabels, predicted)
		tprAndFpr.append((getTpr(tp, tp + fn), getFpr(fp, fp + tn)))
	return tprAndFpr




































		