import numpy as np
from sklearn.metrics import f1_score, precision_score, recall_score, confusion_matrix
import os
def sigmoid(a, b, x):
	'''
	Logistic activation applied element-wise: g(z) = 1 / (1 + e^-(x·a + b)).

	a -- weight matrix combined with the input via x.dot(a)
	b -- scalar bias added to every element
	x -- input matrix (rows are samples)
	'''
	z = x.dot(a) + b
	return 1.0 / (1.0 + np.exp(-z))

# def ELM(X, T, D, L):
# 	np.random.seed(1)
# 	a = np.random.normal(0, 1, (D, L))
# 	b = np.random.normal(0, 1)
# 	# 使用特征映射求解输出矩阵
# 	H = sigmoid(a, b, X)
# 	# 计算输出权重和输出函数
# 	HH = H.T.dot(H)
# 	HT = H.T.dot(T)
# 	beta = np.linalg.pinv(HH).dot(HT)
# 	Fl = H.dot(beta)
# 	return beta, Fl

def input_2_hidden(D, L):
	'''
	Draw the fixed input-to-hidden parameters of the ELM.

	Seeds the global NumPy RNG with 1 so every call returns the same
	(D, L) standard-normal weight matrix and scalar bias.
	'''
	np.random.seed(1)
	weights = np.random.normal(loc=0, scale=1, size=(D, L))
	bias = np.random.normal(loc=0, scale=1)
	return weights, bias

def calculate_beta(H, T, L, normal):
	'''
	Solve the ridge-regularized least squares for the ELM output weights:
	beta = (I/normal + H^T H)^+ H^T T.

	H      -- hidden-layer activations, shape (n_samples, L)
	T      -- target matrix, shape (n_samples, n_outputs)
	L      -- number of hidden units (size of the identity regularizer)
	normal -- regularization strength (identity is divided by it)
	'''
	regularizer = np.eye(L) / normal
	gram = H.T.dot(H)
	projection = H.T.dot(T)
	return np.linalg.pinv(regularizer + gram).dot(projection)

def caculate_top(labels, predicts):
	'''
	Compute top-1 through top-5 accuracy of `predicts` against integer `labels`.

	labels   -- iterable of class ids, one per sample
	predicts -- (n_samples, n_classes) scores; higher score = better rank

	Returns a list [top1, ..., top5] of hit rates. Also prints the raw hit
	counts, matching the original logging behaviour.
	'''
	predicts = np.array(predicts)
	n_samples = len(labels)
	n_classes = predicts.shape[1]
	topk_num = [0, 0, 0, 0, 0]
	# Sort each row ONCE (ascending) instead of re-running argsort for every
	# k in 1..5 — same ordering, 5x fewer sorts.
	order = np.argsort(predicts, axis=1)
	for i in range(n_samples):
		pos = np.flatnonzero(order[i] == labels[i])
		if pos.size == 0:
			continue	# a label id outside the class range never scores a hit
		rank = n_classes - pos[0]	# 1 == highest-scored class
		# label is inside the top-(k+1) slice exactly when rank <= k+1
		for k in range(rank - 1, 5):
			topk_num[k] += 1
	print(topk_num)
	return [num / n_samples for num in topk_num]

def caculate_fpr(y_true, predicts):
	'''
	Macro-averaged F1, precision and recall of the argmax predictions.

	y_true   -- ground-truth class ids
	predicts -- (n_samples, n_classes) score matrix

	Returns the tuple (f1, precision, recall).
	'''
	y_pred = np.argmax(np.array(predicts), axis=1).reshape((-1,))
	macro_f1 = f1_score(y_true, y_pred, average='macro')
	macro_precision = precision_score(y_true, y_pred, average='macro')
	macro_recall = recall_score(y_true, y_pred, average='macro')
	return macro_f1, macro_precision, macro_recall

def get_probs_and_labels(logits, labels):
	'''
	Convert raw logits to softmax probabilities and labels to one-hot vectors.

	logits -- (n_samples, n_developers) raw scores (list of lists or array)
	labels -- class ids, one per sample

	Returns (probs, hard_labels): probs is a list of per-sample softmax
	arrays; hard_labels is an (n_samples, n_developers) one-hot float array.
	'''
	probs = []
	for row in logits:
		exps = np.exp(row)
		probs.append(exps / np.sum(exps))
	# BUG FIX: was len(logits[1]), which raises IndexError for a single-sample
	# input and needlessly depends on row 1 — every row has the same width,
	# so read it from row 0.
	developer_size = len(logits[0])
	hard_labels = np.zeros(shape=(len(labels), developer_size))        # shape=(n_samples, n_developers)
	for i, label in enumerate(labels):
		hard_labels[i][int(label)] = 1.0
	print(hard_labels[0])	# debug print retained from the original behaviour
	return probs, hard_labels

def ax_b(train_logits, train_labels, val_logits, val_labels, test_logits, test_labels):
	'''
	Fit a linear correction matrix x such that probs·x ≈ one-hot labels
	(least squares via pseudo-inverse) on the training split, then report
	macro F1/P/R and top-1..5 accuracy on the val and test splits.
	'''
	train_probs, train_hard_labels = get_probs_and_labels(train_logits, train_labels)
	val_probs, val_hard_labels = get_probs_and_labels(val_logits, val_labels)
	test_probs, test_hard_labels = get_probs_and_labels(test_logits, test_labels)

	# Baseline metrics before any correction is applied.
	print('origin_val_f1:{}, origin_val_p:{}, origin_val_r:{}'.format(*caculate_fpr(val_labels, val_probs)))
	print('origin_test_f1:{}, origin_test_p:{}, origin_test_r:{}'.format(*caculate_fpr(test_labels, test_probs)))

	# Fit on the training split (empirically better than fitting on val).
	source_probs = np.mat(train_probs)
	source_targets = np.mat(train_hard_labels)
	print(source_targets.shape)
	print(source_probs.shape)
	# x solves source_probs · x ≈ source_targets in the least-squares sense.
	x = np.dot(source_targets.T, np.linalg.pinv(source_probs).T).T
	print(x.shape)

	corrected_val = np.matmul(val_probs, x)
	corrected_test = np.matmul(test_probs, x)
	val_f1, val_p, val_r = caculate_fpr(val_labels, corrected_val)
	test_f1, test_p, test_r = caculate_fpr(test_labels, corrected_test)
	print('val_f1:{}, val_p:{}, val_r:{}'.format(val_f1, val_p, val_r))
	print('test_f1:{}, test_p:{}, test_r:{}'.format(test_f1, test_p, test_r))
	val_topK = caculate_top(val_labels, corrected_val)
	test_topK = caculate_top(test_labels, corrected_test)
	print('val_top1:{:.3f}, val_top2:{:.3f}, val_top3:{:.3f}, val_top4:{:.3f}, val_top5:{:.3f}'.format(*val_topK))
	print('test_top1:{:.3f}, test_top2:{:.3f}, test_top3:{:.3f}, test_top4:{:.3f}, test_top5:{:.3f}'.format(*test_topK))

def ELM(train_logits, train_labels, val_logits, val_labels, test_logits, test_labels):
	'''
	Calibrate the softmax outputs with a ridge-regularized Extreme Learning
	Machine and print macro F1/precision/recall on the val and test splits.

	The hidden layer is a fixed random projection (input_2_hidden) followed
	by a sigmoid; only the output weights beta are solved for.
	'''
	train_probs, train_hard_labels = get_probs_and_labels(train_logits, train_labels)
	val_probs, val_hard_labels = get_probs_and_labels(val_logits, val_labels)
	test_probs, test_hard_labels = get_probs_and_labels(test_logits, test_labels)
	developer_size = len(test_logits[1])	# NOTE(review): reads row 1 — presumably all rows share one width; confirm
	hidden_units = 1000
	ridge_term = 0.003
	W, b = input_2_hidden(developer_size, L=hidden_units)
	hidden_train = sigmoid(W, b, np.mat(train_probs))
	# beta: learned output weights (can loosely be read as a penalty matrix)
	beta = calculate_beta(hidden_train, train_hard_labels, hidden_units, ridge_term)

	hidden_val = sigmoid(W, b, np.mat(val_probs))
	hidden_test = sigmoid(W, b, np.mat(test_probs))
	val_f1, val_p, val_r = caculate_fpr(val_labels, np.matmul(hidden_val, beta))
	test_f1, test_p, test_r = caculate_fpr(test_labels, np.matmul(hidden_test, beta))
	print('val_f1:{}, val_p:{}, val_r:{}'.format(val_f1, val_p, val_r))
	print('test_f1:{}, test_p:{}, test_r:{}'.format(test_f1, test_p, test_r))

def count_part_category():
	'''
	Experiment: find the categories scoring below the average per-class score,
	collect their samples, and fit the ax=b correction on just those samples.

	NOTE(review): exploratory/debugging code — several alternative strategies
	are kept as commented-out lines, and most of the final evaluation is
	disabled below.
	'''
	# Per-class recall statistics; use the samples of below-average classes to solve for x.
	train_logits, train_labels, val_logits, val_labels, test_logits, test_labels = read_logits_and_labels_file()

	# train_probs, train_hard_labels = get_probs_and_labels(train_logits, train_labels)
	# val_probs, val_hard_labels = get_probs_and_labels(val_logits, val_labels)
	# test_probs, test_hard_labels = get_probs_and_labels(test_logits, test_labels)
	single_c = 2
	y_true, predicts = train_labels, train_logits
	# y_true, predicts = val_labels, val_logits

	y_pred = np.argmax(predicts, axis=1)
	recalls = recall_score(y_true, y_pred, average=None)
	f1 = f1_score(y_true, y_pred, average=None)
	scores = f1		# NOTE(review): "scores" is per-class F1 here, not recall
	print(scores[single_c-1])
	aver_scores = np.average(scores)
	# +1 offset: labels are presumably 1-indexed while argwhere is 0-indexed — TODO confirm
	target_category = np.reshape(np.argwhere(scores < aver_scores), (-1, )) + 1
	# target_category = list(range(len(predicts[0])))
	print(target_category)
	# Collect the samples belonging to those categories
	# temp_logits = np.where(predicts)
	# temp_labels = np.argwhere(y_true in target_category)
	# target_category = np.argsort(recalls)[:-10]
	# target_category = [single_c,]
	temp_logits = []
	temp_labels = []
	for i in range(len(y_true)):
		if y_true[i] in target_category:
			temp_logits.append(predicts[i])
			temp_labels.append(y_true[i])
	# print(temp_labels)
	temp_probs, temp_hard_labels = get_probs_and_labels(temp_logits, temp_labels)
	temp_y_pred = np.reshape(np.argmax(np.array(temp_probs), axis=1), (-1,))
	print(temp_y_pred)
	# temp_f1, temp_p, temp_r = caculate_fpr(temp_labels, temp_logits)
	# How many of the selected samples are predicted as the first target class.
	n_tp = len(np.argwhere(temp_y_pred == target_category[0]))
	print('origin_temp_true_predict:{}'.format(n_tp))
	print('origin_temp_all_numer:{}'.format(len(temp_y_pred)))
	print('origin_temp_recall:{}'.format(scores[target_category[0]]))
	# print('origin_temp_f1:{}, temp_p:{}, temp_r:{}'.format(temp_f1, temp_p, temp_r))

	train_probs, train_hard_labels = get_probs_and_labels(train_logits, train_labels)
	val_probs, val_hard_labels = get_probs_and_labels(val_logits, val_labels)
	test_probs, test_hard_labels = get_probs_and_labels(test_logits, test_labels)

	# temp_probs, temp_hard_labels = val_probs, val_hard_labels
	# temp_probs, temp_hard_labels = train_probs, train_hard_labels
	
	print(np.mat(temp_hard_labels).shape)
	print(np.mat(temp_probs).shape)
	# x = np.mat(temp_hard_labels).T.dot(np.linalg.pinv(np.mat(temp_probs)).T)
	# Least-squares x so that temp_probs · x ≈ temp_hard_labels.
	x = np.dot(np.mat(temp_hard_labels).T, np.linalg.pinv(np.mat(temp_probs)).T).T
	print(x[0])
	print(x)
	print(x.shape)

	# temp_f1, temp_p, temp_r = caculate_fpr(temp_labels, np.matmul(temp_probs, x),)
	# print('after_temp_f1:{}, temp_p:{}, temp_r:{}'.format(temp_f1, temp_p, temp_r))

	temp_y_pred = np.reshape(np.argmax(np.array(np.matmul(temp_probs, x)), axis=1), (-1,))
	# temp_f1, temp_p, temp_r = caculate_fpr(temp_labels, temp_logits)
	# print(temp_y_pred)
	print(np.matmul(temp_probs, x))
	# f1 = f1_score(y_true, y_pred, average=None)
	# print('after_temp_r:{}'.format(len(np.argwhere(temp_y_pred == target_category[0]))))
	# print('after_temp_r:{}'.format())

	# val_f1, val_p, val_r = caculate_fpr(val_labels, np.matmul(val_probs, x), )
	# test_f1, test_p, test_r = caculate_fpr(test_labels, np.matmul(test_probs, x), )
	# print('val_f1:{}, val_p:{}, val_r:{}'.format(val_f1, val_p, val_r))
	# print('test_f1:{}, test_p:{}, test_r:{}'.format(test_f1, test_p, test_r))
	# val_topK = caculate_top(val_labels, np.matmul(val_probs, x), )
	# test_topK = caculate_top(test_labels, np.matmul(test_probs, x), )
	# print('val_top1:{:.3f}, val_top2:{:.3f}, val_top3:{:.3f}, val_top4:{:.3f}, val_top5:{:.3f}'.format(val_topK[0], val_topK[1], val_topK[2], val_topK[3], val_topK[4]))
	# print('test_top1:{:.3f}, test_top2:{:.3f}, test_top3:{:.3f}, test_top4:{:.3f}, test_top5:{:.3f}'.format(test_topK[0], test_topK[1], test_topK[2], test_topK[3], test_topK[4]))
	# # print('top1:{}, top2:{}, top3:{}, top4:{}, top5:{}'.format(topK[0], topK[1], topK[2], topK[3], topK[4]))

def pure_ax_b():
	'''Placeholder for a planned pure ax=b experiment — not implemented yet.'''
	pass

def combine_every_category_in_x():
	'''
	Solve ax=b separately for each category, then take the matching column
	from each per-class x and assemble them horizontally into one final x,
	which is evaluated on the val and test splits.
	'''
	train_logits, train_labels, val_logits, val_labels, test_logits, test_labels = read_logits_and_labels_file()
	
	train_probs, train_hard_labels = get_probs_and_labels(train_logits, train_labels)
	val_probs, val_hard_labels = get_probs_and_labels(val_logits, val_labels)
	test_probs, test_hard_labels = get_probs_and_labels(test_logits, test_labels)

	# Baseline metrics before the combined correction is applied.
	print('origin_val_f1:{}, origin_val_p:{}, origin_val_r:{}'.format(*caculate_fpr(val_labels, val_probs)))
	print('origin_test_f1:{}, origin_test_p:{}, origin_test_r:{}'.format(*caculate_fpr(test_labels, test_probs)))

	# train_probs, train_hard_labels = get_probs_and_labels(train_logits, train_labels)
	# val_probs, val_hard_labels = get_probs_and_labels(val_logits, val_labels)
	# test_probs, test_hard_labels = get_probs_and_labels(test_logits, test_labels)
	all_x = np.zeros(shape=(len(train_logits[0]), len(train_logits[0])))
	target_logits, target_labels = np.array(train_logits), np.array(train_labels)

	# NOTE(review): the loop starts at 1 so class 0 never gets a column —
	# labels are presumably 1-indexed here; confirm against the data files.
	for c in range(1, len(train_logits[0])):		# iterate over every category
		# 0. initialize an all_x to hold the final combined x
		# 1. gather all samples of class c into temp
		# 2. solve ax=b using the temp samples
		# 3. copy column c of this x into all_x
		idx_c = np.reshape(np.argwhere(target_labels == c), (-1,))		# all samples of this class
		temp_logits = target_logits[idx_c]
		temp_labels = target_labels[idx_c]
		# print(temp_logits.shape)
		# print(temp_labels.shape)
		temp_probs, temp_hard_labels = get_probs_and_labels(temp_logits, temp_labels)
		x = np.dot(np.mat(temp_hard_labels).T, np.linalg.pinv(np.mat(temp_probs)).T).T
		x = np.array(x)
		# print(x[:,c].shape)
		# print(all_x[:,c].shape)
		all_x[:, c] = x[:, c]
	x = all_x
	

	temp_probs, temp_hard_labels = train_probs, train_hard_labels

	print(np.mat(temp_hard_labels).shape)
	print(np.mat(temp_probs).shape)

	# x = np.dot(np.mat(temp_hard_labels).T, np.linalg.pinv(np.mat(temp_probs)).T).T
	# print(x)
	print(x.shape)
	val_f1, val_p, val_r = caculate_fpr(val_labels, np.matmul(val_probs, x), )
	test_f1, test_p, test_r = caculate_fpr(test_labels, np.matmul(test_probs, x), )
	print('val_f1:{}, val_p:{}, val_r:{}'.format(val_f1, val_p, val_r))
	print('test_f1:{}, test_p:{}, test_r:{}'.format(test_f1, test_p, test_r))
	val_topK = caculate_top(val_labels, np.matmul(val_probs, x), )
	test_topK = caculate_top(test_labels, np.matmul(test_probs, x), )
	print('val_top1:{:.3f}, val_top2:{:.3f}, val_top3:{:.3f}, val_top4:{:.3f}, val_top5:{:.3f}'.format(val_topK[0], val_topK[1], val_topK[2], val_topK[3], val_topK[4]))
	print('test_top1:{:.3f}, test_top2:{:.3f}, test_top3:{:.3f}, test_top4:{:.3f}, test_top5:{:.3f}'.format(test_topK[0], test_topK[1], test_topK[2], test_topK[3], test_topK[4]))


def observe_every_category():
	'''
	Print per-class recall, precision and F1 for the validation split
	(argmax predictions against the ground-truth labels).
	'''
	_, _, val_logits, val_labels, _, _ = read_logits_and_labels_file()
	y_true, predicts = val_labels, val_logits

	predictions = np.argmax(predicts, axis=1)
	per_class_recall = recall_score(y_true, predictions, average=None)
	per_class_precision = precision_score(y_true, predictions, average=None)
	per_class_f1 = f1_score(y_true, predictions, average=None)

	print(per_class_recall)
	print(per_class_precision)
	print(per_class_f1)


def _read_split(path):
	'''
	Parse one logits dump file into (logits, labels).

	Each line has the form "<space-separated float logits>,<label>"; the
	label is stored as a float string, so it is parsed via float then int.
	'''
	logits, labels = [], []
	with open(path, 'r') as reader:
		for line in reader:
			parts = line.strip().split(",")
			logits.append(list(map(float, parts[0].split(' '))))
			labels.append(int(float(parts[1])))
	return logits, labels

def read_logits_and_labels_file():
	'''
	Load the train/val/test logits and labels dumped under a model run dir.

	Returns (train_logits, train_labels, val_logits, val_labels,
	test_logits, test_labels). The identical parse loop was previously
	triplicated; it is now factored into _read_split.
	'''
	model_dir = 'C:/Users/wanglinhui/Desktop/收拾旧山河/runs/1582159119'	# Mozilla
	# model_dir = 'C:/Users/wanglinhui/Desktop/收拾旧山河/runs/1582180074'	# GCC
	# model_dir = 'C:/Users/wanglinhui/Desktop/收拾旧山河/runs/1582162825'	# Netbeans
	# model_dir = 'C:/Users/wanglinhui/Desktop/收拾旧山河/runs/1582188187'	# Eclipse
	# model_dir = 'C:/Users/wanglinhui/Desktop/收拾旧山河/runs/1582217372'	# OpenOffice
	train_logits, train_labels = _read_split(os.path.join(model_dir, 'train_time_logits.txt'))
	val_logits, val_labels = _read_split(os.path.join(model_dir, 'val_time_logits.txt'))
	test_logits, test_labels = _read_split(os.path.join(model_dir, 'test_time_logits.txt'))
	return train_logits, train_labels, val_logits, val_labels, test_logits, test_labels

def read_file_to_ELM():
	'''
	Load the dumped logits, print baseline macro F1/P/R on the test split,
	then run the ax=b linear-correction experiment on all three splits.
	'''
	splits = read_logits_and_labels_file()
	train_logits, train_labels, val_logits, val_labels, test_logits, test_labels = splits
	# Uncorrected baseline on the raw test logits.
	print(caculate_fpr(test_labels, test_logits))
	ax_b(train_logits, train_labels, val_logits, val_labels, test_logits, test_labels)

if __name__ == "__main__":
	# Entry point: run the ax=b linear-correction experiment; the other
	# experiments below are kept for manual switching.
	read_file_to_ELM()
	# count_part_category()
	# observe_every_category()
	# combine_every_category_in_x()
