import numpy as np
from config import *

import matplotlib.pyplot as plt
from sklearn.neighbors import KernelDensity
# from keras.optimizers import Adam, Adagrad
from scipy.stats import beta
from sklearn.linear_model import LogisticRegression
# from keras.models import Sequential
# from keras.layers import Dense, Activation
# from keras.layers.advanced_activations import LeakyReLU
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = ""
Eplison = 1e-5  # score threshold below which a scan is treated as zero-score (name is a misspelling of "Epsilon"; kept because it is referenced throughout this module)


class Model():
	"""Base class for scan-level cancer-probability models.

	``dataSet`` is expected to map scan name -> dict with keys
	``'candidates'`` (list of dicts carrying ``'prob'`` and ``'mal_prob'``)
	and ``'cancer'`` (0/1 label) -- assumed from usage below, confirm
	against the data-loading code.
	"""
	def __init__(self, dataSet=None):
		self.dataSet = dataSet
		self.malScoreList = []		# max scores of malignant scans (zeros included)
		self.benScoreList = []		# max scores of benign scans (zeros included)
		self.allScoreList = []		# max scores of every scan seen by statsTrain
		self.nMalZero = 0		# malignant scans whose max score is 0
		self.nBenZero = 0		# benign scans whose max score is 0
		self.nMal = 0
		self.nBen = 0
		self.nAll = 0
		self.nZero = 0
		self.nMalMuchNodule = 0		# malignant scans with many candidates
		self.nBenMuchNodule = 0		# benign scans with many candidates
		self.noduleNumthred = 10	# "many candidates" threshold
		self.lowerBound = 0.05		# clamp range for predicted probabilities
		self.upperBound = 0.95

	def _MaxScore(self, name):
		"""Return the maximum candidate malignancy score for scan ``name`` (0 if none)."""
		score = 0
		for cdd in self.dataSet[name]['candidates']:
			assert cdd['prob'] >= 0.1
			score = max(score, cdd['mal_prob'])
		return score

	def statsTrain(self, nameList):
		"""Collect score lists and prior statistics over the scans in ``nameList``.

		Fills the mal/ben/all score lists and sets:
		- zeroProb: P(cancer | max score == 0), from zero-score training scans
		- muchProb: P(cancer | more than noduleNumthred candidates)
		"""
		### while doing logistics, it is better to add zero scans into it
		for name in nameList:
			score = self._MaxScore(name)
			isMal = self.dataSet[name]['cancer'] == 1
			if score == 0:
				self.allScoreList.append(0)
				if isMal:
					self.malScoreList.append(score)
					self.nMalZero += 1
				else:
					self.benScoreList.append(score)
					self.nBenZero += 1
				continue

			if len(self.dataSet[name]['candidates']) > self.noduleNumthred:
				if isMal:
					self.nMalMuchNodule += 1
				else:
					self.nBenMuchNodule += 1

			if isMal:
				self.malScoreList.append(score)
			else:
				self.benScoreList.append(score)
			self.allScoreList.append(score)

		# BUGFIX: zero-score scans are already appended to the score lists
		# above, so adding nMalZero/nBenZero on top double-counted them.
		self.nMal = len(self.malScoreList)
		self.nBen = len(self.benScoreList)
		self.nAll = len(self.allScoreList)
		self.nZero = self.nMalZero + self.nBenZero
		# P(cancer | score == 0).  The original Bayes-style expression
		# (nMalZero/nMal)*(nMal/nAll)/(nZero/nAll) reduces algebraically to
		# nMalZero/nZero.  Guard the empty-denominator case (no zero-score
		# training scans) with the conservative lower bound.
		self.zeroProb = self.nMalZero / self.nZero if self.nZero else self.lowerBound
		nMuch = self.nMalMuchNodule + self.nBenMuchNodule
		self.muchProb = self.nMalMuchNodule / nMuch if nMuch else 0.0

	def predictAtName(self, name):
		"""Abstract hook: subclasses return P(cancer) for scan ``name``."""
		# BUGFIX: the original definition was missing ``self``.
		pass

class Logistic(Model):
	"""Logistic regression on a 14-feature expansion of the max candidate score."""
	# Total: 0.4076, train: 0.4054, test: 0.4227
	def __init__(self, dataSet=None):
		super(Logistic, self).__init__(dataSet)
		# Set here (not inside fit) so _Expand works even before fit() runs.
		self.nFeature = 14

	def _Expand(self, score):
		"""Map a scalar score to a (1, nFeature) row:
		score^1..score^10 at columns 0-9, then log/sqrt/exp/reciprocal.
		"""
		a = np.zeros((1, self.nFeature))
		a[0][0] = score
		# Cleanup: the original also filled columns 10-13 with powers and then
		# overwrote them; only columns 1-9 need the polynomial recurrence.
		for i in range(1, 10):
			a[0][i] = a[0][i-1]*score
		a[0][10] = np.log(score+1)
		a[0][11] = np.sqrt(score)
		a[0][12] = np.exp(score+1)
		a[0][13] = 1/(score+1)
		return a

	def fit(self, nameList):
		"""Fit the logistic model on the scans in ``nameList``."""
		Model.statsTrain(self, nameList)
		scores = np.hstack((np.asarray(self.malScoreList), np.asarray(self.benScoreList)))
		# CONSISTENCY FIX: reuse _Expand so the training features can never
		# drift from the predict-time features (the original duplicated the
		# whole expansion inline here).
		inputX = np.vstack([self._Expand(s) for s in scores])
		inputY = np.hstack((np.ones_like(self.malScoreList), np.zeros_like(self.benScoreList)))
		self.model = LogisticRegression(C=1, max_iter=1000)
		self.model.fit(inputX, inputY)

	def predictAtScore(self, score):
		"""Return P(cancer) for a raw score, clamped to [lowerBound, upperBound]."""
		if score < Eplison:
			return self.zeroProb
		inputX = self._Expand(score)
		pMal = self.model.predict_proba(inputX)[0][1]
		return min(max(pMal, self.lowerBound), self.upperBound)

	def predictAtName(self, name):
		"""Return P(cancer) for scan ``name`` (zeroProb when no candidate scored)."""
		score = Model._MaxScore(self, name)
		if score == 0:
			return self.zeroProb
		return self.predictAtScore(score)

	def check(self):
		"""Plot the fitted score -> probability curve as a visual sanity check."""
		inputX = np.linspace(0, 1, 500)
		inputY = [self.predictAtScore(x) for x in inputX]
		plt.plot(inputX, inputY)
		plt.show()

class DoubleLogistic(Model):
	"""Logistic regression on the two largest candidate scores plus sqrt(#candidates)."""
	# Total:0.405670, train:0.40337, test:0.41992
	def __init__(self, dataSet=None):
		super(DoubleLogistic, self).__init__(dataSet)
		self.nPoly = 1			# polynomial degree per score
		self.nFeature = self.nPoly + 4	# feature slots reserved per score

	def _2MaxScore(self, name):
		"""Return the largest and second-largest candidate malignancy scores."""
		score = 0
		secScore = 0
		for cdd in self.dataSet[name]['candidates']:
			assert cdd['prob'] >= 0.1
			# BUGFIX: when a new maximum arrives, the old maximum must become
			# the runner-up; the original discarded it, so the second score
			# depended on candidate order.
			if cdd['mal_prob'] > score:
				secScore = score
				score = cdd['mal_prob']
			elif cdd['mal_prob'] > secScore:
				secScore = cdd['mal_prob']
		return score, secScore

	def _Expand(self, score, secScore, nCdds):
		"""Build the (1, 2*nFeature+2) feature row for one scan."""
		a = np.zeros((1, self.nFeature*2+2))
		a[0][0] = score
		a[0][self.nFeature] = secScore
		a[0][self.nFeature*2] = np.sqrt(nCdds)
		for i in range(1, self.nPoly):	# no-op while nPoly == 1; kept for tuning
			a[0][i] = a[0][i-1]*score
			a[0][i+self.nFeature] = a[0][i+self.nFeature-1]*secScore
		a[0][self.nPoly] = np.log(score + 1)
		a[0][self.nPoly+1] = np.sqrt(score)
		a[0][self.nPoly+2] = np.exp(score+1)
		a[0][self.nPoly+self.nFeature] = np.log(secScore+1)
		a[0][self.nPoly+1+self.nFeature] = np.sqrt(secScore)
		a[0][self.nPoly+2+self.nFeature] = np.exp(secScore+1)
		return a

	def fit(self, nameList):
		"""Fit on every scan in ``nameList`` (zero-score scans included)."""
		Model.statsTrain(self, nameList)
		inputX = np.zeros((len(nameList), 2*self.nFeature+2))
		inputY = np.zeros((len(nameList)))
		for i, name in enumerate(nameList):
			score, secScore = self._2MaxScore(name)
			nCdds = len(self.dataSet[name]['candidates'])
			inputX[i] = self._Expand(score, secScore, nCdds)
			inputY[i] = self.dataSet[name]['cancer']
		self.model = LogisticRegression(C=200, max_iter=1000)
		self.model.fit(inputX, inputY)

	def predictAtScore(self, score, secScore, nCdds):
		"""Return P(cancer) from the two top scores and candidate count, clamped."""
		inputX = self._Expand(score, secScore, nCdds)
		pMal = self.model.predict_proba(inputX)[0][1]
		return min(max(pMal, self.lowerBound), self.upperBound)

	def predictAtName(self, name):
		"""Return P(cancer) for scan ``name``."""
		score, secScore = self._2MaxScore(name)
		nCdds = len(self.dataSet[name]['candidates'])
		return self.predictAtScore(score, secScore, nCdds)

	def check(self):
		"""Plot probability vs. primary score (secScore=0, single candidate).

		BUGFIX: the original called predictAtScore with one argument, which
		raised TypeError (it requires score, secScore, nCdds).
		"""
		inputX = np.linspace(0, 1, 500)
		inputY = [self.predictAtScore(x, 0, 1) for x in inputX]
		plt.plot(inputX, inputY)
		plt.show()

class SimpleLogistic(Model):
	"""Plain logistic regression on (score, secScore, sqrt(nCdds))."""
	#score: Total:0.4087, Train:0.4067, Test: 0.4231
	#score+SecScore: Total:0.4064, Train: 0.4043, test: 0.4199
	#score+SecScore+nCdds: not so good
	#score+SecScore+ except much candidates: total: 0.4104, train:0.4095, test:0.4169
	def __init__(self, dataSet=None):
		super(SimpleLogistic, self).__init__(dataSet)

	def _2MaxScore(self, name):
		"""Return the largest and second-largest candidate malignancy scores."""
		score = 0
		secScore = 0
		for cdd in self.dataSet[name]['candidates']:
			assert cdd['prob'] >= 0.1
			# BUGFIX: demote the previous maximum to runner-up instead of
			# discarding it (the original result depended on candidate order).
			if cdd['mal_prob'] > score:
				secScore = score
				score = cdd['mal_prob']
			elif cdd['mal_prob'] > secScore:
				secScore = cdd['mal_prob']
		return score, secScore

	def _Expand(self, score, secScore):
		"""Two-feature variant (kept for experiments; not used by fit)."""
		a = np.zeros((1, 2))
		a[0][0] = score
		a[0][1] = secScore
		return a

	def _ExpandThree(self, score, secScore, nCdds):
		"""Active feature row: (score, secScore, sqrt(nCdds))."""
		a = np.zeros((1, 3))
		a[0][0] = score
		a[0][1] = secScore
		a[0][2] = np.sqrt(nCdds)
		return a

	def _ExpandOne(self, score):
		"""One-feature variant (kept for experiments; not used by fit)."""
		a = np.zeros((1, 1))
		a[0][0] = score
		return a

	def fit(self, nameList):
		"""Fit the three-feature logistic model on ``nameList``."""
		Model.statsTrain(self, nameList)
		self.nFeature = 1
		inputX = np.zeros((len(nameList), 3))
		inputY = np.zeros((len(nameList)))
		for i, name in enumerate(nameList):
			score, secScore = self._2MaxScore(name)
			nCdds = len(self.dataSet[name]['candidates'])
			# Cleanup: the original also built _Expand(score, secScore) here
			# and immediately overwrote it with the three-feature row.
			inputX[i] = self._ExpandThree(score, secScore, nCdds)
			inputY[i] = self.dataSet[name]['cancer']
		self.model = LogisticRegression(C=1, max_iter=1000)
		self.model.fit(inputX, inputY)

	def predictAtScore(self, score, secScore, nCdds):
		"""Return P(cancer), clamped; zero score falls back to the zeroProb prior."""
		if score < Eplison:
			return self.zeroProb
		inputX = self._ExpandThree(score, secScore, nCdds)
		pMal = self.model.predict_proba(inputX)[0][1]
		return min(max(pMal, self.lowerBound), self.upperBound)

	def predictAtName(self, name):
		"""Return P(cancer) for scan ``name``."""
		score, secScore = self._2MaxScore(name)
		nCdds = len(self.dataSet[name]['candidates'])
		if score == 0:
			return self.zeroProb
		return self.predictAtScore(score, secScore, nCdds)

	def check(self):
		"""Plot probability vs. primary score (secScore=0, single candidate).

		BUGFIX: the original called predictAtScore with one argument, which
		raised TypeError (it requires score, secScore, nCdds).
		"""
		inputX = np.linspace(0, 1, 500)
		inputY = [self.predictAtScore(x, 0, 1) for x in inputX]
		plt.plot(inputX, inputY)
		plt.show()


class Linear(Model):
	"""Baseline model: predict the clamped maximum candidate score directly."""
	# Lower = 0.05, Upper = 0.75
	# Total:0.4059, train:0.4037, test: 0.4209
	def __init__(self, dataSet=None):
		Model.__init__(self, dataSet)

	def fit(self, nameList):
		"""Gather training statistics only; there are no parameters to learn."""
		Model.statsTrain(self, nameList)

	def predictAtName(self, name):
		"""Return the scan's max candidate score clamped into [lowerBound, upperBound]."""
		raw = Model._MaxScore(self, name)
		floored = max(raw, self.lowerBound)
		return min(floored, self.upperBound)
	

class NN(Model):
	"""Small fully-connected network on (score, secScore, nCdds-derived) features.

	Keras is imported lazily in __init__ so the rest of the module works
	without a keras installation.  NOTE(review): ``weightDecay`` is not
	defined in this class -- it presumably comes from ``from config import *``
	at the top of the file; confirm it exists there.
	"""
	# Total: 0.40798, train: 0.40622, test: 0.42052
	def __init__(self, dataSet=None):
		Model.__init__(self, dataSet)
		from keras.optimizers import Adam, Adagrad
		from keras.models import Sequential
		from keras.layers import Dense, Activation
		from keras.layers.advanced_activations import LeakyReLU
		from keras.regularizers import l1, l2
		import os
		os.environ["CUDA_VISIBLE_DEVICES"] = ""	# force CPU execution

		np.random.seed(0)	# reproducible weight initialization
		layerDim = [10, 10, 10, 10, 10, 1]	# [6,4,3,1] was also tried
		learning_rate = 0.001
		self.model = Sequential()
		self.model.add(Dense(layerDim[0], input_dim=3, activation='elu', W_regularizer=l2(weightDecay)))
		self.model.add(Dense(layerDim[1], activation='elu', W_regularizer=l2(weightDecay)))
		self.model.add(Dense(layerDim[2], activation='elu', W_regularizer=l2(weightDecay)))
		self.model.add(Dense(layerDim[3], activation='elu', W_regularizer=l2(weightDecay)))
		self.model.add(Dense(layerDim[4], activation='elu', W_regularizer=l2(weightDecay)))
		self.model.add(Dense(layerDim[5], activation='sigmoid', W_regularizer=l2(weightDecay)))
		optim = Adam(lr=learning_rate)
		self.model.compile(optimizer=optim, loss='binary_crossentropy', metrics=['accuracy'])

	def _2MaxScore(self, name):
		"""Return the largest and second-largest candidate malignancy scores."""
		score = 0
		secScore = 0
		for cdd in self.dataSet[name]['candidates']:
			assert cdd['prob'] >= 0.1
			# BUGFIX: demote the previous maximum to runner-up instead of
			# discarding it (the original result depended on candidate order).
			if cdd['mal_prob'] > score:
				secScore = score
				score = cdd['mal_prob']
			elif cdd['mal_prob'] > secScore:
				secScore = cdd['mal_prob']
		return score, secScore

	def _Expand(self, score, secScore):
		"""Two-feature variant (kept for experiments; not used by fit)."""
		a = np.zeros((1, 2))
		a[0][0] = score
		a[0][1] = secScore
		return a

	def _ExpandThree(self, score, secScore, nCdds):
		"""Active feature row: (score, secScore, nCdds) -- nCdds is passed in
		already transformed by the caller (see the NOTE in fit)."""
		a = np.zeros((1, 3))
		a[0][0] = score
		a[0][1] = secScore
		a[0][2] = nCdds
		return a

	def _ExpandOne(self, score):
		"""One-feature variant (kept for experiments; not used by fit)."""
		a = np.zeros((1, 1))
		a[0][0] = score
		return a

	def fit(self, nameList, validList=None):
		"""Train with a validation set.

		BUGFIX: the original crashed on ``len(None)`` when validList was
		omitted despite its default; now it delegates to fitWithoutValid.
		NOTE(review): the training loop feeds sqrt(nCdds) while the
		validation loop feeds raw nCdds and predictAtName feeds
		min(nCdds, 10) -- this inconsistency is preserved from the original;
		confirm which transform is intended.
		"""
		if validList is None:
			return self.fitWithoutValid(nameList)
		Model.statsTrain(self, nameList)
		inputX = np.zeros((len(nameList), 3))
		inputY = np.zeros((len(nameList)))
		validInputX = np.zeros((len(validList), 3))
		validInputY = np.zeros((len(validList)))
		for i, name in enumerate(nameList):
			score, secScore = self._2MaxScore(name)
			nCdds = np.sqrt(len(self.dataSet[name]['candidates']))
			inputX[i] = self._ExpandThree(score, secScore, nCdds)
			inputY[i] = self.dataSet[name]['cancer']

		for i, name in enumerate(validList):
			score, secScore = self._2MaxScore(name)
			nCdds = len(self.dataSet[name]['candidates'])
			validInputX[i] = self._ExpandThree(score, secScore, nCdds)
			validInputY[i] = self.dataSet[name]['cancer']

		self.model.fit(inputX, inputY, validation_data=(validInputX, validInputY), batch_size=len(nameList), nb_epoch=4000, verbose=1, shuffle=True)

	def fitWithoutValid(self, nameList):
		"""Train on ``nameList`` with no validation split (full-batch)."""
		Model.statsTrain(self, nameList)
		inputX = np.zeros((len(nameList), 3))
		inputY = np.zeros((len(nameList)))

		for i, name in enumerate(nameList):
			score, secScore = self._2MaxScore(name)
			nCdds = len(self.dataSet[name]['candidates'])
			inputX[i] = self._ExpandThree(score, secScore, nCdds)
			inputY[i] = self.dataSet[name]['cancer']

		self.model.fit(inputX, inputY, batch_size=len(nameList), nb_epoch=4000, verbose=1, shuffle=True)

	def predictAtScore(self, score, secScore, nCdds):
		"""Return the network's P(cancer), clamped; zero score uses zeroProb."""
		if score < Eplison:
			return self.zeroProb
		# Cleanup: the original built _ExpandOne here and immediately
		# overwrote it with the three-feature row.
		inputX = self._ExpandThree(score, secScore, nCdds)
		pMal = self.model.predict(inputX)[0][0]
		return min(max(pMal, self.lowerBound), self.upperBound)

	def predictAtName(self, name):
		"""Return P(cancer) for scan ``name`` (nCdds capped at 10)."""
		score, secScore = self._2MaxScore(name)
		nCdds = min(len(self.dataSet[name]['candidates']), 10)
		if score == 0:
			return self.zeroProb
		return self.predictAtScore(score, secScore, nCdds)

class NaiveBayes(Model):
	"""Naive Bayes on the max candidate score, using a +-0.06 window count
	as the class-conditional likelihood estimate."""
	def __init__(self, dataSet=None):
		Model.__init__(self, dataSet)

	def fit(self, nameList):
		"""Gather the per-class score lists used as likelihood samples."""
		Model.statsTrain(self, nameList)

	def _GetProb(self, score, scoreList):
		"""Fraction of scoreList strictly within +-0.06 of score (count floored at 1)."""
		halfWindow = 0.06
		count = sum(1 for s in scoreList if (score - halfWindow) < s < (score + halfWindow))
		return max(count, 1) / len(scoreList)

	def predictAtScore(self, score):
		"""Posterior P(cancer | score) via Bayes' rule, clamped to the bounds."""
		likeMal = self._GetProb(score, self.malScoreList)
		likeBen = self._GetProb(score, self.benScoreList)
		jointMal = likeMal * (self.nMal / self.nAll)
		jointBen = likeBen * (self.nBen / self.nAll)
		posterior = jointMal / (jointMal + jointBen)
		return min(max(posterior, self.lowerBound), self.upperBound)

	def predictAtName(self, name):
		"""Predict for a named scan (the second clamp is redundant but harmless)."""
		posterior = self.predictAtScore(Model._MaxScore(self, name))
		return min(max(posterior, self.lowerBound), self.upperBound)
	
class NoduleLogistic(Model):
	"""Logistic regression on the two best candidates' malignancy scores and
	detection probabilities, plus sqrt(#candidates) and two sum features."""
	# Total:0.405670, train:0.40337, test:0.41992
	def __init__(self, dataSet=None):
		super(NoduleLogistic, self).__init__(dataSet)
		self.nPoly = 1			# polynomial degree per raw feature
		self.nFeature = self.nPoly + 0	# feature slots reserved per raw feature

	def _2MaxScore(self, name):
		"""Return (bestScore, secondScore, bestProb, secondProb) over candidates."""
		score = 0
		fstProb = 0
		secScore = 0
		secProb = 0
		for cdd in self.dataSet[name]['candidates']:
			assert cdd['prob'] >= 0.1
			# BUGFIX: when a new maximum arrives, demote the previous best
			# (score AND its detection prob) to runner-up; the original
			# discarded them, so the result depended on candidate order.
			if cdd['mal_prob'] > score:
				secScore, secProb = score, fstProb
				score, fstProb = cdd['mal_prob'], cdd['prob']
			elif cdd['mal_prob'] > secScore:
				secScore, secProb = cdd['mal_prob'], cdd['prob']
		return score, secScore, fstProb, secProb

	def _Expand(self, score, secScore, fstProb, secProb, nCdds):
		"""Build the (1, 4*nFeature+3) feature row for one scan."""
		a = np.zeros((1, self.nFeature*4+3))
		a[0][0] = score
		a[0][self.nFeature] = secScore
		a[0][self.nFeature*2] = fstProb
		a[0][self.nFeature*3] = secProb
		a[0][self.nFeature*4] = np.sqrt(nCdds)
		a[0][self.nFeature*4+1] = fstProb+score
		a[0][self.nFeature*4+2] = secProb+secScore

		for i in range(1, self.nPoly):	# no-op while nPoly == 1; kept for tuning
			a[0][i] = a[0][i-1]*score
			a[0][i+self.nFeature] = a[0][i+self.nFeature-1]*secScore
			# BUGFIX (latent): the original read both recurrences below from
			# the secScore block (i+self.nFeature-1) instead of from their
			# own fstProb/secProb blocks.
			a[0][i+self.nFeature*2] = a[0][i+self.nFeature*2-1]*fstProb
			a[0][i+self.nFeature*3] = a[0][i+self.nFeature*3-1]*secProb

		return a

	def fit(self, nameList):
		"""Fit the logistic model on the scans in ``nameList``."""
		Model.statsTrain(self, nameList)
		inputX = np.zeros((len(nameList), 4*self.nFeature+3))
		inputY = np.zeros((len(nameList)))
		for i, name in enumerate(nameList):
			score, secScore, fstProb, secProb = self._2MaxScore(name)
			nCdds = len(self.dataSet[name]['candidates'])
			inputX[i] = self._Expand(score, secScore, fstProb, secProb, nCdds)
			inputY[i] = self.dataSet[name]['cancer']
		self.model = LogisticRegression(C=100, max_iter=1000)
		self.model.fit(inputX, inputY)

	def predictAtScore(self, score, secScore, fstProb, secProb, nCdds):
		"""Return P(cancer), clamped; zero score falls back to the zeroProb prior."""
		if score < Eplison:
			return self.zeroProb
		inputX = self._Expand(score, secScore, fstProb, secProb, nCdds)
		pMal = self.model.predict_proba(inputX)[0][1]
		return min(max(pMal, self.lowerBound), self.upperBound)

	def predictAtName(self, name):
		"""Return P(cancer) for scan ``name``."""
		score, secScore, fstProb, secProb = self._2MaxScore(name)
		if score == 0:
			return self.zeroProb
		nCdds = len(self.dataSet[name]['candidates'])
		return self.predictAtScore(score, secScore, fstProb, secProb, nCdds)



