import numpy as np
import pandas as pd
import os
import pickle
import h5py
import random
import matplotlib.pyplot as plt
import sys
from config import *
from utils import *
from model import Logistic, DoubleLogistic, Linear, SimpleLogistic, NN, NaiveBayes, NoduleLogistic


#########################
# Length of trainsetList and testSetList must match.

WDModelDir = '../../data/tmp_classification_branch1/results/'
JYModelDir = '../../data/tmp_classification_branch2/result(baseline)/'

# Each entry below is <model dir> + <run subdir>/<fold-file prefix>; the fold
# index and '.pkl' suffix get appended when the predictions are loaded.
# NOTE(review): several prefixes appear twice -- presumably to up-weight those
# models in the score averaging; confirm intent before deduplicating.
trainsetNameList = [base + rel for base, rel in (
				(WDModelDir, 'vgg_shortcut_cddv15_v2_without_test_epoch-0099/vgg_shortcut_cddv15_v2_without_test_fold'),
				(WDModelDir, 'vgg_shortcut_cddv15_v2_without_test_epoch-0099/vgg_shortcut_cddv15_v2_without_test_fold'),
				(WDModelDir, 'vgg_shortcut_cddv9_without_test_epoch-0099/vgg_shortcut_cddv9_without_test_fold'),
				(WDModelDir, 'vgg_shortcut_cddv9_without_test_epoch-0099/vgg_shortcut_cddv9_without_test_fold'),

				(JYModelDir, 'v15_without_test_90/vgg13_v15_cv_fold'),
				(JYModelDir, 'v15_without_test_97/vgg13_v15_cv_fold'),
				(JYModelDir, 'v15_without_test_99/vgg13_v15_cv_fold'),
				(JYModelDir, 'v9_without_test_120/vgg13_v9_cv_fold'),

				(WDModelDir, 'vgg_shortcut_cddv15_v2_with_test_epoch-0099/vgg_shortcut_cddv15_v2_with_test_fold'),
				(WDModelDir, 'vgg_shortcut_cddv15_v2_with_test_epoch-0099/vgg_shortcut_cddv15_v2_with_test_fold'),
				(WDModelDir, 'vgg_shortcut_cddv9_with_test_epoch-0099/vgg_shortcut_cddv9_with_test_fold'),
				(WDModelDir, 'vgg_shortcut_cddv9_with_test_epoch-0099/vgg_shortcut_cddv9_with_test_fold'),

				(JYModelDir, 'v15_test_90/vgg13_v15_cv_fold'),
				(JYModelDir, 'v15_test_100/vgg13_v15_cv_fold'),
				(JYModelDir, 'v9_test_120/vgg13_v9_cv_fold'),
				)]

def trainAllModel(trainsetData):
	"""Cross-validate a stacking ensemble over all base-model predictions.

	Loads each base model's per-fold candidate predictions, fits one stacking
	model per base model on every CV fold, scores held-out scans with a
	trimmed mean of the per-model predictions, and pickles the result.

	Args:
		trainsetData: list, filled in place. One dict per entry of
			``trainsetNameList`` with keys ``name``, ``predScan``, ``model``
			(stacking model from the last CV fold) and ``allmodel`` (stacking
			model fit on every scan).

	Side effects: reads ``<name><fold>.pkl`` files, prints the running
	cross-validation log-loss, and writes ``trainsetData1.pkl`` and
	``trainsetData2.pkl``.
	"""
	loadTrainsets(trainsetData)

	if checkScanConsist:
		CheckConsist(trainsetData)

	# All datasets are expected to cover the same scans; use the first one as
	# the reference for the CV split and for the ground-truth labels.
	trainScanList = list(trainsetData[0]['predScan'].keys())
	losses = []  # per-scan log-losses, accumulated across all folds
	pred = []    # per-scan ensemble scores, in evaluation order

	#########################################################################
	# Train and cross-validate the stacking models.
	for iFold in range(nMyFold):
		trainSet, testSet = DivideData(trainScanList, iFold=iFold, nFold=nMyFold)
		# Train each dataset's stacking model separately on this fold.
		for dataset in trainsetData:
			model = buildModel(dataset['predScan'])
			if modelType == 'NN':
				# The NN variant needs an explicit validation set while fitting.
				model.fit(trainSet, testSet)
			else:
				model.fit(trainSet)
			if verboseModel:
				model.check()
			dataset['model'] = model
			# On the last fold, additionally fit a model on the whole scan
			# list for final use.
			# BUG FIX: the original tested ``iFold == (nFold - 1)`` although
			# the loop runs over nMyFold folds, so whenever nFold != nMyFold
			# the full-data model was fit on the wrong fold or never at all.
			# The model is now also only constructed when actually needed.
			if iFold == nMyFold - 1:
				allmodel = buildModel(dataset['predScan'])
				if modelType == 'NN':
					allmodel.fitWithoutValid(trainScanList)
				else:
					allmodel.fit(trainScanList)
				dataset['allmodel'] = allmodel

		# Cross-validate the ensemble on this fold's held-out scans.
		predScan = trainsetData[0]['predScan']
		for scanName in testSet:
			scoreList = [ds['model'].predictAtName(scanName) for ds in trainsetData]
			score = trimmedMean(scoreList)
			pred.append(score)
			losses.append(logLoss(predScan[scanName]['cancer'], score))

		# Running mean over every scan scored so far (not just this fold),
		# matching the original reporting behavior.
		print("Your score is {}".format(np.mean(losses)))

	# Persist the trained ensemble twice, keeping the original output names.
	for outName in ('trainsetData1.pkl', 'trainsetData2.pkl'):
		with open(outName, 'wb') as f:
			pickle.dump(trainsetData, f)


def loadTrainsets(trainsetData):
	"""Append one dict per base model to *trainsetData*, each holding the
	model's name and its per-scan predictions merged over all nFold folds."""
	for datasetName in trainsetNameList:
		print(datasetName)
		cddsList = []
		for iFold in range(nFold):
			# 'with' guarantees the file is closed even if unpickling fails.
			with open(datasetName + str(iFold) + '.pkl', 'rb') as f:
				cddsList.extend(pickle.load(f))
		# Runs trained without the test set only cover the training scans.
		if datasetName.find('without') != -1:
			print('Without testset')
			mode = 'train'
		else:
			print('With testset')
			mode = 'all'
		trainsetData.append({
			'name': datasetName,
			'predScan': cdds2scan(cddsList, mode=mode),
		})


def buildModel(predScan):
	"""Instantiate the stacking model selected by the global ``modelType``.

	Raises NameError (same type/message as before) for unknown model types.
	"""
	modelClasses = {
		'Logistic': Logistic,
		'NaiveBayes': NaiveBayes,
		'NN': NN,
		'DoubleLogistic': DoubleLogistic,
		'Linear': Linear,
		'SimpleLogistic': SimpleLogistic,
		'NoduleLogistic': NoduleLogistic,
	}
	try:
		return modelClasses[modelType](predScan)
	except KeyError:
		raise NameError('Unknown model type')


def trimmedMean(scoreList):
	"""Average *scoreList* after dropping its single highest and lowest
	values; falls back to the plain mean when fewer than three scores are
	given (the original formula would have divided by zero)."""
	if len(scoreList) <= 2:
		return np.mean(scoreList)
	s = np.sum(scoreList) - np.max(scoreList) - np.min(scoreList)
	return s / (len(scoreList) - 2)


def logLoss(label, score):
	"""Binary cross-entropy for one scan. *score* is clipped away from 0 and
	1 so a fully confident model cannot produce inf/nan losses (the original
	called np.log on the raw score)."""
	p = np.clip(score, 1e-15, 1.0 - 1e-15)
	return -(label * np.log(p) + (1 - label) * np.log(1 - p))



if __name__ == '__main__':
	# Announce which stacking model type is configured, then run the full
	# training/cross-validation pipeline; results land in trainsetData*.pkl.
	print(modelType)
	collectedData = []
	trainAllModel(collectedData)

	