import json
import sys
import numpy as np
import matplotlib.pyplot as plt
import collections
import ast
import pickle
import multiprocessing
import os
from math import log
from math import exp
from heapq import nlargest
from time import sleep
from time import time, ctime
from itertools import groupby
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsOneClassifier
from sklearn import datasets
from sklearn.externals import joblib

from util import *
	
def kbeam(trainThres,data,labels, CH, PH, getResultM):
	"""Beam search (width K) over a label hierarchy using per-node classifiers.

	Walks the hierarchy level by level starting at root node 0. At each node
	the classifier obtained through getResultM scores the samples currently
	routed to that node, log-probabilities are accumulated along each label
	path, and only the K highest-scoring paths per sample survive each layer.

	Args:
		trainThres: threshold suffix selecting the classifier directory.
		data: list of test documents.
		labels: gold label paths, one per document.
		CH: dict mapping a node id to the list of its child node ids.
		PH: parent map, forwarded unchanged to getResultM.
		getResultM: callable (p, path, CH, PH, text_clf, subData) ->
			(classes, resultM); returns an int for classes when the node
			has no usable classifier (sentinel, node is skipped).

	Side effects: logs progress via printLog/analysisLog and prints the
	L1..L9 path-match accuracy percentages at the end.
	"""
	printLog('kbeam'+str(getResultM))
	CLF_name = 'ML_NB'
	K = 3	# beam width
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	# classifier cache, shared with getResultM (kept global per file convention)
	global text_clf
	text_clf = dict()

	time0 = time()

	tested = dict()
	# predicted[i]: up to K candidate label paths for sample i (all start at root 0)
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	# prob[i][j]: cumulative log-probability of path predicted[i][j]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	# dataDict[node]: list of (sample index, index into predicted[sample])
	dataDict = dict()
	dataDict[0] = [(i, 0) for i in range(len(data))]

	parents = [0]
	cnt = 0

	seenLayer = 0
	while len(parents) != 0:
		children = []
		seenLayer += 1
		for p in parents:
			if p in CH:
				children += CH[p]
				tested[p] = True
				cnt += 1

				# gather the samples currently routed to node p
				subData = []
				subIdx = []
				subProb = []
				subPathIdx = []
				for i, j in dataDict.get(p, []):
					subIdx.append(i)
					subData.append(data[i])
					subProb.append(prob[i][j])
					subPathIdx.append(j)
				dataDict[p] = []

				if len(subIdx) == 0:
					continue

				tmpTime = time()
				classes, resultM = getResultM(p, path, CH, PH, text_clf, subData)
				if type(classes) is int:
					# sentinel: no classifier available for this node
					continue

				indexes = range(len(classes))

				# extend each sample's current path with every candidate class
				for i in range(len(resultM)):
					for l in indexes:
						predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [classes[l]])
						probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
				tmpTime = time()-tmpTime
				analysisLog('#'+str(cnt)+' time ='+str(round((time()-time0),1))+' ('+str(p)+')'+' size = '+str(len(subIdx))+' IT = '+str(tmpTime))

		parents = children
		# prune: keep only the K most probable candidate paths per sample
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					pred = predictedNext[i][l][-1]
					# route the sample to the node it was just classified into
					# (setdefault replaces the original bare try/except insert)
					dataDict.setdefault(pred, []).append((i, len(predicted[i])))
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []


	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	# keep only the single most probable path per sample
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	# correct[l]: count of samples whose first l path elements match the gold path
	correct = [0] * 10
	printLog('L1 to L9 in %')
	for i in range(len(predicted)):
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1

	for i in range(1,10):
		printLog(str(round(float(correct[i])/correct[0]*100, 2)))
	printLog('--------------')

def KbeamV4(trainThres,data,labels, CH, PH):
	"""Beam search (width K) over the label hierarchy, loading one joblib
	classifier per node from disk.

	Same layer-by-layer scheme as kbeam: samples are routed to hierarchy
	nodes, scored with that node's classifier, and the K best cumulative
	log-probability paths per sample are kept after each layer.

	Args:
		trainThres: threshold suffix selecting the classifier directory.
		data: list of test documents.
		labels: gold label paths, one per document.
		CH: dict mapping a node id to the list of its child node ids.
		PH: parent map (unused here; kept for signature parity).

	Side effects: logs via printLog(logFile, ...) and prints the L1..L9
	path-match accuracy percentages at the end.
	"""
	printLog(logFile,'KbeamV4')
	CLF_name = 'ML_NB'
	K = 3	# beam width
	printLog(logFile,'K='+str(K))
	
	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(logFile,path)

	# per-node classifier cache (kept global per file convention)
	global text_clf
	text_clf = dict()
	
	##################TEST SET###################

	time0 = time()
	
	tested = dict()
	# predicted[i]: up to K candidate label paths for sample i (all start at root 0)
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	# prob[i][j]: cumulative log-probability of path predicted[i][j]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	# subdata[node]: list of (sample index, index into predicted[sample])
	subdata = dict()
	subdata[0] = [(i, 0) for i in range(len(data))]

	parents = [0]
	cnt = 0

	seenLayer = 0
	while len(parents) != 0:
		children = []
		seenLayer += 1
		for p in parents:
			if p in CH:
				children += CH[p]
				tested[p] = True
				cnt += 1

				# gather the samples currently routed to node p
				subData = []
				subIdx = []
				subProb = []
				for i, j in subdata.get(p, []):
					subIdx.append(i)
					subData.append(data[i])
					subProb.append(prob[i][j])
				subdata[p] = []
				if len(subIdx) == 0:
					continue

				# nodes without a trained classifier are skipped
				if not os.path.isfile(path+str(p)+'.pkl'):
					continue

				subdataCount = len(subIdx)
				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')
				tmpTime = time()
				resultM = text_clf[p].predict_log_proba(subData)
				indexes = range(len(resultM[0]))

				# classifier can report fewer classes than probability columns;
				# truncate so indexes stay within classes_ (original "WHY?" note)
				if len(text_clf[p].steps[-1][-1].classes_) != len(resultM[0]):
					indexes = indexes[:len(text_clf[p].steps[-1][-1].classes_)]

				# NOTE(review): this expands EVERY kept path of the sample with
				# each class but always adds subProb[i] (the probability of one
				# particular path), mixing scores across paths. KbeamV5 tracks
				# the originating path via subPathIdx instead — confirm whether
				# this variant is intentionally kept for comparison.
				for i in range(len(resultM)):
					for l in indexes:
						for j in predicted[subIdx[i]]:
							predictedNext[subIdx[i]].append(j + [text_clf[p].steps[-1][-1].classes_[l]])
							probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
				tmpTime = time()-tmpTime
				# analysisLog('Node '+str(p)+', Size: '+str(subdataCount)+', timeCost: '+str(tmpTime))
				
		parents = children
		# prune: keep only the K most probable candidate paths per sample
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					tmpp = predictedNext[i][l][-1]
					# route the sample to the node it was just classified into
					# (setdefault replaces the original bare try/except insert)
					subdata.setdefault(tmpp, []).append((i, len(predicted[i])))
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []

	
	printLog(logFile, 'Testing is Done in '+ str(round(time()-time0)))

	# keep only the single most probable path per sample
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	# correct[l]: count of samples whose first l path elements match the gold path
	correct = [0] * 10
	printLog(logFile,'L1 to L9 in %')
	for i in range(len(predicted)):
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1
		
	for i in range(1,10):
		printLog(logFile,str(round(float(correct[i])/correct[0]*100, 2)))
	printLog(logFile,'--------------')

def KbeamV5(trainThres,data,labels, CH, PH):
	"""Beam search (width K) over the label hierarchy, loading one joblib
	classifier per node from disk.

	Unlike KbeamV4, each beam entry remembers which kept path it came from
	(subPathIdx), so a class extension is paired with the log-probability
	of its own originating path.

	Args:
		trainThres: threshold suffix selecting the classifier directory.
		data: list of test documents.
		labels: gold label paths, one per document.
		CH: dict mapping a node id to the list of its child node ids.
		PH: parent map (unused here; kept for signature parity).

	Side effects: logs via printLog/analysisLog and prints the L1..L9
	path-match accuracy percentages at the end.
	"""
	# fix: original logged 'KbeamV4' here (copy-paste leftover)
	printLog('KbeamV5')
	CLF_name = 'ML_NB'
	K = 3	# beam width
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	# per-node classifier cache (kept global per file convention)
	global text_clf
	text_clf = dict()

	##################TEST SET###################

	time0 = time()

	tested = dict()
	# predicted[i]: up to K candidate label paths for sample i (all start at root 0)
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	# prob[i][j]: cumulative log-probability of path predicted[i][j]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	# dataDict[node]: list of (sample index, index into predicted[sample])
	dataDict = dict()
	dataDict[0] = [(i, 0) for i in range(len(data))]

	parents = [0]
	cnt = 0

	seenLayer = 0
	while len(parents) != 0:
		children = []
		seenLayer += 1
		for p in parents:
			if p in CH:
				children += CH[p]
				tested[p] = True
				cnt += 1

				# nodes without a trained classifier are skipped
				if not os.path.isfile(path+str(p)+'.pkl'):
					continue

				# gather the samples currently routed to node p
				subData = []
				subIdx = []
				subProb = []
				subPathIdx = []
				for i, j in dataDict.get(p, []):
					subIdx.append(i)
					subData.append(data[i])
					subProb.append(prob[i][j])
					subPathIdx.append(j)
				dataDict[p] = []

				if len(subIdx) == 0:
					continue

				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')
				tmpTime = time()
				resultM = text_clf[p].predict_log_proba(subData)
				indexes = range(len(resultM[0]))

				# classifier can report fewer classes than probability columns;
				# truncate so indexes stay within classes_ (original "WHY?" note)
				clfClasses = text_clf[p].steps[-1][-1].classes_
				if len(clfClasses) != len(resultM[0]):
					indexes = indexes[:len(clfClasses)]

				# extend each originating path with every candidate class
				for i in range(len(resultM)):
					for l in indexes:
						predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [clfClasses[l]])
						probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
				tmpTime = time()-tmpTime
				analysisLog('#'+str(cnt)+' time ='+str(round((time()-time0),1))+' ('+str(p)+')'+' size = '+str(len(subIdx))+' IT = '+str(tmpTime))

		parents = children
		# prune: keep only the K most probable candidate paths per sample
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					pred = predictedNext[i][l][-1]
					# route the sample to the node it was just classified into
					# (setdefault replaces the original bare try/except insert)
					dataDict.setdefault(pred, []).append((i, len(predicted[i])))
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []


	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	# keep only the single most probable path per sample
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	# correct[l]: count of samples whose first l path elements match the gold path
	correct = [0] * 10
	printLog('L1 to L9 in %')
	for i in range(len(predicted)):
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1

	for i in range(1,10):
		printLog(str(round(float(correct[i])/correct[0]*100, 2)))
	printLog('--------------')


def KbeamBinary(trainThres,data,labels, CH, PH):
	printLog(logFile, 'KbeamBinary')
	CLF_name = 'BL_NB'
	K = 5
	printLog(logFile, 'K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(logFile, path)

	global text_clf
	text_clf = dict()

	##################TEST SET###################

	time0 = time()

	tested = dict()
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	dataDict = dict()

	dataDict[0]=[]
	for i in range(len(data)):
		dataDict[0].append((i, 0))	#(index, index in predicted[i])

	parents = [0]
	cnt = 0

	seenLayer = 0
	while len(parents)!= 0:
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				subData = []
				subIdx = []
				subProb = []
				subPath = []
				subPathIdx = []
				for i, j in dataDict.get(p, []):
					subIdx.append(i)
					subData.append(data[i])
					subProb.append(prob[i][j])
					subPathIdx.append(j)
				dataDict[p]=[]

				if len(subIdx) == 0:
					continue

				#print 'size =',len(subIdx)

				# if p not in text_clf:
				# 	text_clf[p] = joblib.load(path+str(p)+'.pkl')
				tmpTime = time()
				# classes = [children, [0.0]*len(children)]
				# resultM = text_clf[p].predict_log_proba(subData)
				# indexes = range(len(resultM[0]))
				resultM = [[0.0]*len(children)]*len(subIdx) # each data has a array of probabilites for each class
				indexes = range(len(resultM[0]))

				for j in range(len(children)): # process each child node
					currentNode = children[j]
					if currentNode not in text_clf:
						try:
							text_clf[currentNode] = joblib.load(path+str(currentNode)+'.pkl')
						except:
							text_clf[currentNode] =  None

					if text_clf[currentNode] == None: # check node classifier exists or not
						tmpProb=0.0
					else:
						tmpProb = text_clf[currentNode].predict_log_proba(subData) # classifiy all data
						tmpClas = text_clf[currentNode].steps[-1][-1].classes_
						if tmpClas[0]==1:
							classIdx = 0
						else:
							classIdx = 1

						for dataIdx in range(len(subIdx)): # store result in resultM
							dataProb = tmpProb[dataIdx]
							if len(dataProb)!=2:
								print 'weird!', dataProb
								input('Any issue?')
							resultM[dataIdx][j] = tmpProb[dataIdx][classIdx]

				for i in range(len(resultM)):
					for l in indexes:
						predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [children[l]])
						probNext[subIdx[i]].append(resultM[i][j]+subProb[i])
						# probNext[subIdx[i]].append(resultM[i][j])

				# for i in range(len(resultM)):
				# 	for l in indexes:
				# 		predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [text_clf[p].steps[-1][-1].classes_[l]])
				# 		probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
				tmpTime=time()-tmpTime
				analysisLog('#'+str(cnt)+' time ='+str(round((time()-time0),1))+' ('+str(p)+')'+' size = '+str(len(subIdx))+' IT = '+str(tmpTime))

		parents = children
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					pred = predictedNext[i][l][-1]
					try:
						dataDict[pred].append((i, len(predicted[i])))
					except:
						dataDict[pred]=[(i, len(predicted[i]))]
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []


	printLog(logFile,  'Testing is Done in '+ str(round(time()-time0)))

	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	resultLog(predicted, labels)