#!/usr/bin/python

import json
import sys
import numpy as np
import matplotlib.pyplot as plt
import collections
import ast
import pickle
import multiprocessing
import os
from math import log
from math import exp
from heapq import nlargest
from time import sleep
from time import time, ctime
from itertools import groupby
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB, GaussianNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsOneClassifier
from sklearn import datasets
from sklearn.externals import joblib

from score_methods import *

# Module-level shared state; populated by init() (defined elsewhere -- TODO confirm
# it comes from score_methods or a missing section of this file).
validLabels = None
StatFile = None
CMFile = None
LabelDict = None
CH=None		# label hierarchy: node id -> list of child node ids (root is 0)
PH=None		# counterpart of CH -- presumably node id -> parent(s); verify against init()
text_clf=None	# cache of per-node classifiers loaded from disk, keyed by node id
lock = multiprocessing.Lock()
manager = multiprocessing.Manager()	# NOTE: starts a manager subprocess at import time
#text_clf = manager.dict()

# Append-mode log sinks used by printLog/printDebug.
logFile = open('log.txt','a')
debugFile = open('debug.txt', 'a')

def printLog(s):
	print s
	print >>logFile, s
	logFile.flush()

def printDebug(s):
	print s
	print >>debugFile, s
	debugFile.flush()

def resultLog(predicted, labels):
	"""Report per-level path accuracy (levels 1..9) via printLog.

	predicted -- list of predicted label paths; element [0] is the root
	             and is stripped before comparison.
	labels    -- parallel list of gold label paths.
	hits[0] is the document count; hits[l] counts documents whose
	predicted path matches the gold path through level l.
	"""
	hits = [0] * 10
	printLog('L1 to L9 in %')
	for i, pathPred in enumerate(predicted):
		matched = NumPathMatched(pathPred[1:], labels[i])
		for level in range(matched + 1):
			hits[level] += 1

	for level in range(1, 10):
		printLog(str(round(float(hits[level]) / hits[0] * 100, 2)))
	printLog('--------------')

def main():
	"""Entry point: initialise module globals via init() (defined outside
	this view -- presumably loads CH/PH, data and labels; TODO confirm),
	then launch the currently selected experiment.

	The commented-out run(...) calls below are earlier experiment
	configurations kept for reference.
	"""
	init()
	#trainThres: 1 = 33706,2 = 101118,3 = 303354

	#UTILITY
	#CheckDupBFS()

	#TRAIN ML_NB
	#run(1,'PER_ascii_stem_stopword','Label_Path.pkl',ML_NB)
	#run(3,'PER_ascii_stem_stopword','Label_Path.pkl',ML_NB)
	#run(9,'PER_ascii_stem_stopword','Label_Path.pkl',ML_NB)
	# run(1,'PER_ascii_stem_stopword','Label_Path.pkl',BL_NB, message, test)



	#GREEDY
	#run(1,'PER_ascii_stem_stopword','Label_Path.pkl', GreedyWrapper)
	#run(3,'PER_ascii_stem_stopword','Label_Path.pkl', GreedyWrapper)
	#run(9,'PER_ascii_stem_stopword','Label_Path.pkl', GreedyWrapper)

	#KBEAM
	#run(9,'PER_ascii_stem_stopword','Label_Path.pkl', KbeamV4)
	message='nothing specific'	# free-text note recorded with the run
	test = False

	# K-beam with pluggable scoring (getResultM), beam width K=3.
	runAdv(1, 'PER_ascii_stem_stopword', 'Label_Path.pkl', KbeamWrapperAdv, kbeam, message, 3, getResultM, test)


# def runAdv(trainThres,data,label,model, method, message, K, test=False):



def CheckDupDFS():
	seen = dict()
	parents = [0]
	while len(parents)!= 0:
		p=parents[0]
		parents=parents[1:]
		if p in seen:
			print p,"DUP!"
			continue
		seen[p] = True

		if p in CH:
			parents = CH[p] + parents

def CheckDupBFS():
	seen = dict()
	parents = [0]
	while len(parents)!= 0:
		children = []
		for p in parents:
			if p in seen:
				print p,"DUP!"
				continue
			seen[p] = True

			if p in CH:
				children += CH[p]
		parents = children

def GreedyWrapper(trainThres,trainData,trainLabels,testData,testLabels):
	"""Run GreedyV3 on the test set, then on the training set."""
	for banner, d, l in (('Test Set:', testData, testLabels),
	                     ('Train Set:', trainData, trainLabels)):
		printLog(banner)
		GreedyV3(trainThres, d, l)

def KbeamWrapperAdv(trainThres,trainData,trainLabels,testData,testLabels, method, K, getResultM):
	"""Evaluate a K-beam search strategy on the test set, then the train set.

	method     -- the beam-search function to run, e.g. kbeam
	              (signature: (trainThres, data, labels, CH, PH, getResultM, K))
	K          -- beam width
	getResultM -- scoring callback forwarded to `method`

	BUG FIX: the original ignored the `method` parameter and always called
	kbeam() directly; `method` is now actually dispatched (behavior is
	unchanged for the existing caller, which passes method=kbeam).
	"""
	printLog('Test Set:')
	method(trainThres, testData, testLabels, CH, PH, getResultM, K)
	printLog('Train Set:')
	method(trainThres, trainData, trainLabels, CH, PH, getResultM, K)

def KbeamWrapper(trainThres,trainData,trainLabels,testData,testLabels):
	"""Run kbeam (default beam width) on the test set, then the train set.

	BUG FIX: kbeam() requires (CH, PH, getResultM) since the signature was
	extended, but this wrapper still called it with only three arguments,
	raising TypeError. The module-level CH/PH and the getResultM scorer
	(from score_methods) are now forwarded.
	"""
	printLog('Test Set:')
	kbeam(trainThres, testData, testLabels, CH, PH, getResultM)
	printLog('Train Set:')
	kbeam(trainThres, trainData, trainLabels, CH, PH, getResultM)

def kbeam(trainThres,data,labels, CH, PH, getResultM, K=3):
	"""Top-down K-beam search over the label hierarchy.

	Walk the tree CH layer by layer from the root (node 0). For every
	frontier node p, score the documents whose beam currently ends at p
	using the pluggable `getResultM` scorer, extend their paths with each
	candidate class, then prune each document's candidates back to the K
	highest cumulative log-probabilities. Finally report L1..L9 path-match
	accuracy through printLog.

	trainThres -- selects the classifier directory (ML_NB<trainThres>)
	data/labels -- parallel lists of documents and gold label paths
	CH, PH     -- child / parent maps of the label hierarchy
	getResultM -- callback (p, path, CH, PH, text_clf, subData) ->
	              (classes, log-prob matrix); returns an int to signal
	              "no usable classifier for p"
	K          -- beam width
	"""
	printLog('kbeam'+str(getResultM))
	CLF_name = 'ML_NB'
	# K = 3
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()	# classifier cache, shared via the module global

	time0 = time()

	tested = dict()
	# Per-document beam state: predicted[i] holds up to K label paths (each
	# starting at root 0), prob[i] the matching cumulative log-probs;
	# *Next collect the candidate expansions for the next layer.
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	# dataDict[node] -> list of (document index, beam-slot index) pairs
	# whose current path ends at that node.
	dataDict = dict()

	dataDict[0]=[]
	for i in range(len(data)):
		dataDict[0].append((i, 0))	#(index, index in predicted[i])

	parents = [0]
	cnt = 0

	seenLayer = 0
	while len(parents)!= 0:	# breadth-first over tree layers
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				# if not os.path.isfile(path+str(p)+'.pkl'):
				# 	continue

				# Gather the documents whose beam currently sits at node p.
				subData = []
				subIdx = []
				subProb = []
				subPathIdx = []
				for i, j in dataDict.get(p, []):
					subIdx.append(i)
					subData.append(data[i])
					subProb.append(prob[i][j])
					subPathIdx.append(j)
				dataDict[p]=[]

				if len(subIdx) == 0:
					continue

				# #print 'size =',len(subIdx)

				# if p not in text_clf:
				# 	text_clf[p] = joblib.load(path+str(p)+'.pkl')
				# resultM = text_clf[p].predict_log_proba(subData)
				# indexes = range(len(resultM[0]))

				# #WHY?
				# if len(text_clf[p].steps[-1][-1].classes_) != len(resultM[0]):
				# 	indexes = indexes[:len(text_clf[p].steps[-1][-1].classes_)]

				tmpTime = time()
				# Delegate scoring to the pluggable strategy; an int return
				# value signals "no classifier for p" -- skip this node.
				classes, resultM = getResultM(p, path, CH, PH, text_clf, subData)
				if type(classes) is int:
					continue

				indexes = range(len(classes))

				# Extend each queued path with every candidate class.
				for i in range(len(resultM)):
					for l in indexes:
						predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [classes[l]])
						probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
				tmpTime=time()-tmpTime
				printDebug('#'+str(cnt)+' time ='+str(round((time()-time0),1))+' ('+str(p)+')'+' size = '+str(len(subIdx))+' IT = '+str(tmpTime))

		parents = children
		# Beam pruning: keep the K most probable candidates per document and
		# re-queue them on dataDict under their new tail node.
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					pred = predictedNext[i][l][-1]
					try:
						dataDict[pred].append((i, len(predicted[i])))
					except:
						dataDict[pred]=[(i, len(predicted[i]))]
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []


	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	# Collapse each beam to its single most probable path.
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	# Per-level accuracy: correct[0] is the document count; correct[l]
	# counts documents matching the gold path through level l.
	correct = [0] * 10
	printLog('L1 to L9 in %')
	for i in range(len(predicted)):
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1

	for i in range(1,10):
		printLog(str(round(float(correct[i])/correct[0]*100, 2)))
	printLog('--------------')


def KbeamBinaryCascade(trainThres,data,labels, K=3):
	printLog('KbeamBinaryCascade')
	CLF_name = 'BL_NB'
	# K = 3
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()

	##################TEST SET###################

	time0 = time()

	tested = dict()
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	dataDict = dict()

	dataDict[0]=[]
	for i in range(len(data)):
		dataDict[0].append((i, 0))	#(index, index in predicted[i])

	parents = [0]
	cnt = 0

	seenLayer = 0
	missingCLFCount = 0
	totalCLFCount = 0
	while len(parents)!= 0:
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				subData = []
				subIdx = []
				subProb = []
				subPath = []
				subPathIdx = []
				for i, j in dataDict.get(p, []):
					subIdx.append(i)
					subData.append(data[i])
					subProb.append(prob[i][j])
					subPathIdx.append(j)
				dataDict[p]=[]

				if len(subIdx) == 0:
					continue

				#print 'size =',len(subIdx)

				# if p not in text_clf:
				# 	text_clf[p] = joblib.load(path+str(p)+'.pkl')
				tmpTime = time()
				# classes = [children, [0.0]*len(children)]
				# resultM = text_clf[p].predict_log_proba(subData)
				# indexes = range(len(resultM[0]))
				resultM = [[0.0]*len(children)]*len(subIdx) # each data has a array of probabilites for each class
				indexes = range(len(resultM[0]))

				####################################################################
				## TODO: If time permits, replace this part as a function to achive more felxible 
				## 		 developement.
				##		 resultM = getResultM          (...)
				for j in range(len(children)): # process each child node
					currentNode = children[j]
					if currentNode not in text_clf:
						totalCLFCount += 1
						try:
							text_clf[currentNode] = joblib.load(path+str(currentNode)+'.pkl')
						except:
							text_clf[currentNode] =  None
							missingCLFCount += 1

					if text_clf[currentNode] == None: # check node classifier exists or not
						printDebug('CLF missing: '+str(currentNode)+' classifier missing!')
						continue
					else:
						tmpProb = text_clf[currentNode].predict_log_proba(subData) # classifiy all data
						tmpClas = text_clf[currentNode].steps[-1][-1].classes_
						if tmpClas[0]==1:
							classIdx = 0
						else:
							classIdx = 1

						for dataIdx in range(len(subIdx)): # store result in resultM
							dataProb = tmpProb[dataIdx]
							if len(dataProb)!=2:
								print 'weird!', dataProb
								input('Any issue?')
							resultM[dataIdx][j] = tmpProb[dataIdx][classIdx]
				######################################################################

				for i in range(len(resultM)):
					for l in indexes:
						predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [children[l]])
						probNext[subIdx[i]].append(resultM[i][j]+subProb[i])
						# probNext[subIdx[i]].append(resultM[i][j])

				# for i in range(len(resultM)):
				# 	for l in indexes:
				# 		predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [text_clf[p].steps[-1][-1].classes_[l]])
				# 		probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
				tmpTime=time()-tmpTime
				printDebug('#'+str(cnt)+' time ='+str(round((time()-time0),1))+' ('+str(p)+')'+' size = '+str(len(subIdx))+' IT = '+str(tmpTime))

		parents = children
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					pred = predictedNext[i][l][-1]
					try:
						dataDict[pred].append((i, len(predicted[i])))
					except:
						dataDict[pred]=[(i, len(predicted[i]))]
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []

	printLog('Total clf number: '+str(totalCLFCount)+'. Missing clf number: '+str(missingCLFCount))
	printLog('Testing is Done in '+ str(round(time()-time0)))

	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	resultLog(predicted, labels)


def KbeamBinary(trainThres,data,labels, K=3):
	printLog('KbeamBinary')
	CLF_name = 'BL_NB'
	# K = 3
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()

	##################TEST SET###################

	time0 = time()

	tested = dict()
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	dataDict = dict()

	dataDict[0]=[]
	for i in range(len(data)):
		dataDict[0].append((i, 0))	#(index, index in predicted[i])

	parents = [0]
	cnt = 0

	seenLayer = 0
	while len(parents)!= 0:
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				subData = []
				subIdx = []
				subProb = []
				subPath = []
				subPathIdx = []
				for i, j in dataDict.get(p, []):
					subIdx.append(i)
					subData.append(data[i])
					subProb.append(prob[i][j])
					subPathIdx.append(j)
				dataDict[p]=[]

				if len(subIdx) == 0:
					continue

				#print 'size =',len(subIdx)

				# if p not in text_clf:
				# 	text_clf[p] = joblib.load(path+str(p)+'.pkl')
				tmpTime = time()
				# classes = [children, [0.0]*len(children)]
				# resultM = text_clf[p].predict_log_proba(subData)
				# indexes = range(len(resultM[0]))
				resultM = [[0.0]*len(children)]*len(subIdx) # each data has a array of probabilites for each class
				indexes = range(len(resultM[0]))

				####################################################################
				## TODO: If time permits, replace this part as a function to achive more felxible 
				## 		 developement.
				##		 resultM = getResultM          (...)
				for j in range(len(children)): # process each child node
					currentNode = children[j]
					if currentNode not in text_clf:
						try:
							text_clf[currentNode] = joblib.load(path+str(currentNode)+'.pkl')
						except:
							text_clf[currentNode] =  None

					if text_clf[currentNode] == None: # check node classifier exists or not
						tmpProb=0.0
					else:
						tmpProb = text_clf[currentNode].predict_log_proba(subData) # classifiy all data
						tmpClas = text_clf[currentNode].steps[-1][-1].classes_
						if tmpClas[0]==1:
							classIdx = 0
						else:
							classIdx = 1

						for dataIdx in range(len(subIdx)): # store result in resultM
							dataProb = tmpProb[dataIdx]
							if len(dataProb)!=2:
								print 'weird!', dataProb
								input('Any issue?')
							resultM[dataIdx][j] = tmpProb[dataIdx][classIdx]
				######################################################################

				for i in range(len(resultM)):
					for l in indexes:
						predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [children[l]])
						# probNext[subIdx[i]].append(resultM[i][j]+subProb[i])
						probNext[subIdx[i]].append(resultM[i][j])

				# for i in range(len(resultM)):
				# 	for l in indexes:
				# 		predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [text_clf[p].steps[-1][-1].classes_[l]])
				# 		probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
				tmpTime=time()-tmpTime
				printDebug('#'+str(cnt)+' time ='+str(round((time()-time0),1))+' ('+str(p)+')'+' size = '+str(len(subIdx))+' IT = '+str(tmpTime))

		parents = children
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					pred = predictedNext[i][l][-1]
					try:
						dataDict[pred].append((i, len(predicted[i])))
					except:
						dataDict[pred]=[(i, len(predicted[i]))]
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []


	printLog('Testing is Done in '+ str(round(time()-time0)))

	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	resultLog(predicted, labels)

def KbeamBad(trainThres,data,labels, K=3):
	"""Deprecated/debug K-beam variant -- kept for reference only.

	Known quirks (why it is "Bad"):
	- probNext records only the inherited subProb[i]; the classifier's
	  resultM scores are never added, so pruning ignores the new layer.
	- candidates are built by crossing ALL of a document's current beam
	  paths with each class (`for j in predicted[subIdx[i]]`), duplicating
	  paths whose tail is not p.
	The verbose prints are intentional debugging output.
	"""

	printLog('KbeamBad')
	CLF_name = 'ML_NB'
	# K = 3
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()	# classifier cache keyed by node id

	##################TEST SET###################

	time0 = time()

	tested = dict()
	# Per-document beam state; *Next hold candidate expansions for a layer.
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]


	parents = [0]
	cnt = 0

	seenLayer = 0
	tmpcnt = 0	# number of nodes whose classifier file actually existed
	while len(parents)!= 0:	# breadth-first over tree layers
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				print 'Testing CLF',cnt,'(',p,')','size=','N/A','time =',(time()-time0)
				if not os.path.isfile(path+str(p)+'.pkl'):
					# printLog('miss'+str(p))
					continue
				tmpcnt+=1
				# Scan every beam path of every document for a tail == p
				# (no dataDict index in this variant -- O(N*K) per node).
				subData = []
				subIdx = []
				subProb = []
				for i in range(len(predicted)):
					for j in range(len(predicted[i])):
						#print predicted[i][j]
						if predicted[i][j][len(predicted[i][j])-1] == p:
							subData.append(data[i])
							subIdx.append(i)
							subProb.append(prob[i][j])

				if len(subIdx) == 0:
					continue

				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')

				resultM = text_clf[p].predict_log_proba(subData)
				indexes = range(len(resultM[0]))

				#WHY?
				# predict_log_proba sometimes returns more columns than
				# classes_ -- presumably an sklearn quirk; truncate. TODO confirm.
				if len(text_clf[p].steps[-1][-1].classes_) != len(resultM[0]):
					indexes = indexes[:len(text_clf[p].steps[-1][-1].classes_)]

				for i in range(len(resultM)):
					for l in indexes:
						for j in predicted[subIdx[i]]:
							predictedNext[subIdx[i]].append(j + [text_clf[p].steps[-1][-1].classes_[l]])
							probNext[subIdx[i]].append(subProb[i])

				print 'size =',len(resultM)

		parents = children
		# Beam pruning: keep the K most probable candidates per document.
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				for j in range(len(probNext[i])):
					print predictedNext[i][j],'---',probNext[i][j]
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []
				print 'MAX'
				print prob[i]
				print predicted[i]
				# raw_input('ENTER')

	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	# Collapse each beam to its most probable path.
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	# Per-level accuracy; correct[0] is the total document count.
	correct = [0] * 10
	printLog('L1 to L9 in %')
	for i in range(len(predicted)):
		print labels[i]
		print predicted[i]
		#print correct
		#raw_input('------Enter')
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1

	for i in range(1,10):
		printLog(str(round(float(correct[i])/correct[0]*100, 2)))
	printLog('--------------')
	printLog( str(tmpcnt))


def KbeamV5_thread(lock,p,dataDict,data,prob,probNext,predicted,predictedNext,path,text_clf):

	print 'New thread:','(',p,')'
	
	subData = []
	subIdx = []
	subProb = []
	subPath = []
	subPathIdx = []
	for i, j in dataDict.get(p, []):
		subIdx.append(i)
		subData.append(data[i])
		subProb.append(prob[i][j])
		subPathIdx.append(j)
	#dataDict[p]=[]
	print 'size =',len(subIdx)

	if len(subIdx) == 0:
		return

	if p not in text_clf:
		lock.acquire()
		text_clf[p] = joblib.load(path+str(p)+'.pkl')
		lock.release()
	tmpTime = time()
	resultM = text_clf[p].predict_log_proba(subData)
	indexes = range(len(resultM[0]))

	print 111

	#WHY?
	if len(text_clf[p].steps[-1][-1].classes_) != len(resultM[0]):
		indexes = indexes[:len(text_clf[p].steps[-1][-1].classes_)]

	print 222

	lock.acquire()
	for i in range(len(resultM)):
		print i
		for l in indexes:
			
			#print 'a'
			predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [text_clf[p].steps[-1][-1].classes_[l]])
			#print 'b'
			probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
			#print 'c'
	lock.release()

	print 333
	tmpTime=time()-tmpTime
	lock.acquire()
	printDebug('#'+str(cnt)+' time ='+str(round((time()-time0),1))+' ('+str(p)+')'+' size = '+str(len(subIdx))+' IT = '+str(tmpTime))
	lock.release()

	print 444

def KbeamV5(trainThres,data,labels):
	"""Multiprocess K-beam search: fans node scoring out to up to 8
	KbeamV5_thread worker processes per tree layer, sharing state through
	multiprocessing.Manager proxies.

	NOTE(review): dataDict is a manager.dict(); `dataDict[pred].append(...)`
	in the pruning loop mutates a local copy returned by the proxy, so that
	update is likely lost -- verify against the single-process kbeam/KbeamV4,
	which are the reference implementations. text_clf is also a plain dict
	here, so each worker loads classifiers into its own copy (cache is not
	shared across processes).
	"""
	printLog('KbeamV5')
	CLF_name = 'ML_NB'
	K = 3
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()

	lock = multiprocessing.Lock()	# shadows the module-level lock on purpose

	data = manager.list(data)	# share the documents with the workers

	##################TEST SET###################

	time0 = time()

	tested = dict()
	# Per-document beam state, held in manager proxies so workers can write.
	predicted = manager.list([[[0]] for i in range(len(data))])
	predictedNext = manager.list([[] for i in range(len(data))])
	prob = manager.list([[0] for i in range(len(data))])
	probNext = manager.list([[] for i in range(len(data))])

	# dataDict[node] -> list of (document index, beam-slot index) at node.
	dataDict = manager.dict()

	l = []
	for i in range(len(data)):
		l.append((i, 0)) #(index, index in predicted[i])
	dataDict[0] = l

	parents = [0]
	cnt = 0

	seenLayer = 0
	while len(parents)!= 0:	# breadth-first over tree layers
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				if not os.path.isfile(path+str(p)+'.pkl'):
					continue

				# Throttle to at most 8 concurrent worker processes.
				while True:
					if  len(multiprocessing.active_children()) < 8:
						multiprocessing.Process(target=KbeamV5_thread, args=(lock,p,dataDict,data,prob,probNext,predicted,predictedNext,path,text_clf)).start()
						break
					else:
						sleep(0.1)

		# Barrier: all workers of this layer must finish before pruning.
		while len(multiprocessing.active_children()) != 0:
			#print 'wait for subprocess to finish'
			#print multiprocessing.active_children()
			sleep(1)

		parents = children
		# Beam pruning: keep the K most probable candidates per document.
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					pred = predictedNext[i][l][-1]
					try:
						# NOTE(review): append on a manager.dict value mutates
						# a copy -- this re-queue may be a no-op; confirm.
						dataDict[pred].append((i, len(predicted[i])))
					except:
						dataDict[pred]=[(i, len(predicted[i]))]
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []


	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	# Collapse each beam to its most probable path.
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	# Per-level accuracy; correct[0] is the total document count.
	correct = [0] * 10
	printLog('L1 to L9 in %')
	for i in range(len(predicted)):
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1

	for i in range(1,10):
		printLog(str(round(float(correct[i])/correct[0]*100, 2)))
	printLog('--------------')

def KbeamV4(trainThres,data,labels, K=3):
	"""Single-process K-beam search over the label hierarchy using one
	MULTI-class classifier per node (ML_NB), indexed by dataDict so each
	node only scores the documents queued at it.

	trainThres -- selects the classifier directory (ML_NB<trainThres>)
	data/labels -- parallel lists of documents and gold label paths
	K -- beam width
	Reports L1..L9 path-match accuracy through printLog.
	"""
	printLog('KbeamV4')
	CLF_name = 'ML_NB'
	# K = 3
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()	# classifier cache keyed by node id

	##################TEST SET###################

	time0 = time()

	tested = dict()
	# Per-document beam state; *Next hold candidate expansions for a layer.
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	# dataDict[node] -> list of (document index, beam-slot index) at node.
	dataDict = dict()

	dataDict[0]=[]
	for i in range(len(data)):
		dataDict[0].append((i, 0))	#(index, index in predicted[i])

	parents = [0]
	cnt = 0

	seenLayer = 0
	while len(parents)!= 0:	# breadth-first over tree layers
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				if not os.path.isfile(path+str(p)+'.pkl'):
					continue

				# Gather the documents whose beam currently sits at node p.
				subData = []
				subIdx = []
				subProb = []
				subPath = []
				subPathIdx = []
				for i, j in dataDict.get(p, []):
					subIdx.append(i)
					subData.append(data[i])
					subProb.append(prob[i][j])
					subPathIdx.append(j)
				dataDict[p]=[]

				if len(subIdx) == 0:
					continue

				#print 'size =',len(subIdx)

				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')
				tmpTime = time()
				resultM = text_clf[p].predict_log_proba(subData)
				indexes = range(len(resultM[0]))

				#WHY?
				# predict_log_proba sometimes returns more columns than
				# classes_ -- presumably an sklearn quirk; truncate. TODO confirm.
				if len(text_clf[p].steps[-1][-1].classes_) != len(resultM[0]):
					indexes = indexes[:len(text_clf[p].steps[-1][-1].classes_)]

				# Extend each queued path with every candidate class.
				for i in range(len(resultM)):
					for l in indexes:
						predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [text_clf[p].steps[-1][-1].classes_[l]])
						probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
				tmpTime=time()-tmpTime
				printDebug('#'+str(cnt)+' time ='+str(round((time()-time0),1))+' ('+str(p)+')'+' size = '+str(len(subIdx))+' IT = '+str(tmpTime))

		parents = children
		# Beam pruning: keep the K most probable candidates per document and
		# re-queue them on dataDict under their new tail node.
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					pred = predictedNext[i][l][-1]
					try:
						dataDict[pred].append((i, len(predicted[i])))
					except:
						dataDict[pred]=[(i, len(predicted[i]))]
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []


	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	# Collapse each beam to its most probable path.
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	# Per-level accuracy; correct[0] is the total document count.
	correct = [0] * 10
	printLog('L1 to L9 in %')
	for i in range(len(predicted)):
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1

	for i in range(1,10):
		printLog(str(round(float(correct[i])/correct[0]*100, 2)))
	printLog('--------------')


def KbeamV3(trainThres,data,labels):
	"""K-beam variant predating the dataDict index of KbeamV4: for every
	node it scans ALL beam paths of ALL documents to find the ones whose
	tail equals p (O(N*K) per node). Kept for comparison.

	trainThres -- selects the classifier directory (ML_NB<trainThres>)
	data/labels -- parallel lists of documents and gold label paths
	Beam width K is fixed at 3. Reports L1..L9 accuracy through printLog.
	"""

	printLog('KbeamV3')
	CLF_name = 'ML_NB'
	K = 3
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()	# classifier cache keyed by node id

	##################TEST SET###################

	time0 = time()

	tested = dict()
	# Per-document beam state; *Next hold candidate expansions for a layer.
	predicted = [[[0]] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	prob = [[0] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]


	parents = [0]
	cnt = 0

	seenLayer = 0
	tmpcnt = 0	# number of nodes whose classifier file actually existed
	while len(parents)!= 0:	# breadth-first over tree layers
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				print 'Testing CLF',cnt,'(',p,')','size=','N/A','time =',(time()-time0)
				if not os.path.isfile(path+str(p)+'.pkl'):
					# printLog('miss'+str(p))
					continue
				tmpcnt+=1
				# Linear scan for beam paths ending at p (no index here).
				subData = []
				subIdx = []
				subProb = []
				subPathIdx = []
				for i in range(len(predicted)):
					for j in range(len(predicted[i])):
						#print predicted[i][j]
						if predicted[i][j][len(predicted[i][j])-1] == p:
							subData.append(data[i])
							subIdx.append(i)
							subProb.append(prob[i][j])
							subPathIdx.append(j)

				if len(subIdx) == 0:
					continue

				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')

				resultM = text_clf[p].predict_log_proba(subData)
				indexes = range(len(resultM[0]))

				#WHY?
				# predict_log_proba sometimes returns more columns than
				# classes_ -- presumably an sklearn quirk; truncate. TODO confirm.
				if len(text_clf[p].steps[-1][-1].classes_) != len(resultM[0]):
					indexes = indexes[:len(text_clf[p].steps[-1][-1].classes_)]

				# Extend each matching path with every candidate class.
				for i in range(len(resultM)):
					for l in indexes:
						#for j in predicted[subIdx[i]]:
						#	predictedNext[subIdx[i]].append(j + [text_clf[p].steps[-1][-1].classes_[l]])
						#	probNext[subIdx[i]].append(resultM[i][l]+subProb[i])
						predictedNext[subIdx[i]].append(predicted[subIdx[i]][subPathIdx[i]] + [text_clf[p].steps[-1][-1].classes_[l]])
						probNext[subIdx[i]].append(resultM[i][l]+subProb[i])

				print 'size =',len(resultM)

		parents = children
		# Beam pruning: keep the K most probable candidates per document.
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				#for j in range(len(probNext[i])):
				#	print predictedNext[i][j],'---',probNext[i][j]
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []
				#print 'MAX'
				#print prob[i]
				#print predicted[i]
				# raw_input('ENTER')

	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	# Collapse each beam to its most probable path.
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]

	# Per-level accuracy; correct[0] is the total document count.
	correct = [0] * 10
	printLog('L1 to L9 in %')
	for i in range(len(predicted)):
		#print labels[i]
		#print predicted[i]
		#print correct
		#raw_input('------Enter')
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1

	for i in range(1,10):
		printLog(str(round(float(correct[i])/correct[0]*100, 2)))
	printLog('--------------')
	printLog( str(tmpcnt))

def KbeamV2(trainThres,data,labels):

	printLog('--------------KbeamV2')
	CLF_name = 'ML_NB'
	K = 3
	printLog('K='+str(K))

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()

	##################TEST SET###################

	time0 = time()

	tested = dict()
	predicted = [[0] for i in range(len(data))]
	predictedNext = [[] for i in range(len(data))]
	prob = [[log(1)] for i in range(len(data))]
	probNext = [[] for i in range(len(data))]

	parents = [0]
	cnt = 0

	seenLayer = 0

	while len(parents)!= 0:
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH): # and (p not in tested): # and (seenLayer < 10): # (p not in tested): # and (p not in LabelDict[4]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				print 'Testing CLF',cnt,'(',p,')','size=','N/A','time =',(time()-time0)
				if not os.path.isfile(path+str(p)+'.pkl'):
					continue

				subData = []
				subIdx = []
				subProb = []
				for i in range(len(predicted)):
					if p in predicted[i]:
						subData.append(data[i])
						subIdx.append(i)
						subProb.append(prob[i][predicted[i].index(p)])

				if len(subIdx) == 0:
					continue

				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')

				resultM = text_clf[p].predict_log_proba(subData)
				indexes = range(len(resultM[0]))

				#WHY?
				if len(text_clf[p].steps[-1][-1].classes_) != len(resultM[0]):
					indexes = indexes[:len(text_clf[p].steps[-1][-1].classes_)]

				for i in range(len(resultM)):
					largestKIdx = nlargest(K, indexes, key=lambda j: resultM[i][j])

					for l in indexes:
						predictedNext[subIdx[i]].append(text_clf[p].steps[-1][-1].classes_[l])
						probNext[subIdx[i]].append(resultM[i][l]+subProb[i])

				print 'size =',len(resultM)

		parents = children
		for i in range(len(probNext)):
			if len(probNext[i]) != 0:
				indexes = range(len(probNext[i]))
				largestKIdx = nlargest(K, indexes, key=lambda j: probNext[i][j])
				prob[i] = []
				predicted[i] = []
				for l in largestKIdx:
					prob[i].append(probNext[i][l])
					predicted[i].append(predictedNext[i][l])
				predictedNext[i] = []
				probNext[i] = []

	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	confidence = float(0)
	for i in range(len(prob)):
		idx = prob[i].index(max(prob[i]))
		predicted[i] = predicted[i][idx]
		confidence += prob[i][idx]
	confidence /= len(prob)
	printLog('Average log prob = '+str(confidence))

	printLog('L1 to L9 in %')
	for layer in range(1,10,1):
		cnt = 0
		correct = 0
		for i in range(len(predicted)):
			cnt += 1
			#print 'Evaluating #',cnt
			if isDescendantOfLabels(predicted[i],labels[i]) or matchAtLayer(predicted[i],labels[i],layer):
				correct += 1
		printLog(str(round(float(correct)/cnt*100, 2)))
	printLog('--------------')

#OLD VERSION, DON't USE
def Kbeam(trainThres,data,labels):
	"""K-beam search over the label hierarchy (K=3) -- OLD VERSION, unused.

	Walks the class hierarchy breadth-first from the root node 0,
	keeping the top-K candidate children per document at every node and
	re-bucketing documents under their candidate children for the next
	level.  NOTE(review): the function calls exit(0) right after
	collecting results, so the per-layer accuracy report at the bottom
	is unreachable dead code.
	"""

	printLog('--------------Kbeam')

	K = 3	# beam width: candidate children kept per document at each node


	CLF_name = 'ML_NB'
	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()	# cache of per-node classifiers loaded from disk

	##################TEST SET###################

	time0 = time()

	# Per-node buckets, keyed by hierarchy node id:
	tested = dict()			# nodes whose classifier has already been run
	dataH = dict()			# documents currently assigned to the node
	dataIdxH = dict()		# original indices of those documents
	predictedH = dict()
	predictedLabelH = dict()	# per-document lists of candidate child labels
	predictedProbH = dict()		# per-document cumulative log-probabilities
	labelsH = dict()		# ground-truth labels of those documents

	# Starting log-prob offset; presumably arbitrary -- TODO confirm intent.
	INIT_LOG_PROB = log(100)

	dataH[0] = data
	dataIdxH[0] = range(len(data))
	labelsH[0] = labels
	predictedProbH[0] = [INIT_LOG_PROB] * len(data)


	cnt = 0
	parents = [0]
	# Breadth-first walk of the hierarchy starting at the root node 0.
	while len(parents)!= 0:
		children = []
		for p in parents:
			if (p in CH) and (p in dataH) and (p not in tested): #and (p not in LabelDict[LAYER]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				print 'Testing CLF',cnt,'(',p,')','size=',len(dataH[p]),'time =',(time()-time0)
				if not os.path.isfile(path+str(p)+'.pkl'):
					continue
				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')

				probMatrix = text_clf[p].predict_log_proba(dataH[p])

				predictedLabelH[p] = []

				#if float("-inf") in probMatrix[0]:
				#	probMatrix[0] = probMatrix[0].tolist().remove(float("-inf"))
				#	indexes = range(len(probMatrix[0]))
				#else:
				indexes = range(len(probMatrix[0]))
				# Guard: the classifier can report more probability columns
				# than classes_ entries; truncate to the classes we can name.
				if len(text_clf[p].steps[-1][-1].classes_) != len(probMatrix[0]):
					indexes = indexes[:len(text_clf[p].steps[-1][-1].classes_)]

				for i in range(len(probMatrix)):
					# Top-K candidate children for document i by log-prob.
					largestKIdx = nlargest(K, indexes, key=lambda j: probMatrix[i][j])

					predictedLabel = []
					predictedProb = []
					dataIdx = []

					#if cnt == 655:
					#	print probMatrix[i]
					#	print largestKIdx
					#	print text_clf[p].steps[-1][-1].classes_

					for idx in largestKIdx:
						predictedLabel.append(text_clf[p].steps[-1][-1].classes_[idx])
						predictedProb.append(probMatrix[i][idx] + predictedProbH[p][i])

					#print predictedLabel
					#print predictedProb
					#raw_input('enter')

					predictedLabelH[p].append(predictedLabel)
					# NOTE(review): overwrites the scalar log-prob of
					# document i with a *list* of K candidate log-probs.
					predictedProbH[p][i] = predictedProb

				# Re-bucket every (document, candidate-child) pair under the
				# candidate child node for the next level of the walk.
				for i in range(len(predictedLabelH[p])):
					for j in range(len(predictedLabelH[p][i])):
						if predictedLabelH[p][i][j] not in dataH:
							dataH[predictedLabelH[p][i][j]] = []
							dataIdxH[predictedLabelH[p][i][j]] = []
							labelsH[predictedLabelH[p][i][j]] = []
							predictedLabelH[predictedLabelH[p][i][j]] = []
							predictedProbH[predictedLabelH[p][i][j]] = []

						dataH[predictedLabelH[p][i][j]].append(dataH[p][i])
						dataIdxH[predictedLabelH[p][i][j]].append(dataIdxH[p][i])
						labelsH[predictedLabelH[p][i][j]].append(labelsH[p][i])
						predictedLabelH[predictedLabelH[p][i][j]].append(predictedLabelH[p][i][j])
						predictedProbH[predictedLabelH[p][i][j]].append(predictedProbH[p][i][j])

				# This node's documents have moved one level down; free it.
				del dataH[p]
				del dataIdxH[p]
				del labelsH[p]
				del predictedLabelH[p]
				del predictedProbH[p]

		parents = children

	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	# Keep, per original document index, the candidate with the best prob.
	predictedH = dict()
	predictedProbMaxH = dict()

	for k in predictedLabelH.keys():
		for i in range(len(predictedLabelH[k])):
			if dataIdxH[k][i] not in predictedH:
				predictedH[dataIdxH[k][i]] = predictedLabelH[k][i]
				predictedProbMaxH[dataIdxH[k][i]] = predictedProbH[k][i]
			else:
				if predictedProbH[k][i] > predictedProbMaxH[dataIdxH[k][i]]:
					predictedH[dataIdxH[k][i]] = predictedLabelH[k][i]
					predictedProbMaxH[dataIdxH[k][i]] = predictedProbH[k][i]

	print len(predictedH)
	print len(predictedProbMaxH)
	# NOTE(review): hard exit -- everything below is unreachable.
	exit(0)

	printLog('L1 to L9 in %')
	for layer in range(1,10,1):
		cnt = 0
		correct = 0
		for k in predictedH.keys():
			for i in range(len(predictedH[k])):
				cnt += 1
				#print 'Evaluating #',cnt
				if isDescendantOfLabels(predictedH[k][i],labelsH[k][i]) or matchAtLayer(predictedH[k][i],labelsH[k][i],layer):
					correct += 1
		printLog(str(round(float(correct)/cnt*100, 2)))
	printLog('--------------')

#OLD VERSION, DON'T USE
def Greedy(trainThres,data,labels):
	"""Greedy top-down classification (old dict-bucketed variant) -- unused.

	Documents are bucketed per hierarchy node (dataH); each node's
	classifier moves its documents one level deeper.  The buckets of a
	processed node are deleted, so only the deepest surviving buckets
	remain for the per-layer accuracy report at the end.
	"""

	printLog('--------------Greedy')
	CLF_name = 'ML_NB'

	time0 = time()

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()	# cache of per-node classifiers loaded from disk

	##################TEST SET###################

	tested = dict()
	dataH = dict()		# node id -> documents currently at that node
	predictedH = dict()	# node id -> predicted labels for those documents
	labelsH = dict()	# node id -> ground-truth labels of those documents

	parents = [0]
	cnt = 0

	dataH[0] = data
	labelsH[0] = labels

	seenLayer = 0

	# Breadth-first walk of the hierarchy starting at the root node 0.
	while len(parents)!= 0:
		children = []
		seenLayer += 1
		for p in parents:
			if (p in CH) and (p in dataH): # and seenLayer<10 : # and (p not in tested): #and (p not in LabelDict[LAYER]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				print 'Testing CLF',cnt,'(',p,')','time =',(time()-time0)
				if not os.path.isfile(path+str(p)+'.pkl'):
					continue
				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')

				predictedH[p] = text_clf[p].predict(dataH[p]).tolist()

				# Move each document into the bucket of its predicted child.
				for i in range(len(predictedH[p])):

					if predictedH[p][i] not in dataH:
						dataH[predictedH[p][i]] = []
						labelsH[predictedH[p][i]] = []
						predictedH[predictedH[p][i]] = []
					dataH[predictedH[p][i]].append(dataH[p][i])
					labelsH[predictedH[p][i]].append(labelsH[p][i])
					predictedH[predictedH[p][i]].append(predictedH[p][i])
				print 'size =',len(predictedH[p])

				# This node's documents have moved down; drop its buckets.
				del dataH[p]
				del labelsH[p]
				del predictedH[p]


		parents = children

	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	printLog('L1 to L9 in %')
	for layer in range(1,10,1):
		cnt = 0
		correct = 0
		for k in predictedH.keys():
			for i in range(len(predictedH[k])):
				cnt += 1
				#print 'Evaluating #',cnt
				if isDescendantOfLabels(predictedH[k][i],labelsH[k][i]) or matchAtLayer(predictedH[k][i],labelsH[k][i],layer):
					correct += 1
		printLog(str(round(float(correct)/cnt*100, 2)))
	printLog('--------------')

#OLD VER. DON'T USE
def GreedyV2(trainThres,data,labels):

	printLog('--------------Greedy')
	CLF_name = 'ML_NB'

	time0 = time()

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()

	##################TEST SET###################

	tested = dict()
	predicted = [0] * len(data)

	parents = [0]
	cnt = 0

	while len(parents)!= 0:
		children = []
		for p in parents:
			if (p in CH): # and (p not in tested): #and (p not in LabelDict[LAYER]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				print 'Testing CLF',cnt,'(',p,')','size=','N/A','time =',(time()-time0)
				if not os.path.isfile(path+str(p)+'.pkl'):
					continue

				subData = []
				idxList = []
				for i in range(len(predicted)):
					if predicted[i]==p:
						subData.append(data[i])
						idxList.append(i)

				if len(idxList) == 0:
					continue
				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')

				result = text_clf[p].predict(subData).tolist()
				for i in range(len(idxList)):
					predicted[idxList[i]] = result[i]

				print 'size =',len(result)

		parents = children

	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	printLog('L1 to L9 in %')
	for layer in range(1,10,1):
		cnt = 0
		correct = 0
		for i in range(len(predicted)):
			cnt += 1
			#print 'Evaluating #',cnt
			if isDescendantOfLabels(predicted[i],labels[i]) or matchAtLayer(predicted[i],labels[i],layer):
				correct += 1
		printLog(str(round(float(correct)/cnt*100, 2)))
	printLog('--------------')

def GreedyV3(trainThres,data,labels):

	printLog('GreedyV3')
	CLF_name = 'ML_NB'

	time0 = time()

	path = '../../../Data229/CLF/'+CLF_name+str(trainThres)+'/'
	printLog(path)

	global text_clf
	text_clf = dict()

	#for i in labels:
	#	print i
	#	raw_input('-----Enter')

	##################TEST SET###################

	tested = dict()
	predicted = [[0] for i in range(len(data))]

	parents = [0]
	cnt = 0

	while len(parents)!= 0:
		children = []
		for p in parents:
			if (p in CH): # and p==0: # and (p not in tested): #and (p not in LabelDict[LAYER]):
				children += CH[p]
				tested[p] = True
				cnt += 1

				print 'Testing CLF',cnt,'(',p,')','size=','N/A','time =',(time()-time0)
				if not os.path.isfile(path+str(p)+'.pkl'):
					continue

				subData = []
				idxList = []
				for i in range(len(data)):
					#print predicted[i]
					#print p
					if predicted[i][len(predicted[i])-1]==p:
						#print 'yes'
						subData.append(data[i])
						idxList.append(i)

				if len(idxList) == 0:
					continue
				if p not in text_clf:
					text_clf[p] = joblib.load(path+str(p)+'.pkl')

				result = text_clf[p].predict(subData).tolist()
				for i in range(len(idxList)):
					predicted[idxList[i]].append(result[i])

				print 'size =',len(result)

		parents = children

	printLog( 'Testing is Done in '+ str(round(time()-time0)))

	correct = [0] * 10
	printLog('L1 to L9 in %')
	for i in range(len(predicted)):
		#print labels[i]
		#print predicted[i]
		#print correct
		#raw_input('------Enter')
		ret = NumPathMatched(predicted[i][1:],labels[i])
		for l in range(0,ret+1):
			correct[l]+=1

	for i in range(1,10):
		printLog(str(round(float(correct[i])/correct[0]*100, 2)))
	printLog('--------------')

def NumPathMatched(predicted,labels):
	"""Return how many leading path components of *predicted* agree with
	the best-matching ground-truth path in *labels*.

	If *predicted* covers some ground-truth path completely, the maximum
	score 9 (full depth) is returned immediately; otherwise the longest
	common prefix length over all label paths is returned.
	"""
	best = 0
	for labelPath in labels:
		depth = 0
		for a, b in zip(predicted, labelPath):
			if a != b:
				break
			depth += 1
		if depth == len(labelPath):
			# Entire ground-truth path matched: perfect score.
			return 9
		best = max(best, depth)

	return best

def init():
	global PH,CH
	PH = pickle.load(open('../PH.pkl','rb'))
	print 'PH Loaded'
	CH = pickle.load(open('../CH.pkl','rb'))
	print 'CH Loaded'

	global LabelDict
	LabelDict = dict()
	for l in range(1,10):
		LabelDict[l] = np.loadtxt('../validLabels_L'+str(l)+'.txt',dtype=int)

	global validLabels
	validLabels = np.loadtxt('../validLabels_L1.txt',dtype=str)

def run(trainThres,data,label,model, message, test=False):
	modelName=model.__name__

	print '--------------------------'
	printLog(message)
	print 'trainThres = ',trainThres
	print 'modelName = ',modelName
	print 'data = ',data
	print 'label = ',label

	dataFile = open('../'+data,'r')
	labelFile = pickle.load(open('../'+label, 'rb'))

	trainData = []
	testData = []
	cnt=0
	for line in dataFile:
		#print 'Read Data #',cnt+1
		if (cnt % 10) == 9:
			testData.append(line.strip())
		elif (cnt % 10) < trainThres:
			trainData.append(line.strip())
		cnt += 1
	print 'Finish Reading Data'


	trainLabels = []
	testLabels = []
	cnt=0
	for line in labelFile:
		#print 'Read Label #',cnt+1
		if (cnt % 10) == 9:
			testLabels.append(line)
		elif (cnt % 10) < trainThres:
			trainLabels.append(line)
		cnt += 1
	print 'Finish Reading Labels'

	print 'Train Data Size = ',len(trainData)
	print 'Train Label Size = ',len(trainLabels)
	print 'Test Data Size = ',len(trainData)
	print 'Test Label Size = ',len(trainLabels)
	print ''
	if test:
		model(trainThres,trainData[:10],trainLabels[:10],testData[:10],testLabels[:10])
	else:
		model(trainThres,trainData,trainLabels,testData,testLabels)


def runAdv(trainThres,data,label,model, method, message,  K, getResultM, test=False):
	modelName=model.__name__

	print '--------------------------'
	printLog(message)
	print 'trainThres = ',trainThres
	print 'modelName = ',modelName
	print 'data = ',data
	print 'label = ',label

	dataFile = open('../'+data,'r')
	labelFile = pickle.load(open('../'+label, 'rb'))

	trainData = []
	testData = []
	cnt=0
	for line in dataFile:
		#print 'Read Data #',cnt+1
		if (cnt % 10) == 9:
			testData.append(line.strip())
		elif (cnt % 10) < trainThres:
			trainData.append(line.strip())
		cnt += 1
	print 'Finish Reading Data'


	trainLabels = []
	testLabels = []
	cnt=0
	for line in labelFile:
		#print 'Read Label #',cnt+1
		if (cnt % 10) == 9:
			testLabels.append(line)
		elif (cnt % 10) < trainThres:
			trainLabels.append(line)
		cnt += 1
	print 'Finish Reading Labels'

	print 'Train Data Size = ',len(trainData)
	print 'Train Label Size = ',len(trainLabels)
	print 'Test Data Size = ',len(trainData)
	print 'Test Label Size = ',len(trainLabels)
	print ''
	if test:
		model(trainThres,trainData[:10],trainLabels[:10],testData[:10],testLabels[:10], method, K, getResultM)
	else:
		model(trainThres,trainData,trainLabels,testData,testLabels, method, K, getResultM)



def ML_NB_thread(trainThres, trainData, trainLabels, clf):
	"""Train the multiclass NB classifier for hierarchy node *clf*.

	One training example is emitted for every (document, label path)
	pair in which the path contains a child of *clf*, with that child
	as the target class.  The fitted pipeline is dumped to
	<ML_NB dir>/<clf>.pkl; nothing is written when no example mentions
	any child of *clf*.
	"""
	subData = []
	subLabels = []

	for child in CH[clf]:
		for doc, paths in zip(trainData, trainLabels):
			for labelPath in paths:
				if child in labelPath:
					subData.append(doc)
					subLabels.append(child)

	if not subLabels:
		return

	pipeline = Pipeline([('vect', CountVectorizer()),
				('tfidf', TfidfTransformer()),
				('clf', MultinomialNB()),]).fit(subData, subLabels)

	outDir = '../../../Data229/CLF/ML_NB'+str(trainThres)+'/'
	joblib.dump(pipeline, outDir+str(clf)+'.pkl')


def ML_NB(trainThres,trainData,trainLabels,testData,testLabels):

	time0 = time()
	#main_thread = multiprocessing.current_process()

	path = '../../../Data229/CLF/ML_NB'+str(trainThres)+'/'
	if not os.path.exists(path):
		os.makedirs(path)

	text_clf = dict()
	trained = dict()

	parents = [0]
	cnt = 0
	while len(parents)!= 0:
		children = []
		for p in parents:
			if (p in CH) and (p not in trained):
				children += CH[p]
				trained[p] = True

				#Train p
				cnt += 1
				while True:
					if  len(multiprocessing.active_children()) < 8:
						print 'Training CLF',cnt,'(',p,')','time =',(time()-time0)
						multiprocessing.Process(target=ML_NB_thread, args=(trainThres,trainData,trainLabels,p)).start()
						break
					else:
						sleep(0.1)
		parents = children


	while len(multiprocessing.active_children()) != 0:
		print 'wait for subprocess to finish'
		sleep(1)

	print 'Training is Done'


def BL_NB_thread(trainThres, trainData, trainLabels, clf):
	"""Train one binary NB classifier per child of hierarchy node *clf*.

	Documents whose label paths mention *clf* form the training pool.
	For each child, a +1/-1 target marks whether any of a document's
	label paths also mention that child.  Each fitted pipeline is
	dumped to <BL_NB dir>/<child>.pkl.
	"""
	subData = []
	subLabels = []

	# Keep only the documents that belong to this node at all.
	for doc, paths in zip(trainData, trainLabels):
		if any(clf in labelPath for labelPath in paths):
			subData.append(doc)
			subLabels.append(paths)

	if not subLabels:
		return

	outDir = '../../../Data229/CLF/BL_NB'+str(trainThres)+'/'
	for child in CH[clf]:
		# +1 when any label path of the document contains this child.
		childLabels = [1 if any(child in labelPath for labelPath in paths) else -1
				for paths in subLabels]

		# if 1 not in childLabels or -1 not in childLabels:
		# 	cls = None
		# else:
		# 	cls = Pipeline([('vect', CountVectorizer()),
		# 				('tfidf', TfidfTransformer()),
		# 				('clf', MultinomialNB()),]).fit(subData, childLabels)

		cls = Pipeline([('vect', CountVectorizer()),
					('tfidf', TfidfTransformer()),
					('clf', MultinomialNB()),]).fit(subData, childLabels)

		joblib.dump(cls, outDir+str(child)+'.pkl')

def BL_NB(trainThres,trainData,trainLabels,testData,testLabels):

	time0 = time()
	#main_thread = multiprocessing.current_process()

	path = '../../../Data229/CLF/BL_NB'+str(trainThres)+'/'
	if not os.path.exists(path):
		os.makedirs(path)

	text_clf = dict()
	trained = dict()

	parents = [0]
	cnt = 0
	while parents!=[]:
		children = []
		for p in parents:
			if p in CH and (p not in trained):

				children += CH[p]

				trained[p] = True

				#Train p
				cnt += 1
				while True:
					if  len(multiprocessing.active_children()) < 8:
						print 'Training CLF',cnt,'(',p,')','time =',(time()-time0)
						multiprocessing.Process(target=BL_NB_thread, args=(trainThres,trainData,trainLabels,p)).start()
						break
					else:
						sleep(0.1)
		parents = children


	while len(multiprocessing.active_children()) != 0:
		print 'wait for subprocess to finish'
		sleep(1)

	print 'Training is Done'

if __name__ == "__main__":
	# Timestamp this run in both the regular and debug logs, run the
	# experiment driver, then write a closing separator to each log.
	# (Removed a dead, space-indented "# main()" comment left over from
	# an earlier edit in this otherwise tab-indented file.)
	printLog(ctime()+'---------------------------')
	printDebug(ctime()+'---------------------------')
	main()
	printLog('---------------------------------------------------\n')
	printDebug('---------------------------------------------------\n')