#!/usr/bin/python
#
import json
import sys
import numpy as np
import matplotlib.pyplot as plt
import collections
import ast
import pickle
import multiprocessing
import os
from math import log
from math import exp
from heapq import nlargest
from time import sleep
from time import time, ctime
from itertools import groupby
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsOneClassifier
from sklearn import datasets
from sklearn.externals import joblib


from util import *
from kbeam import *

# Module-level shared state.  None means "not loaded yet"; init() fills in
# the hierarchy/label globals before any experiment runs.
validLabels = None	# level-1 label names (numpy str array), set in init()
StatFile = None		# presumably a stats output handle -- never assigned in this file; verify
CMFile = None		# presumably a confusion-matrix output handle -- never assigned here; verify
LabelDict = None	# level (1..9) -> valid label ids (numpy int arrays), set in init()
CH=None			# child hierarchy (node -> children), loaded from ../CH.pkl in init()
PH=None			# parent hierarchy, loaded from ../PH.pkl in init()
text_clf=None		# NOTE(review): shadowed by a local in ML_NB and never read at module level


def printLog(f,s):
	"""Echo *s* (stringified, newline-terminated) to stdout and to log file *f*,
	flushing *f* immediately so the log survives a crash."""
	line = '%s\n' % (s,)
	sys.stdout.write(line)
	f.write(line)
	f.flush()

def main():
	"""Driver: load the shared hierarchies/label tables, then run the
	currently selected experiment (k-beam search)."""
	init()

	# trainThres -> training-set size: 1 = 33706, 2 = 101118, 3 = 303354

	# Earlier experiments, kept for reference (uncomment to re-run):
	#   CheckDupBFS()                                                    # utility
	#   run(t, 'PER_ascii_stem_stopword', 'Label_Path.pkl', ML_NB)        # t in {1,3,9}
	#   run(t, 'PER_ascii_stem_stopword', 'Label_Path.pkl', GreedyWrapper)# t in {1,3,9}
	#   run(9, 'PER_ascii_stem_stopword', 'Label_Path.pkl', KbeamBadWrapper)

	# Current experiment: k-beam.
	message='Nothing specific'
	run(1,'PER_ascii_stem_stopword','Label_Path.pkl', KbeamWrapper, message)
	
	
def KbeamWrapper(trainThres,trainData,trainLabels,testData,testLabels):
	"""Run k-beam evaluation on the test split first, then on the train split,
	logging a banner before each run."""
	splits = (('Test Set:', testData, testLabels),
		  ('Train Set:', trainData, trainLabels))
	for banner, splitData, splitLabels in splits:
		printLog(logFile, banner)
		kbeam(trainThres, splitData, splitLabels, CH, PH, getResultM)

	
def init():
	global PH,CH
	PH = pickle.load(open('../PH.pkl','rb'))
	print 'PH Loaded'
	CH = pickle.load(open('../CH.pkl','rb'))
	print 'CH Loaded'

	global LabelDict	
	LabelDict = dict()
	for l in range(1,10):
		LabelDict[l] = np.loadtxt('../validLabels_L'+str(l)+'.txt',dtype=int)

	global validLabels
	validLabels = np.loadtxt('../validLabels_L1.txt',dtype=str)

def run(trainThres,data,label,model, message):
	modelName=model.__name__	
	
	print '--------------------------'
	printLog(logFile, message)
	print 'trainThres = ',trainThres
	print 'modelName = ',modelName
	print 'data = ',data
	print 'label = ',label

	dataFile = open('../'+data,'r')
	labelFile = pickle.load(open('../'+label, 'rb'))

	trainData = []
	testData = []
	cnt=0
	for line in dataFile:
		#print 'Read Data #',cnt+1
		if (cnt % 10) == 9:
			testData.append(line.strip())
		elif (cnt % 10) < trainThres:
			trainData.append(line.strip())
		cnt += 1
	print 'Finish Reading Data'

	
	trainLabels = []
	testLabels = []
	cnt=0
	for line in labelFile:
		#print 'Read Label #',cnt+1
		if (cnt % 10) == 9:
			testLabels.append(line)
		elif (cnt % 10) < trainThres:
			trainLabels.append(line)
		cnt += 1
	print 'Finish Reading Labels'

	print 'Train Data Size = ',len(trainData)
	print 'Train Label Size = ',len(trainLabels)
	print 'Test Data Size = ',len(trainData)
	print 'Test Label Size = ',len(trainLabels)
	print ''
	
	model(trainThres,trainData,trainLabels,testData,testLabels)

def ML_NB_thread(trainThres, trainData, trainLabels, clf):
	"""Fit and persist a multinomial-NB sub-classifier for hierarchy node *clf*.

	Every training document is labelled with each child of *clf* that occurs
	in any of its label paths (so a document may be repeated, once per
	matching path).  The fitted pipeline is dumped to
	../../../Data229/CLF/ML_NB<trainThres>/<clf>.pkl.
	"""
	subData = []
	subLabels = []

	for child in CH[clf]:
		for i, labelPaths in enumerate(trainLabels):
			for labelPath in labelPaths:
				if child in labelPath:
					subData.append(trainData[i])
					subLabels.append(child)

	# No document mentions any child of this node -- nothing to fit.
	if not subLabels:
		return

	pipeline = Pipeline([('vect', CountVectorizer()),
				('tfidf', TfidfTransformer()),
				('clf', MultinomialNB()),])
	fitted = pipeline.fit(subData, subLabels)

	path = '../../../Data229/CLF/ML_NB'+str(trainThres)+'/'
	joblib.dump(fitted, path+str(clf)+'.pkl')
	
def ML_NB(trainThres,trainData,trainLabels,testData,testLabels):

	time0 = time()
	#main_thread = multiprocessing.current_process()
	
	path = '../../../Data229/CLF/ML_NB'+str(trainThres)+'/'
	if not os.path.exists(path):
		os.makedirs(path)

	text_clf = dict()
	trained = dict()

	parents = [0]
	cnt = 0
	while len(parents)!= 0:
		children = []
		for p in parents:
			if (p in CH) and (p not in trained):
				children += CH[p]
				trained[p] = True

				#Train p
				cnt += 1
				while True:
					if  len(multiprocessing.active_children()) < 8:
						print 'Training CLF',cnt,'(',p,')','time =',(time()-time0)
						multiprocessing.Process(target=ML_NB_thread, args=(trainThres,trainData,trainLabels,p)).start()
						break
					else:
						sleep(0.1)
		parents = children


	while len(multiprocessing.active_children()) != 0:
		print 'wait for subprocess to finish'
		sleep(1)
	
	print 'Training is Done'

if __name__ == "__main__":
	printLog(logFile,ctime()+'---------------------------')
	analysisLog(ctime()+'---------------------------')
	main()
	printLog(logFile,'---------------------------------------------------\n')
	analysisLog('---------------------------------------------------\n')
