#!/usr/bin/python

import json
import sys
import numpy as np
import matplotlib.pyplot as plt
import collections
import ast
import pickle
from itertools import groupby
from sklearn.pipeline import Pipeline
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import confusion_matrix
from sklearn.datasets import make_multilabel_classification
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.multiclass import OneVsRestClassifier
from sklearn.svm import LinearSVC
from sklearn.multiclass import OneVsOneClassifier
from sklearn import datasets
from sklearn.externals import joblib



# Module-level state shared between train() and the classifier functions;
# every one of these is (re)assigned inside train() before any model runs.
validLabels = None   # numpy int array of valid level-1 label ids (../validLabels_L1.txt)
StatFile = None      # open file handle for the per-run statistics log
CMFile = None        # path (string) of the confusion-matrix output file
PHFile = None        # open handle on ../ParentHash.json
PHTable = None       # dict loaded from ParentHash.json: label id (str) -> parent label id
LabelDict = None     # dict: hierarchy level (1..9) -> numpy array of valid label ids at that level

def main():
	"""Run the currently-pending training experiment.

	Experiment log (previously completed runs, all via train()):
	  - ScikitMultinomial on Label_L1_last with thresholds 1/3/9 over
	    PER_utf8, PER_ascii, PER_ascii_stem, PER_ascii_stem_stopword;
	  - ScikitMultinomial on Label_L1_first (thres 9, stem_stopword);
	  - ScikitMultinomial on Label_L2_last (stem thres 9; stem_stopword 1/9);
	  - ML_ScikitMultinomial on Label_Leaves (stem_stopword, thres 1 and 9).
	Only the call below has not been run yet.
	"""
	train(1, 'PER_ascii_stem_stopword', 'Label_Leaves', ML_ScikitMultinomial)


def train(trainThres,data,label,model):
	modelName=model.__name__
	#cnt= 0 - 337060

	#trainThres = 1/3/9
	#33706 1
	#101118 3
	#303354 9

	#train cnt % 10 < trainThres
	#test cnt % 10 == 9
	print '--------------------------'
	print 'trainThres = ',trainThres
	print 'modelName = ',modelName
	print 'data = ',data
	print 'label = ',label

	dataFile = open('../'+data,'r')
	labelFile = open('../'+label,'r')

	global StatFile
	StatFile = open('../output/Stats_'+modelName+'_'+str(trainThres)+'_'+data+'_'+label+'.txt','w')

	global CMFile
	CMFile = '../output/CM_'+modelName+'_'+str(trainThres)+'_'+data+'_'+label+'.txt'
	
	global PHFile, PHTable
	PHFile = open('../ParentHash.json')
	PHTable = json.load(PHFile)

	global LabelDict	
	LabelDict = dict()
	for l in range(1,10):
		LabelDict[l] = np.loadtxt('../validLabels_L'+str(l)+'.txt',dtype=int)

	global validLabels
	validLabels = np.loadtxt('../validLabels_L1.txt',dtype=int)

	trainLabels = []
	trainData = []
	testLabels = []
	testData = []
	
	cnt=0
	for line in dataFile:
		#print 'Read Data #',cnt+1
		if (cnt % 10) == 9:
			testData.append(line)
		elif (cnt % 10) < trainThres:
			trainData.append(line)
		cnt += 1
	print 'Finish reading data'

	cnt=0
	for line in labelFile:
		#print 'Read Label #',cnt+1
		if (cnt % 10) == 9:
			testLabels.append(line)
		elif (cnt % 10) < trainThres:
			trainLabels.append(line)
		cnt += 1
	print 'Finish reading labels'

	print 'Train Data Size = ',len(trainData)
	print 'Test Data Size = ',len(testData)
	print ''
	predictedTest, predictedTrain = model(trainData,trainLabels,testData,testLabels)
	
def ML_ScikitMultinomial(trainData,trainLabels,testData,testLabels):
	#Train

	text_clf = dict()
	text_clf[0] = Pipeline([('vect', CountVectorizer()),
						('tfidf', TfidfTransformer()),
						('clf', MultinomialNB()),])

	for i in trainLabels:
		curr = str(i.strip())
		while curr != '0' :
			if curr in PHTable:
				if (PHTable[curr] not in text_clf): and (PHTable[curr] == '1064954'):# and (PHTable[curr] in LabelDict[1]) and False:
					text_clf[PHTable[curr]]=Pipeline([('vect', CountVectorizer()),
							('tfidf', TfidfTransformer()),
							('clf', MultinomialNB()),])
				curr = str(PHTable[curr])

	keys = text_clf.keys()
	print 'CLF Size = ',len(keys)
	for i in range(len(keys)):

		print '-'
		print 'Preparing CLF # ',i

		subData = []
		subLabels = []
		
		for j in range(len(trainLabels)):
			ret,label = hasAncestor(trainLabels[j].strip(),keys[i])
			if ret:
				if not trainData[j].strip() == '':
					subData.append(trainData[j])
					subLabels.append(label)

		print 'Training CLF # ',i
		print 'Data Size = ',len(subLabels)
		if len(subLabels) <= 5:
			del text_clf[keys[i]]
			continue
		text_clf[keys[i]] = text_clf[keys[i]].fit(subData, subLabels)
		joblib.dump(text_clf[keys[i]], '../../../Data229/test/'+str(keys[i])+'.pkl')


	print 'Done---------Testing...'
	print text_clf.keys()
	predictedTest = []

	cnt = 0
	for t in testData:
		cnt+=1
		#if cnt == 1000:
		#	break
		print 'Predicting Test ',cnt
		curr=0
		while (curr in text_clf.keys()):
			curr = text_clf[curr].predict([t])
			#print t
			#print curr
		predictedTest.append(curr)

	predictedTrain = []
	cnt = 0
	for t in trainData:
		cnt+=1
		#if cnt == 1000:
		#	break
		print 'Predicting Train ',cnt
		curr=0
		while curr in text_clf.keys():
			curr = text_clf[curr].predict([t])
		predictedTrain.append(curr)

	cnt = float(0)
	for i in range(len(predictedTrain)):
		print 'testing train ',i
		a=int(predictedTrain[i][0])
		b=int(trainLabels[i].strip())
		if covers(a,b) or covers(b,a):
			cnt += 1
		

	printLog(StatFile,'Training size is:'+str(len(predictedTrain)))
	printLog(StatFile,'Accuracy is:' + str(cnt/len(predictedTrain)*100) + "%")

	cnt = float(0)
	for i in range(len(predictedTest)):
		print 'testing test ',i
		a=int(predictedTest[i][0])
		b=int(testLabels[i])

		
		#print 'a=',a
		#print 'b=',b
		
		if covers(a,b) or covers(b,a):
			cnt += 1
			
	printLog(StatFile,'Testing size is:' + str(len(predictedTest)))
	printLog(StatFile,'Accuracy is:' + str(cnt/len(predictedTest)*100) + "%")

	return predictedTrain,predictedTest
	
def covers(c, p):
	"""Return True when p equals c or is reached by climbing c's ancestor
	chain in PHTable; climbing stops (False) when the chain hits 0.
	Raises KeyError if an intermediate id is missing from PHTable."""
	node = c
	while True:
		if node == p:
			return True
		if node == 0:
			return False
		node = PHTable[str(node)]

def hasAncestor(curr, p):
	"""Climb the PHTable chain from label-id string `curr` looking for a
	node whose parent equals p.

	Returns (True, node) where node is the child of p on the path from
	curr, or (False, node) when the chain ends at '0' or at an id with no
	PHTable entry."""
	node = curr
	while node != '0' and node in PHTable:
		if int(PHTable[node]) == p:
			return True, node
		node = str(PHTable[node])
	return False, node
	
def ScikitMultinomial(trainData,trainLabels,testData,testLabels):
	"""Flat multinomial Naive Bayes over bag-of-words TF-IDF features.

	Logs train/test accuracy to StatFile, writes the confusion matrix
	over validLabels to CMFile, then re-scores after collapsing every
	label to its parent via ConvertToParent.

	Returns (predictedTest, predictedTrain).
	"""
	pipeline = Pipeline([
		('vect', CountVectorizer()),
		('tfidf', TfidfTransformer()),
		('clf', MultinomialNB()),
	])
	pipeline = pipeline.fit(trainData, trainLabels)

	predictedTest = pipeline.predict(testData)
	predictedTrain = pipeline.predict(trainData)

	printLog(StatFile,'Training size is:'+str(len(trainData)))
	printLog(StatFile,'Accuracy is:' + str(np.mean(predictedTrain == trainLabels)*100) + "%")
	printLog(StatFile,'Testing size is:' + str(len(testData)))
	printLog(StatFile,'Accuracy is:' + str(np.mean(predictedTest == testLabels)*100) + "%")

	# Confusion matrix over the valid label ids, dumped to CMFile.
	cm = confusion_matrix(testLabels, predictedTest, validLabels)
	np.savetxt(CMFile, cm, fmt='%d')

	# Map every predicted and true label up one level and re-measure.
	predictedTest = ConvertToParent(predictedTest)
	testLabels = ConvertToParent(testLabels)
	predictedTrain = ConvertToParent(predictedTrain)
	trainLabels = ConvertToParent(trainLabels)

	printLog(StatFile,'*****MATCH BACK TO PARENT*****')
	printLog(StatFile,'Training size is:'+str(len(trainData)))
	printLog(StatFile,'Accuracy is:' + str(np.mean(predictedTrain == trainLabels)*100) + "%")
	printLog(StatFile,'Testing size is:' + str(len(testData)))
	printLog(StatFile,'Accuracy is:' + str(np.mean(predictedTest == testLabels)*100) + "%")

	return predictedTest, predictedTrain
	
def ConvertToParent(labels):
	"""Map each label (numeric string, whitespace tolerated) to its parent
	id via PHTable and return the results as a numpy array."""
	return np.array([PHTable[str(int(l.strip()))] for l in labels])

def ScikitOvR(trainData,trainLabels,testData,testLabels):
	"""One-vs-rest wrapper around the NB pipeline (second positional arg
	-1 is n_jobs: use all cores).

	Logs train/test accuracy to StatFile and writes the confusion matrix
	over validLabels to CMFile.  Returns (predictedTest, predictedTrain).
	"""
	base = Pipeline([
		('vect', CountVectorizer()),
		('tfidf', TfidfTransformer()),
		('clf', MultinomialNB()),
	])
	clf = OneVsRestClassifier(base, -1)
	clf = clf.fit(trainData, trainLabels)

	predictedTest = clf.predict(testData)
	predictedTrain = clf.predict(trainData)

	printLog(StatFile,'Training size is:'+str(len(trainLabels)))
	printLog(StatFile,'Accuracy is:' + str(np.mean(predictedTrain == trainLabels)*100) + "%")
	printLog(StatFile,'Testing size is:' + str(len(testLabels)))
	printLog(StatFile,'Accuracy is:' + str(np.mean(predictedTest == testLabels)*100) + "%")

	# Confusion matrix over the valid label ids, dumped to CMFile.
	cm = confusion_matrix(testLabels, predictedTest, validLabels)
	np.savetxt(CMFile, cm, fmt='%d')

	return predictedTest, predictedTrain

def ScikitOvO(trainData,trainLabels,testData,testLabels):
	"""One-vs-one LinearSVC over TF-IDF features (the -1 passed to
	OneVsOneClassifier is n_jobs: use all cores).

	Logs train/test accuracy to StatFile and writes the confusion matrix
	over validLabels to CMFile.  Returns (predictedTest, predictedTrain).
	"""
	clf = Pipeline([
		('vect', CountVectorizer()),
		('tfidf', TfidfTransformer()),
		('clf', OneVsOneClassifier(LinearSVC(), -1)),
	])
	clf = clf.fit(trainData, trainLabels)

	predictedTest = clf.predict(testData)
	predictedTrain = clf.predict(trainData)

	printLog(StatFile,'Training size is:'+str(len(trainData)))
	printLog(StatFile,'Accuracy is:' + str(np.mean(predictedTrain == trainLabels)*100) + "%")
	printLog(StatFile,'Testing size is:' + str(len(testData)))
	printLog(StatFile,'Accuracy is:' + str(np.mean(predictedTest == testLabels)*100) + "%")

	# Confusion matrix over the valid label ids, dumped to CMFile.
	cm = confusion_matrix(testLabels, predictedTest, validLabels)
	np.savetxt(CMFile, cm, fmt='%d')

	return predictedTest, predictedTrain

def printLog(f,s):
	print s
	print >>f, s
	
# Script entry point: run the pending experiment when executed directly.
if __name__ == "__main__":
    main()
