import csv
from Document import Document
from liblinearutil import *
import re
from xml.dom import minidom, Node
from math import log
from englishwords import EnglishWords
from stopwords import StopWords
from nltk.stem.wordnet import WordNetLemmatizer
class DocumentCollection:
	def __init__(self, cross_validation=0):
		self.collection = []
		self.df_cache = {}
		self.token_index = {}
		self.cross_validation = cross_validation
		self.weight = [0,0,0]
		self.dictionary_checker = EnglishWords("dat/new.english.words.list")
		self.stop_word_checker = StopWords("dat/stopwords")
		self.lemmatizer = WordNetLemmatizer()
	def addDocument(self, document):
		# print document.get_tokens()
		self.collection.append(document)
		print document.id
		self.__build__df__(document)
	def human_label(self, limit_neg, limit_pos, limit_neu):
		print "HUMAN LABEL"
		for document in self.collection:
			if (document.stars == "1") and (limit_neg > 0):
				document.polarity = "-1"
				document.confidence = "1"
				limit_neg -= 1
			if (document.stars == "3" and limit_neu > 0):
				document.polarity = "0"
				document.confidence = "1"
				limit_neu -= 1
			if (document.stars == "5" and limit_pos > 0):
				document.polarity = "1"
				document.confidence = "1"
				limit_pos -= 1
	def toCSVFile(self, file_name):
		with open(file_name, "wb") as csvfile:
			writer = csv.writer(csvfile, delimiter = ',', quotechar = '|', quoting=csv.QUOTE_MINIMAL)
			writer.writerow(["ID", "URL","USER","DATE","STARS","REVIEW", "POLARITY", "CONFIDENCE"])
			for item in self.collection:
				writer.writerow(item.toCSVRow())
	def toXML(self):
		ret = []
		ret.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
		ret.append("<root>")
		for item in self.collection:
			ret.append(item.toXML())
		ret.append("</root>")
		return '\n'.join(ret)
	def toXMLFile(self, file_name):
		handle = open(file_name, "wb")
		handle.write(self.toXML().encode("utf-8", "ignore"))
		handle.close()
	def toXMLFileWithPolarityFirst(self, file_name):
		handle = open(file_name, "wb")
		ret = []
		ret.append("<?xml version=\"1.0\" encoding=\"UTF-8\"?>")
		ret.append("<root>")
		with_polarity_list = []
		without_polarity_list = []
		for document in self.collection:
			if (document.polarity!=""):
				with_polarity_list.append(document)
			else:
				without_polarity_list.append(document)
		for document in with_polarity_list:
			ret.append(document.toXML())
		for document in without_polarity_list:
			ret.append(document.toXML())
		ret.append("</root>")
		handle.write(('\n'.join(ret)).encode("utf-8", "ignore"))
		handle.close()
	def starsCount(self, star):
		count = 0
		for item in self.collection:
			if item.getStars() == "%d" %star:
				count += 1
		return count
	def __build__df__(self, document):
		dict = []
		for doc_word in document.words:
			repetition = False
			for dict_word in dict:
				if (doc_word.word == dict_word.word and doc_word.pos == dict_word.pos):
					repetition = True
			if (repetition==False):
				dict.append(doc_word)
				exist_in_collection = False
				matching = None
				for collection_word in self.df_cache:
					if (doc_word.word == collection_word.word and doc_word.pos == collection_word.pos):
						exist_in_collection = True
						matching = collection_word
						break
				if exist_in_collection:
					self.df_cache[matching] += 1
				else:
					matching = doc_word
					self.df_cache[matching] = 1
					self.token_index[matching] = len(self.token_index) + 1
	def get_token_index(self, token):
		for collection_token in self.token_index:
			if (collection_token.word == token.word) and (collection_token.pos == token.pos):
				return self.token_index[collection_token]
		return -1
	def tf(self, word, document):
		count = 0
		for doc_word in document.words:
			if word.word == doc_word.word and word.pos == doc_word.pos:
				count += 1
		return float(count)/len(document.words)
	def idf(self, word):
		df = self.df(word)
		if df > 0:
			return log(float(len(self.collection))/df)
		else:			
			return 0
	def df(self, word):
		for col_word in self.df_cache:
			if (col_word.word == word.word and col_word.pos == word.pos):
				return self.df_cache[col_word]
		return 0
	def tfidf(self, token, document):
		return self.tf(token,document) * self.idf(token)
	def build_tfidf_table(self):
		tfidf_table = {}
		string_buffer = []
		for document in self.collection:
			real_label = document.polarity
			doc_string_buffer = [real_label]
			dict = {}
			tokens = document.words
			text_id = id
			for token in tokens:
				token_index = self.get_token_index(token)
				if ((token_index in dict)==False):
					doc_string_buffer.append("%d:%f" %(token_index, self.tfidf(token,document)))
					dict[token_index] = 1
			string_buffer.append(' '.join(doc_string_buffer))			
		result = '\n'.join(string_buffer)
		return result
	def get_number_instance_of_label(self, label):
		count = 0
		for document in self.collection:
			print document.polarity
			print label
			if document.polarity == label:
				count += 1
		return count
	def train(self):
		cv = ""
		if self.cross_validation > 0:
			cv = "-v %d" %self.cross_validation
		handle = open("temp/tfidf", "wb")
		tfidf_string = self.build_tfidf_table()
		handle.write(tfidf_string)
		handle.close()
		y, x = svm_read_problem('temp/tfidf')
		try:
			freq_neu = float(self.get_number_instance_of_label("0"))
			freq_neg = float(self.get_number_instance_of_label("-1"))
			freq_pos = float(self.get_number_instance_of_label("1"))
			print "%d %d %d" %(freq_pos,freq_neu,freq_neg)
			self.weight[0] = freq_pos/freq_neu
			self.weight[1] = freq_neg/freq_neu
			self.weight[2] = 1
			print '-s 0 -c 4 -w1 %f -w-1 %f -w0 %f %s' %(self.weight[0], self.weight[1], self.weight[2], cv)
			model = train(y, x, '-s 0 -c 4 -w1 %f -w-1 %f -w0 %f %s' %(self.weight[0], self.weight[1], self.weight[2], cv))
		except ZeroDivisionError:
			import traceback as e
			e.print_exc()
			model = train(y, x, '-s 5 -c 4 -b 1')
			print '-s 5 -c 4'
		# os.remove("temp/tfidf")
		if self.cross_validation<=0:
			p_label, p_acc, p_val = predict(y, x, model)
			ACC, MSE, SCC = evaluations(y, p_label)
		return model
	def set_cross_validation(self, cv):
		self.cross_validation = cv
def gettext(nodelist):
	"""Recursively concatenate the text content of *nodelist*, collapsing
	every run of whitespace into a single space.

	Fixes two bugs in the original: the recursive call referenced an
	undefined `self` (NameError on any element child), and `hasChildNodes`
	was never called -- a bound method is always truthy, so the broken
	branch was taken for every non-text node.
	"""
	retlist = []
	for node in nodelist:
		if node.nodeType == Node.TEXT_NODE:
			retlist.append(node.wholeText)
		elif node.hasChildNodes():
			retlist.append(gettext(node.childNodes))
	return re.sub(r'\s+', ' ', ''.join(retlist))
def importXML(file_name):
	"""Parse a <root><doc id="...">...</doc></root> XML file into a
	DocumentCollection.

	Each recognised child element of <doc> fills the matching Document
	field; missing elements default to the empty string.
	"""
	collection = DocumentCollection()
	# Close the file as soon as the DOM is built.
	with open(file_name, "rb") as handle:
		xmldom = minidom.parse(handle)
	root_node = xmldom.childNodes[0]
	for doc in root_node.childNodes:
		if doc.nodeType != Node.ELEMENT_NODE:
			continue
		if doc.tagName != "doc":
			continue
		# Read the id once per <doc>. The original re-read it inside the
		# child loop (shadowing the builtin `id`) and left it undefined for
		# a <doc> with no element children.
		doc_id = doc.attributes["id"].value
		fields = {"url": "", "user": "", "date": "", "stars": "",
			"review": "", "polarity": "", "confidence": ""}
		for child in doc.childNodes:
			if child.nodeType != Node.ELEMENT_NODE:
				continue
			if child.tagName in fields:
				fields[child.tagName] = gettext(child.childNodes)
		document = Document(doc_id, fields["url"], fields["user"],
			fields["stars"], fields["date"], fields["review"],
			fields["polarity"], fields["confidence"],
			collection.dictionary_checker, collection.stop_word_checker,
			collection.lemmatizer, False)
		collection.addDocument(document)
	return collection
def outputCSV(inputf, outputf):
	"""Convert the XML collection file *inputf* into the CSV file *outputf*."""
	importXML(inputf).toCSVFile(outputf)
def classify_single_text(collection, model_inst, document, options=""):
	"""Classify one document with a trained liblinear model.

	Builds the sparse tf-idf feature dict for *document* using the training
	*collection*'s vocabulary (tokens absent from the collection are
	skipped) and returns (predicted_label, decision_values).
	"""
	y = [0]  # dummy label: predict() requires one label per instance
	features = {}  # feature index -> tfidf weight (renamed from `dict`, which shadowed the builtin)
	processed = []  # distinct (word, pos) tokens handled so far
	for word in document.words:
		repetition = False
		for prev in processed:
			if prev.word == word.word and prev.pos == word.pos:
				repetition = True
				break
		if not repetition:
			processed.append(word)
			exists_in_collection = False
			for col_word in collection.df_cache:
				if word.word == col_word.word and word.pos == col_word.pos:
					exists_in_collection = True
					break  # one hit is enough; the original scanned the whole cache
			if exists_in_collection:
				features[collection.get_token_index(word)] = collection.tfidf(word, document)
	x = [features]
	p_label, p_acc, p_val = predict(y, x, model_inst, options)
	return p_label[0], p_val[0]
if (__name__ == "__main__"):
	# collection = importXML("file.xml")
	# collection.human_label()
	# collection.toXMLFileWithPolarityFirst("file1.xml")
	
	training_set_collection = importXML("training_set.xml")
	string_buffer = []
	for document in training_set_collection.collection:
		string_buffer.append(document.get_tokens())
	for entry in training_set_collection.df_cache:
		print "%s(%s):%d:%d" %(entry.word,entry.pos,training_set_collection.df_cache[entry],training_set_collection.get_token_index(entry))
	tfidf_table = training_set_collection.build_tfidf_table()
	# training_set_collection.set_cross_validation(10)
	# training_set_collection.train()
	training_set_collection.set_cross_validation(0)
	model = training_set_collection.train()
	# import cPickle
	# model_output_file_handle = open("model", "wb")
	# cPickle.dump(model, model_output_file_handle)
	# model_output_file_handle.close()
	
	test_set_collection = importXML("test_set.xml")
	for document in test_set_collection.collection:
		if document.polarity == "":
			label, prob = classify_single_text(test_set_collection, model,document, "-b 1")
			max_prob = 0
			for value in prob:
				print value
				if value>max_prob:
					max_prob = value
			document.polarity = "%d" %label
			document.confidence = "%f" %max_prob
	test_set_collection.toXMLFile("test_set_labelled.xml")