import networkx as nx
from operator import itemgetter
import random, sqlite3, math
import logging
import nltk
import re 
from collections import defaultdict
from nltk.corpus import stopwords
from itertools import combinations
from nltk.metrics import ContingencyMeasures

# Configure root logging once for the module.  NOTE: the format directive is
# %(asctime)s -- the original "%(asctime)m" is not a valid logging placeholder
# and raised "ValueError: unsupported format character 'm'" on the first
# formatted record.  (logging itself is already imported at the top of the file.)
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s', filename='runtime.log', level=logging.INFO)

class Graph(nx.Graph):
	"""Term co-occurrence graph backed by an SQLite background corpus model.

	Nodes are terms; edges connect terms that co-occur within a sentence and
	carry a log-likelihood-ratio (LLR) weight.  The background model is an
	SQLite table ``graph(terma, termb, k11, k12, k21, k22, ...)`` storing a
	2x2 contingency table of corpus co-occurrence counts per term pair
	(each pair appears in both (a,b) and (b,a) orientations).
	"""

	# Accepted values for the ``format`` argument of save().
	OUTPUT_FORMAT_BMG = 1
	OUTPUT_FORMAT_SQLITE = 2
	OUTPUT_FORMAT_GML = 3

	def __init__(self, bg_loc="/home/group/discover/background_graph_eng", tp="graph", fgbg=True, version=1, POS=False):
		"""Create an empty graph and optionally attach a background DB.

		bg_loc  -- path of the background SQLite DB, or None to run without one
		tp      -- free-form type tag; stored but not interpreted here
		fgbg    -- when True, fllr() smooths background counts with foreground data
		version -- marginal-approximation scheme used by __approximate_occurrences
		POS     -- when True, textToGraph() POS-tags tokens before building
		"""
		self.tp = tp
		self.fgbg = fgbg
		self.version = version
		self.POS = POS
		nx.Graph.__init__(self)
		# Always define the attribute so use without a background DB fails
		# loudly on None instead of an AttributeError (originally unset).
		self.bg_db = sqlite3.connect(bg_loc) if bg_loc is not None else None

	def save(self, format=1, filename="output"):
		"""Serialize the graph as <filename>.<ext> per OUTPUT_FORMAT_* constant."""
		if format == self.OUTPUT_FORMAT_BMG:
			self.__save_to_bmg(filename + ".bmg", weights=True)
		elif format == self.OUTPUT_FORMAT_SQLITE:
			self.__save_to_sqlite_db(filename + ".sqlite")
			self.__index_db(filename + ".sqlite")
		elif format == self.OUTPUT_FORMAT_GML:
			self.__save_to_gml(filename + ".gml")

	def load_from_db(self, dbloc):
		"""Replace the current graph with edges loaded from a foreground DB.

		Each undirected edge is stored twice (both orientations) under the
		same id, so only the first row per id is used.
		"""
		self.clear()
		fg_db = sqlite3.connect(dbloc)
		try:
			seen_ids = set()
			for row_id, terma, termb, llr_val in fg_db.execute("SELECT id, terma, termb, llr FROM graph"):
				if row_id not in seen_ids:
					seen_ids.add(row_id)
					self.add_edge(terma, termb, weight=llr_val)
		finally:
			fg_db.close()  # the original leaked this connection

	def __index_db(self, filename):
		"""Add lookup indexes to a freshly saved foreground DB."""
		db = sqlite3.connect(filename)
		try:
			db.execute("CREATE INDEX idx_1 ON graph(terma)")
			db.execute("CREATE INDEX idx_2 ON graph(termb)")
			db.execute("CREATE INDEX idx_3 ON graph(terma, llr)")
			db.execute("CREATE INDEX idx_4 ON graph(terma, termb)")
			db.commit()
		finally:
			db.close()

	def __save_to_sqlite_db(self, dbloc="article_fg.sqlite"):
		"""Dump edges to an SQLite DB, storing each edge in both orientations."""
		fg_db = sqlite3.connect(dbloc)
		try:
			fg_db.execute("CREATE TABLE IF NOT EXISTS graph(id int, terma string, termb string, llr float)")
			rows = []
			for it, (a, b, data) in enumerate(self.edges(data=True), start=1):
				w = data["weight"]
				rows.append((it, a, b, w))
				rows.append((it, b, a, w))
			fg_db.executemany("INSERT INTO graph(id, terma, termb, llr) VALUES(?,?,?,?)", rows)
			fg_db.commit()
		finally:
			fg_db.close()

	def __save_to_bmg(self, output="graph.bmg", weights=False):
		"""Write BMG format: one '#_symmetry' header line per edge, then edges.

		With weights=False the weight column is '-' unless the stored weight
		is the literal marker "B", which is preserved as-is.
		"""
		header = []
		content = []
		for a, b, data in self.edges(data=True):
			w = data["weight"]
			header.append("#_symmetry " + str(w) + "\n")
			if weights == True:
				content.append("t_" + a + "\t" + "t_" + b + "\t" + str(w) + "\n")
			else:
				content.append("t_" + a + "\t" + "t_" + b + "\t" + ("B" if w == "B" else "-") + "\n")
		# join + with-block: the original built the strings with quadratic
		# concatenation and never closed the file on error.
		with open(output, "w") as f:
			f.write("".join(header) + "".join(content))

	def __save_to_gml(self, output="graph.gml", weights=True):
		"""Write the graph in GML format (weights are always included by networkx)."""
		nx.write_gml(self, output)

	def __occurrence_to_contingency(self, together, a, b, total):
		"""Convert occurrence counts to a 2x2 contingency table [k11,k12,k21,k22].

		together -- sentences containing both terms
		a, b     -- sentences containing each term (including joint ones)
		total    -- total number of sentences
		"""
		k11 = together
		k12 = a - together
		k21 = b - together
		return [k11, k12, k21, total - k11 - k12 - k21]

	def __approximate_occurrences(self, pair):
		"""Approximate a background contingency table for a pair absent from the DB.

		Uses one stored row per term to estimate each term's marginal count;
		the joint count k11 is assumed to be zero.
		"""
		total = self.bg_db.execute("SELECT (k11+k12+k21+k22) FROM graph LIMIT 1").fetchone()[0]
		marginals = [0, 0]
		pair = sorted(pair)
		for i in range(2):
			row = self.bg_db.execute("SELECT termb, k11, k12, k21 FROM graph WHERE terma=? LIMIT 1", [pair[i]]).fetchone()
			if row is None:
				continue
			if self.version == 1:
				# termb > terma means this term was the alphabetically first
				# member of the stored pair, so its marginal is k11+k12;
				# otherwise it is k11+k21.
				marginals[i] = row[1] + row[2] if row[0] > pair[i] else row[1] + row[3]
			else:
				# version 2 (and anything else) uses k11+k12; the original
				# raised NameError for versions other than 1 or 2.
				marginals[i] = row[1] + row[2]
		return [0, marginals[0], marginals[1], total - marginals[0] - marginals[1]]

	def llog(self, num):
		"""Guarded log: log(num), except llog(0) == 0 so zero counts drop out."""
		return math.log(num) if num != 0 else num

	def fllr(self, bg_k, fg_kp):
		"""Foreground-vs-background log-likelihood ratio for one term pair.

		bg_k  -- background contingency counts [k11, k12, k21, k22]
		fg_kp -- foreground (document) contingency counts [k11, k12, k21, k22]
		Returns -2 * (foreground log-likelihood under the background model
		minus under the foreground model); 0.0 when it cannot be computed.
		"""
		# list() conversions: the original indexed bare map() objects, which
		# only works on Python 2.
		k = [float(x) for x in bg_k]
		kp = [float(x) for x in fg_kp]
		doc_total = sum(kp)
		if doc_total == 0:
			# No foreground observations -- nothing to score.
			return 0.0
		if self.fgbg:
			# Smooth the background table with the joint count expected if the
			# two terms were independent in the foreground.
			ind = ((kp[1] + kp[0]) / doc_total) * ((kp[2] + kp[0]) / doc_total) * doc_total
			k[0] += ind
			k[1] += kp[0] + kp[1] - ind
			k[2] += kp[0] + kp[2] - ind
			k[3] += doc_total - 2 * kp[0] - kp[1] - kp[2] + ind
		bg_total = sum(k)
		try:
			model_1 = [x / bg_total for x in k]
			model_2 = [x / doc_total for x in kp]
			return -2 * sum(
				(self.llog(m1) - self.llog(m2)) * count
				for m1, m2, count in zip(model_1, model_2, kp))
		except ZeroDivisionError:
			# The original used a bare except and then returned an unbound
			# name (NameError); report and fall back to a neutral score.
			logging.error("Error in llr calculation: empty background model")
			return 0.0

	def expand(self, word):
		"""Pull `word`'s background neighbours into the graph, then connect every
		node pair that also co-occurs in the background model."""
		for row in self.bg_db.execute("SELECT * FROM graph WHERE terma=? OR termb=?", [word, word]):
			self.add_edge(word, row[2], weight=row[3], inv_weight=1.0 / row[3])
		# Materialize the node list so add_edge cannot mutate a live node view
		# (networkx 2.x) while we iterate.
		for pair in combinations(list(self.nodes()), 2):
			hit = self.bg_db.execute("SELECT * FROM graph WHERE terma=? AND termb=?", sorted(pair)).fetchone()
			if hit is not None:
				self.add_edge(hit[1], hit[2], weight=hit[3], inv_weight=1.0 / hit[3])

	def get_bg_model(self, pair):
		"""Background contingency counts for a pair, approximated when the pair
		itself is not stored in the background DB."""
		res = self.bg_db.execute("SELECT k11, k12, k21, k22 FROM graph WHERE terma=? AND termb=?", sorted(pair)).fetchone()
		return res if res is not None else self.__approximate_occurrences(pair)

	def get_contexts(self, topic):
		"""Connected components that remain after removing `topic` and its edges.

		NOTE: copies the edge attribute as "weight_inv" (sic) even though
		edges store it as "inv_weight" -- kept for compatibility.
		"""
		tmp_graph = nx.Graph()
		for a, b, data in self.edges(data=True):
			if topic not in (a, b):
				tmp_graph.add_edge(a, b, weight=data["weight"], weight_inv=data["inv_weight"])
		return nx.connected_components(tmp_graph)

	def get_centraL_node(self, g=None):
		"""Return the node with the highest degree centrality in `g` (or in self).

		The misspelled method name is preserved for API compatibility.
		"""
		target = self if g is None else g
		centrality = nx.algorithms.centrality.degree_centrality(target)
		return max(centrality.items(), key=itemgetter(1))[0]

	def __remove_edges_avoid_isolation(self):
		"""Select lightest edges removable without isolating any node.

		Edges are scanned in ascending weight order.  The first edge whose
		removal would zero an endpoint's degree is kept; the remaining edges
		of that same weight are still collected, and any heavier edge stops
		the scan.  Returns the [a, b, weight] triples chosen for removal.
		"""
		# dict() makes this work with both networkx 1.x (dict) and 2.x (DegreeView).
		degrees = dict(self.degree())
		removables = list()
		ordered_edges = sorted(
			([a, b, data["weight"]] for a, b, data in self.edges(data=True)),
			key=itemgetter(2))

		blocked = False
		last_weight = None
		for edge in ordered_edges:
			if blocked:
				if edge[2] == last_weight:
					removables.append(edge)
					# The original fell through here, double-appending the
					# edge and corrupting the degree bookkeeping.
					continue
				break
			degrees[edge[0]] -= 1
			degrees[edge[1]] -= 1
			last_weight = edge[2]
			removables.append(edge)
			if degrees[edge[0]] == 0 or degrees[edge[1]] == 0:
				# Undo: removing this edge would isolate an endpoint.
				degrees[edge[0]] += 1
				degrees[edge[1]] += 1
				removables = removables[:-1]
				blocked = True
		return removables

	def parse_sentences(self, sentences):
		"""Clean sentences into lists of unique lowercase tokens.

		Tokens keep only ASCII letters and hyphens; cleaned tokens shorter
		than three characters are dropped.  Token order within a sentence is
		unspecified (set-based de-duplication, as in the original).
		"""
		word_re = re.compile(r"[a-zA-Z\-]+")
		parsed = list()
		for sentence in sentences:
			tokens = set()
			for raw in sentence.split():
				cleaned = "".join(word_re.findall(raw))
				if len(cleaned) > 2:
					tokens.add(cleaned.lower())
			parsed.append(list(tokens))
		return parsed

	def analyze_sentences(self, sentences):
		"""Count term and pair occurrences over sentences with 2+ distinct terms.

		Returns (term_counts, pair_counts, counted_sentences); pair keys are
		alphabetically ordered 2-tuples.
		"""
		term_dict = defaultdict(int)
		pair_dict = defaultdict(int)
		total_sen = 0
		for sentence in sentences:
			terms = set(sentence)
			if len(terms) < 2:
				continue  # single-term sentences contribute no pairs
			for term in terms:
				term_dict[term] += 1
			for pair in combinations(sorted(terms), 2):
				pair_dict[pair] += 1
			total_sen += 1
		return term_dict, pair_dict, total_sen

	def textToGraph(self, text):
		"""Build the LLR co-occurrence graph from raw text (POS-tagged if enabled)."""
		if self.POS:
			self.textToGraphPOS(text)
			return
		sentences = nltk.sent_tokenize(text)
		parsed_sentences = self.parse_sentences(sentences)
		term_dict, pair_dict, total_sen = self.analyze_sentences(parsed_sentences)
		self.create_foreground_param(term_dict, pair_dict, total_sen, new_pairs=True)

	def textToGraphPOS(self, text):
		"""Build the graph from text with each token joined to its POS tag.

		Tokens are rewritten as "word_TAG"; note parse_sentences() then strips
		the underscore, so the effective token is e.g. "dogNN" -- this matches
		the original behaviour.
		"""
		sentences = nltk.sent_tokenize(text)
		tagged_sentences = list()
		for sentence in sentences:
			tagged = nltk.pos_tag(nltk.word_tokenize(sentence))
			tagged_sentences.append(" ".join("_".join(token_tag) for token_tag in tagged))
		parsed_sentences = self.parse_sentences(tagged_sentences)
		term_dict, pair_dict, total_sen = self.analyze_sentences(parsed_sentences)
		self.create_foreground_param(term_dict, pair_dict, total_sen, new_pairs=True)

	def llr(self, param):
		"""Plain likelihood-ratio score of a contingency table via NLTK (no background)."""
		measure = ContingencyMeasures(nltk.collocations.BigramAssocMeasures())
		return measure.likelihood_ratio(param[0], param[1], param[2], param[3])

	def create_foreground_no_bg(self, word_dict, pair_dict, sentence_counter):
		"""Add an edge for every pair co-occurring in >1 sentence, weighted by plain LLR.

		word_dict / pair_dict / sentence_counter -- output of analyze_sentences().
		"""
		total = len(pair_dict)
		for it, pair in enumerate(pair_dict):
			if it % 1000 == 0:
				# lazy %-args: the message is only built if actually logged
				logging.info("Article graph calculation: %s%% done", round(100 * float(it) / total, 2))
			fg_k = self.__occurrence_to_contingency(pair_dict[pair], word_dict[pair[0]], word_dict[pair[1]], sentence_counter)
			if fg_k[0] > 1:
				self.add_edge(pair[0], pair[1], weight=self.llr(fg_k), parameters=fg_k)
		logging.info("Foreground ready.")

	def create_foreground_param(self, word_dict, pair_dict, sentence_counter, edge_zero=False, new_pairs=True):
		"""Add edges weighted by foreground-vs-background LLR.

		word_dict / pair_dict / sentence_counter -- output of analyze_sentences()
		edge_zero -- accepted for API compatibility; currently unused
		new_pairs -- if False, only pairs already present in the background
		             model (background k11 > 0) may produce edges
		An edge is added only when the pair co-occurs in more than one sentence
		and its foreground co-occurrence rate exceeds its background rate.
		"""
		total = len(pair_dict)
		for it, pair in enumerate(pair_dict):
			if it % 10000 == 0:
				logging.info("Article graph calculation: %s%% done", round(100 * float(it) / total, 2))
			bg_k = self.get_bg_model(pair)
			fg_k = self.__occurrence_to_contingency(pair_dict[pair], word_dict[pair[0]], word_dict[pair[1]], sentence_counter)
			if fg_k[0] > 1 and (new_pairs or bg_k[0] > 0):
				fg_llr = self.fllr(bg_k, fg_k)
				bg_n = bg_k[0] + bg_k[3]
				# Guard against an empty background marginal: the original
				# raised ZeroDivisionError on approximated tables summing to 0.
				bg_rate = float(bg_k[0]) / bg_n if bg_n else 0.0
				if float(fg_k[0]) / (fg_k[0] + fg_k[3]) > bg_rate:
					self.add_edge(pair[0], pair[1], weight=fg_llr, parameters=fg_k)
		logging.info("Foreground ready.")
