import sys
import nltk
import glob
import fnmatch
import os
from BeautifulSoup import BeautifulStoneSoup
import networkx as nx
import matplotlib.pyplot as plt                                          

from nltk.corpus.reader.wordnet import POS_LIST
from nltk.corpus import stopwords
from nltk.corpus import wordnet as wn


# Windows-specific base directory containing the corpus; keep the trailing
# backslash -- callers build paths by plain concatenation (path_to_files + '*.html').
path_to_files = 'C:\\Documents and Settings\\Administrador\\Escritorio\\INCC\\nltk\\'


def get_count(word_count_tuple):
  """Sort-key helper: return the count element of a (word, count) pair."""
  count = word_count_tuple[1]
  return count

def es_puntuacion(c):
	"""Return True when token *c* should be discarded as punctuation/noise.

	True for the empty string, for ANY single character (note: this
	deliberately drops one-letter words too; callers additionally filter
	with isalpha()), and for a handful of multi-character punctuation
	combinations observed in the corpus.

	Simplification of the original condition: the clause
	``len(c) == 1 and c in "-.'?!,\":;()|-/"`` was fully subsumed by the
	bare ``len(c) == 1`` test, so it has been dropped -- the truth table
	is unchanged.
	"""
	return len(c) <= 1 or c in ('""', '--', ').', '.""')

  
def pos2wn(pos):
	"""Map a Penn-Treebank POS tag to a WordNet POS letter.

	'JJ' maps to 'a' (adjective); otherwise the lower-cased first letter
	of the tag is used when it is a valid WordNet POS, falling back to
	'n' (noun) for everything else.
	"""
	if pos == 'JJ':
		return 'a'
	letra = pos[0].lower()
	if letra in set(POS_LIST):
		return letra
	return 'n'

def get_tokens(filename):
	"""Return the lower-cased word tokens of a wikileaks cable HTML file.

	Only the content starting at the cable table marker is tokenized;
	returns [] when the marker is absent.  Fixes the original's leaked
	file handle by reading inside a ``with`` block, and flattens the
	nested conditional into a guard clause.
	"""
	with open(filename, 'r') as fh:
		original = fh.read()
	ind = original.find("<table class='cable'>")
	if ind == -1:
		return []
	sin_header = original[ind:]
	sin_html = nltk.clean_html(sin_header)
	# Normalize stray characters to XML entities, then take the single
	# resulting text node.
	sin_codigos_feos = BeautifulStoneSoup(sin_html,
		convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
	sin_codigos_feos = sin_codigos_feos.lower().replace('&#x000a;', '\n')
	return nltk.wordpunct_tokenize(sin_codigos_feos)


#Ejercicio 1
#Modelo ER{n,p}<=> Los n nodos se relacionan independientemente entre si con probabilidad p
def ejercicio_1():
	"""Ejercicio 1: random Erdos-Renyi graph ER(n, p) with n=1000, p=0.15."""
	n, p = 1000, 0.15
	return nx.erdos_renyi_graph(n, p)

#Ejercicio 2
#Frecuencias de ocurrencia de nodos de grado i, array(grado => frecuencia)
def ejercicio_2():
	"""Ejercicio 2: degree histogram of an ER graph.

	Returns a list where index i holds the number of nodes of degree i.
	"""
	grafo = ejercicio_1()
	return nx.degree_histogram(grafo)

#Ejercicio 3
#Grafo de palabras de un corpus
#Nodos   <=> Palabras (sin repetirse)
#Aristas <=>  Las palabras se "tocan" ... r(p1, p2) si existe oracion "...p1 p2..."

#Levanto wikileaks
#Cargo nodos
#Cargo aristas
#Aparentemente, deberia generar dos grafos, uno "lematizado" y otro "sin lematizar"
def ejercicio_4(test = True):
	"""Ejercicio 4: word-adjacency graph over the wikileaks cable corpus.

	Nodes are distinct (unlemmatized) words; an edge links each pair of
	consecutive surviving tokens.  With test=True only the .html files
	directly under path_to_files are used; otherwise the whole 'wikis'
	tree is walked recursively.

	Returns (degree_centrality, clustering, average_clustering, diameter).

	Fixes vs. the original: removed the unused WordNetLemmatizer; the
	directory walk now runs only when its result is needed (not in test
	mode); stopwords are held in a set for O(1) membership; the two
	filtering passes are merged into one (matching ejercicio_4_lemma);
	print uses the py2/py3-compatible parenthesized form.
	"""
	stop_words = set(stopwords.words('english'))
	files = glob.glob(path_to_files + '*.html')
	if not test:
		files = []
		for root, dirnames, filenames in os.walk(path_to_files + 'wikis'):
			for filename in fnmatch.filter(filenames, '*.html'):
				files.append(os.path.join(root, filename))

	g = nx.Graph()
	for f in files:
		print('Procesando archivo ' + f)
		tokens = get_tokens(f)
		# Keep alphabetic tokens that are neither stopwords nor punctuation.
		tokens = [t for t in tokens
		          if t not in stop_words and not es_puntuacion(t) and t.isalpha()]
		g.add_nodes_from(tokens)
		for indice, token in enumerate(tokens):
			if indice > 0:
				g.add_edge(token, tokens[indice - 1])

	grado = nx.degree_centrality(g)  # NOTE(review): one of several centrality measures
	clustering = nx.clustering(g)
	clustering_promedio = nx.average_clustering(g)
	# NOTE(review): nx.diameter raises on a disconnected graph -- assumes the
	# corpus yields a single connected component; confirm with real data.
	diametro = nx.diameter(g)

	return grado, clustering, clustering_promedio, diametro
 
def ejercicio_4_lemma(test = True):
	"""Ejercicio 4 (lemmatized variant): graph whose nodes are WordNet lemmas.

	Each cable file is sentence-tokenized, POS-tagged, and lemmatized;
	consecutive surviving lemmas are joined by an edge.  With test=True
	only the .html files directly under path_to_files are used; otherwise
	the whole 'wikis' tree is walked.

	Returns (degree_centrality, clustering, average_clustering, diameter).

	BUGFIX: the original left `stem_words` handling OUTSIDE the
	`if ind != -1` block, so a file without the cable marker re-inserted
	the previous file's lemmas -- or raised a NameError if it was the
	first file.  Such files are now skipped with `continue`.  Also closes
	the file handle, walks the tree only when needed, and uses a stopword
	set.
	"""
	stop_words = set(stopwords.words('english'))
	wnl = nltk.WordNetLemmatizer()
	files = glob.glob(path_to_files + '*.html')
	if not test:
		files = []
		for root, dirnames, filenames in os.walk(path_to_files + 'wikis'):
			for filename in fnmatch.filter(filenames, '*.html'):
				files.append(os.path.join(root, filename))

	g = nx.Graph()
	for f in files:
		print('Procesando archivo ' + f)
		with open(f, 'r') as fh:
			original = fh.read()
		ind = original.find("<table class='cable'>")
		if ind == -1:
			continue  # no cable table in this file: skip it entirely

		sin_header = original[ind:]
		sin_html = nltk.clean_html(sin_header)
		sin_codigos_feos = BeautifulStoneSoup(sin_html,
			convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
		sin_codigos_feos = sin_codigos_feos.lower().replace('&#x000a;', '\n')

		stem_words = []
		for s in nltk.sent_tokenize(sin_codigos_feos):
			tokens = nltk.wordpunct_tokenize(s)
			for (w, p) in nltk.pos_tag(tokens):
				stem_words.append(wnl.lemmatize(w, pos2wn(p)))

		# Keep alphabetic lemmas that are neither stopwords nor punctuation.
		stem_words = [t for t in stem_words
		              if t not in stop_words and not es_puntuacion(t) and t.isalpha()]
		g.add_nodes_from(stem_words)
		for indice, token in enumerate(stem_words):
			if indice > 0:
				g.add_edge(token, stem_words[indice - 1])

	grado = nx.degree_centrality(g)  # NOTE(review): one of several centrality measures
	clustering = nx.clustering(g)
	clustering_promedio = nx.average_clustering(g)
	# NOTE(review): raises if the graph is disconnected -- confirm with real data.
	diametro = nx.diameter(g)

	return grado, clustering, clustering_promedio, diametro




#Ejercicio 5
#No entiendo que grafo levantar
#Idea: Nodo <=> Synset
#		 	 Arista <=> Alguna relacion (hiper, hipo, anto, mero, ??)



def closure_graph(synset, fn):
  """Return the undirected graph of the closure of *synset* under *fn*.

  Starting at *synset*, repeatedly expands each unseen synset with
  fn(synset) (e.g. a hypernym/hyponym accessor), adding a node per
  synset name and an edge per expansion step.

  Rewritten iteratively with an explicit worklist: the original's
  recursion could exceed Python's recursion limit on deep closures.
  The produced node and edge sets are identical.
  """
  graph = nx.Graph()
  seen = set()
  pending = [synset]
  while pending:
    s = pending.pop()
    if s in seen:
      continue
    seen.add(s)
    graph.add_node(s.name)
    for s1 in fn(s):
      graph.add_node(s1.name)
      graph.add_edge(s.name, s1.name)
      pending.append(s1)
  return graph


def words_to_synsets(words):
	"""Return, for each word, the synset of its first WordNet lemma.

	NOTE(review): wn.lemmas(w)[0] raises IndexError for words unknown to
	WordNet, as in the original.
	"""
	return [wn.lemmas(w)[0].synset for w in words]

def ejercicio_5(ss = None, fn = (lambda s: s.hypernyms())):
	"""Ejercicio 5: graph over WordNet synsets related through *fn*.

	ss: iterable of seed synsets; defaults to every noun synset.
	fn: relation expander -- e.g. hypernyms (default), hyponyms,
	    part_meronyms, part_holonyms, member_meronyms, member_holonyms,
	    substance_meronyms or substance_holonyms.

	Returns the resulting undirected nx.Graph.

	BUGFIX: the original default ``ss = wn.all_synsets('n')`` was
	evaluated ONCE at import time; because it is a generator, it was
	exhausted after the first call, so every later default call silently
	built an empty graph.  A ``None`` sentinel now creates a fresh
	generator per call.  Dead code after the return (nx.draw/plt.show)
	was removed.
	"""
	if ss is None:
		ss = wn.all_synsets('n')

	seen = set()
	graph = nx.Graph()

	def recurse(s):
		if s not in seen:
			seen.add(s)
			graph.add_node(s.name)
			for s1 in fn(s):
				graph.add_node(s1.name)
				graph.add_edge(s.name, s1.name)
				recurse(s1)

	for s in ss:
		recurse(s)

	return graph

#ejercicio_5()