import nltk
import glob
import re
import fnmatch
import os
import math

from xml.dom import minidom
from nltk.corpus import stopwords
from BeautifulSoup import BeautifulStoneSoup
import json
import operator

# Base directory holding the corpora: "ln\" (La Nación JSON dumps) and
# "ap2\" (AP XML test file). Windows-specific absolute path.
path_to_files = "C:\\Documents and Settings\\Administrador\\Escritorio\\INCC\\word association\\"

def tokenize(raw_text):
  """Split raw text into word tokens.

  Sentence-splits with nltk.sent_tokenize, word-splits each sentence with
  nltk.wordpunct_tokenize, and keeps only tokens that start with a word
  character (drops pure punctuation).

  BUG FIX: the original tokenized `raw_text` (the whole document) inside the
  per-sentence loop, so every token was duplicated once per sentence. Each
  sentence is now tokenized individually.
  """
  sentences = nltk.sent_tokenize(raw_text)
  tokens = [token
            for sentence in sentences
            for token in nltk.wordpunct_tokenize(sentence)
            if re.match(r'\w+', token)]
  return tokens

def get_object(filename):
	"""Read `filename`, unescape it via BeautifulStoneSoup, and return the
	parsed JSON object.

	The file content is lowercased and literal '&#x000a;' entities are turned
	back into newlines before json.loads.

	BUG FIX: the original leaked the file handle when parsing raised; `with`
	guarantees the file is closed on every path.
	"""
	with open(filename) as f:
		raw = BeautifulStoneSoup(
			f.read(),
			convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
	raw = raw.lower()
	raw = raw.replace('&#x000a;', '\n')
	return json.loads(raw)


def load_ln():
	filenames = glob.glob(path_to_files+"ln\\20114*.json") 
	news = {}
	for filename in filenames:
		print 'Cargando '+filename
		aux =  get_object(filename)
		print '\t '+str(len(aux))+' noticias'
		for auxito in aux:
			news[auxito["titulo"]] = auxito 
	print 'Total: '+str(len(news))+' noticias'
	return news
  
def load_ap():
	print 'Cargando AP'
	xmldoc = minidom.parse(path_to_files+'ap2\\aptest.txt')
	itemlist = xmldoc.getElementsByTagName('DOC')
	news = {}
	for item in itemlist:
		news[item.childNodes[1].childNodes[0].data] = item.childNodes[3].childNodes[0].data
	print '\t '+str(len(news))+' noticias'
	return news 

def process_ap():
	print "Loading ap.."
	news = load_ap()
	
	print "Tokenizing and getting fdist..."
	
	stop_words = stopwords.words('english')
	fdist = nltk.FreqDist()
	tokens = []
		
	for key, raw_text in news.items():
		print("Nota %s" % key)
		aux_tokens = tokenize(raw_text)
		tokens += aux_tokens
		for token in aux_tokens:
			if not token in stop_words: fdist.inc(token)
	
	print "Getting assoc. map"
	words = set(fdist.keys()[:500])
	#assoc_map = assoc_calc.get_association_map(words)

	conjunct_frequency = {}
	frequency = {}	
	
	for word in words: 
		frequency[word] = 0

	
	for index, token in enumerate(tokens):
		if token in words:
			frequency[token] = frequency[token] + 1
			for token2 in tokens[index+1:index+6]:
					if token2 in words:
						if not ((token, token2) in conjunct_frequency.keys()):
							conjunct_frequency[(token, token2)] = 0
						conjunct_frequency[(token, token2)]+=1
	
	
	corpus_size = len(tokens)
	assoc_map = {}
	for (w,freq) in conjunct_frequency.items():
		w1 = w[0]
		w2 = w[1]
		if freq > 6:
			denom = float(freq) * corpus_size 
			quot =	frequency[w1] * frequency[w2]
		assoc_map[(w1,w2)] = math.log(denom / quot) / math.log(2)
	
 

	print max(assoc_map.iteritems(), key=operator.itemgetter(1))[0]
	return assoc_map


def process_ln():
	print "Cargando ln.."
	news = load_ln()
	
	print "Tokenizando y obteniendo fdist..."
	
	stop_words = stopwords.words('spanish')
	fdist = nltk.FreqDist()
	tokens = []
		
	for key, nota in news.items():
		print("Nota %s" % key)
		for raw_text in nota:
			aux_tokens = tokenize(raw_text)
			tokens += aux_tokens
			for token in aux_tokens:
				if not token in stop_words: fdist.inc(token)
	
	print "Obteniendo assoc. map"
	words = set(fdist.keys()[:500])
	#assoc_map = assoc_calc.get_association_map(words)

	conjunct_frequency = {}
	frequency = {}	
	
	for word in words: 
		frequency[word] = 0

	
	for index, token in enumerate(tokens):
		if token in words:
			frequency[token] = frequency[token] + 1
			for token2 in tokens[index+1:index+6]:
					if token2 in words:
						if not ((token, token2) in conjunct_frequency.keys()):
							conjunct_frequency[(token, token2)] = 0
						conjunct_frequency[(token, token2)]+=1
	
	
	corpus_size = len(tokens)
	assoc_map = {}
	for (w,freq) in conjunct_frequency.items():
		w1 = w[0]
		w2 = w[1]
		if freq > 6:
			denom = float(freq) * corpus_size 
			quot =	frequency[w1] * frequency[w2]
		assoc_map[(w1,w2)] = math.log(denom / quot) / math.log(2)
	
 

	print max(assoc_map.iteritems(), key=operator.itemgetter(1))[0]
	return assoc_map