import nltk
import glob
import re
import fnmatch
import multiprocessing
import os
import math
import ex

from xml.dom import minidom
from nltk.corpus import stopwords
from BeautifulSoup import BeautifulStoneSoup
from nltk.corpus import stopwords
import json
import operator




def get_object(filename):
  """Parse one dump file into a Python object.

  The file is XML-wrapped JSON: the first child of the parsed soup is the
  raw payload. It is lower-cased, embedded `&#x000a;` entities are turned
  back into real newlines, and the result is decoded as JSON.

  Fix: the original opened the file with a bare open()/close() pair, so the
  handle leaked whenever BeautifulStoneSoup (or `.contents[0]`) raised
  before the explicit close(). A `with` block guarantees the close.

  :param filename: path to the XML/JSON dump file.
  :return: the deserialized JSON object (typically a list of article dicts).
  """
  with open(filename) as f:
    raw = BeautifulStoneSoup(f.read(), convertEntities=BeautifulStoneSoup.XML_SPECIAL_CHARS_TO_ENTITIES).contents[0]
  raw = raw.lower()
  # The dumps encode newlines as the XML character entity; restore them
  # so json.loads sees the intended text.
  raw = raw.replace('&#x000a;','\n')

  return json.loads(raw)


def load_ln():
  """Load every La Nacion dump under ln/ into one dict.

  Keys are "<filename>/<1-based article index>"; values are the article
  body (the 'cuerpo' field, a list of strings) joined into a single string.
  """
  corpus = {}
  for path in glob.glob("ln/*.json"):
    for idx, article in enumerate(get_object(path), 1):
      corpus[path +"/"+ str(idx)] = ' '.join(article['cuerpo'])
  return corpus

def process_ln():
  print "Loading La Nacion.."
  news = load_ln()
  
  print "Tokenizing and getting fdist..."
  assoc_calc = ex.AssociationMapCalculator(news, [])
  assoc_calc.tokenize_and_get_fdist()
  
  print "Getting assoc. map"
  words = set(assoc_calc.fdist.keys()[:50])
  assoc_map = assoc_calc.get_association_map(words)

  max_assoc = max(assoc_map.iteritems(), key=operator.itemgetter(1))[0]
  print max_assoc[0]+ " y " +max_assoc[1]
  return assoc_calc

if __name__ == "__main__":
  # Script entry point: run the whole La Nacion association pipeline.
  process_ln()