import nltk
import glob
import re
import fnmatch
import multiprocessing
import os
import math
from association_map_calculator import AssociationMapCalculator
from xml.dom import minidom
from nltk.corpus import stopwords
from BeautifulSoup import BeautifulStoneSoup
import json
import operator

import sys
sys.path.append("..")
import tokenizer


  
def load_ap():
  """Load the AP training corpus from ap/ap.txt.

  Returns a dict mapping each document's DOCNO text to its TEXT body.
  """
  xmldoc = minidom.parse('ap/ap.txt')
  news = {}
  for item in xmldoc.getElementsByTagName('DOC'):
    # Look up child elements by tag name instead of positional childNodes
    # indices ([1] and [3]), which silently break if the whitespace (text
    # nodes) between tags ever changes.
    docno = item.getElementsByTagName('DOCNO')[0]
    body = item.getElementsByTagName('TEXT')[0]
    news[docno.childNodes[0].data] = body.childNodes[0].data
  return news

def load_ap_test():
  """Load the AP test corpus from ap/aptest.txt.

  Returns a dict mapping each document's DOCNO text to its TEXT body.
  """
  xmldoc = minidom.parse('ap/aptest.txt')
  news = {}
  for item in xmldoc.getElementsByTagName('DOC'):
    # Tag-name lookup replaces the fragile positional childNodes[1]/[3]
    # indexing, which depends on the exact whitespace between elements.
    docno = item.getElementsByTagName('DOCNO')[0]
    body = item.getElementsByTagName('TEXT')[0]
    news[docno.childNodes[0].data] = body.childNodes[0].data
  return news






def process_ap_test():
  """Build a word-association map from the AP test corpus.

  Loads ap/aptest.txt, computes a token frequency distribution, builds the
  association map over the 500 most frequent words, prints the most strongly
  associated entry, and returns the map — mirroring process_ap, which
  already returns its result (the original inconsistently returned None).
  """
  print("Loading ap..")
  news = load_ap_test()

  print("Tokenizing and getting fdist...")
  assoc_calc = AssociationMapCalculator(news)
  assoc_calc.tokenize_and_get_fdist()

  print("Getting assoc. map")
  # NOTE(review): relies on the old NLTK FreqDist behavior where keys() is
  # sorted by descending frequency, so [:500] is the top-500 words — confirm
  # before upgrading NLTK (modern FreqDist.keys() is unordered).
  words = set(assoc_calc.fdist.keys()[:500])
  assoc_map = assoc_calc.get_association_map(words)

  print(max(assoc_map.iteritems(), key=operator.itemgetter(1))[0])
  return assoc_map

def process_ap():
  """Build and return the word-association map for the AP training corpus.

  Loads ap/ap.txt, computes a token frequency distribution, restricts the
  association map to the 500 most frequent words, and prints the entry with
  the highest association score before returning the full map.
  """
  print("Loading ap..")
  documents = load_ap()

  print("Tokenizing and getting fdist...")
  calculator = AssociationMapCalculator(documents)
  calculator.tokenize_and_get_fdist()

  print("Getting assoc. map")
  top_words = set(calculator.fdist.keys()[:500])
  association_map = calculator.get_association_map(top_words)

  # Report the pair with the largest association score.
  best_entry = max(association_map.iteritems(), key=operator.itemgetter(1))
  print(best_entry[0])
  return association_map

def process_ln():
  """Placeholder for the LexisNexis pipeline; currently a no-op that
  always returns 0."""
  return 0
if __name__ == "__main__":
  # Script entry point: run the pipeline on the smaller AP test corpus.
  process_ap_test()