#!/usr/bin/python
# coding: utf-8

from xml.dom.minidom import parse
from mensaje import mensaje
import os



import math
# Base-2 log is not provided by this Python version's math module.
def log2(val):
	"""Return the base-2 logarithm of val.

	Fix: the original called the bare name `log`, which is not defined
	anywhere (math was imported as a module, not star-imported), so every
	call raised NameError. Use math.log with an explicit base instead.
	"""
	return math.log(val, 2)

# Frequency table: a dict subclass mapping events to their observed counts.
class freq(dict) :
	def N(self) :
		"""Total number of observations (sum of all counts)."""
		# sum() replaces the manual accumulation loop; values() works on
		# both Python 2 and Python 3 (iteritems() was Python-2-only).
		return sum(self.values())
	def prob_key(self, key) :
		"""Relative frequency (maximum-likelihood probability) of key.

		Raises KeyError for an unseen key, ZeroDivisionError on an empty
		table -- same as the original behavior.
		"""
		return float(self[key]) / self.N()
	def entropy(self) :
		"""Shannon entropy (in bits) of the empirical distribution."""
		# Hoist the total out of the loop: the original called
		# prob_key() per key, recomputing N() each time (O(n^2)).
		total = self.N()
		h = 0.0
		for count in self.values() :
			p = float(count) / total
			# math.log directly: the global log2() helper was broken
			# (it referenced an undefined name and raised NameError).
			h += p * math.log(p, 2)
		return -h
	def topK(self, k) :
		"""Return the k most frequent (event, count) pairs, highest first.

		Uses a key-based sort; the original cmp-based list.sort(lambda...)
		is deprecated and unavailable in Python 3.
		"""
		return sorted(self.items(), key=lambda kv : kv[1], reverse=True)[:k]

# Frequency table over contiguous n-grams of a sequence.
# NOTE(review): despite the name, an instance built with n counts
# (n+1)-tuples -- freqNGram(0) counts single words, freqNGram(1) counts
# pairs. Callers in this file rely on that, so it is kept as-is.
class freqNGram(freq) :
	def __init__(self,n) :
		# n is the extra context length; add() counts (n+1)-tuples.
		self.ngram = n
	def add(self, grams) :
		"""Count every contiguous (n+1)-gram of the sequence grams."""
		width = self.ngram + 1
		for i in range(len(grams)-self.ngram) :
			g = tuple(grams[i:i+width]) # tuple is hashable, list is not
			# dict.get() replaces has_key(), which is deprecated in
			# Python 2 and removed in Python 3.
			self[g] = self.get(g, 0) + 1

# Zipf's law: keep only the words whose frequency is at least the cutoff
# derived from the frequency-of-frequency distribution, cropping away the
# long tail of rare words.
def cropWordsZipf(corpus, lowPercent) :
	"""Return the words of corpus whose frequency survives the Zipf crop.

	corpus     -- a list of words.
	lowPercent -- fraction (0..1) of the sorted frequency-of-frequency
	              list to skip when choosing the minimum frequency.

	Fixes vs. the original: cmp-based list.sort(lambda...) and the
	list-returning filter()/map() assumptions were Python-2-only; the
	portable sorted(key=...) / comprehension forms below produce the
	same result. Debug prints are emitted as single pre-formatted
	strings so the output is identical under both print semantics.
	"""
	f0Gram = freqNGram(0) # freq of words, 0-grams (i.e. 1-tuples)
	f0Gram.add(corpus)
	# per-word frequencies; list() in case values() is a view
	freqs = list(f0Gram.values())
	# frequencies of frequencies, using freqs as 0-gram input
	ff0Gram = freqNGram(0)
	ff0Gram.add(freqs)
	print("DEBUG: len(f0Gram), len(ff0Gram) %d %d" % (len(f0Gram), len(ff0Gram)))
	by_count = sorted(ff0Gram.items(), key=lambda kv : kv[1], reverse=True)
	# minimum frequency: erase the first lowPercent of the
	# frequency-of-frequency entries; keys are 1-tuples, hence [0][0]
	min_freq = by_count[int(lowPercent*len(by_count))][0][0]
	print("DEBUG: min_freq %s" % (min_freq,))
	# keep the (word-tuple, count) pairs at or above the cutoff,
	# most frequent first
	kept = [item for item in f0Gram.items() if min_freq <= item[1]]
	kept.sort(key=lambda kv : kv[1], reverse=True)
	# unwrap the 1-tuple keys back into plain words
	words = [item[0][0] for item in kept]
	return words



#
# Main
#

import re
import sys
import re
import sys

# Command-line driver: read an XML corpus file of <mensaje> elements,
# tokenize their text into words, and print the "non gramatical" words
# selected by cropWordsZipf.
if len(sys.argv) > 1:
	# NOTE(review): assert is stripped under -O; an explicit check with a
	# raised error would be safer for argument validation.
	assert(sys.argv[1])
	fd = open(sys.argv[1], "r")					# open file
	if len(sys.argv)>2 and sys.argv[2] :				# optional percent argument (default 0.1)
		highPercent = float(sys.argv[2])
	else :	highPercent = 0.1

	corpus = [] # corpus is a list of words
	m = mensaje()
	doc = parse(fd)
	# Walk <corpus> -> <mensaje> elements; all other nodes are ignored.
	for root in doc.childNodes :
		if root.localName=="corpus" :
			for message in root.childNodes :
				#print "DEBUG: ", message.localName
				if message.localName=="mensaje" :
					m.load_from_dom(message) # got the text, split into words
					# Santiago's way: lowercase, then split on any run of
					# characters outside [a-zA-Z0-9 + Spanish accents/enye/u-umlaut].
					ws = map(unicode.lower, re.split(u'[^a-zA-Z0-9áéíóúÁÉÍÓÚñÑüÜ]+',unicode(m.texto)))
					#print "DEBUG:", ws
					corpus.extend(ws)
	fd.close()
	# NOTE(review): the value named highPercent here is passed as the
	# lowPercent parameter of cropWordsZipf -- the naming is inconsistent;
	# confirm which end of the distribution is intended.
	ngw = cropWordsZipf(corpus, highPercent)
	print "Non gramatical words:"
	for w in ngw :
		print w,

	# NOTE(review): sys.exit(0) is the conventional form; the builtin
	# exit() is injected by the site module and may be absent.
	exit(0)

else:
	print "Usage:", sys.argv[0], "corpus.xml highPercent"
	print
	print "Example:", sys.argv[0], "clarin.xml 0.05, get all the words in the first 5% of frequencies of frequencies"


"""
TODO:
"""

