#!/usr/bin/python
# coding: utf-8

from xml.dom.minidom import parse
from mensaje import mensaje

import math
def log2(val):
    """Return the base-2 logarithm of val.

    BUGFIX: the original body called a bare `log`, which is not defined in
    any imported namespace and raised NameError on first use; use math.log
    (the math module is already imported above) explicitly.
    """
    return math.log(val, 2)

# Frequency table: a dict subclass mapping item -> count, with helpers for
# probabilities, entropy and top-k extraction.
class freq(dict) :
	def N(self) :
		"""Total number of observations (sum of all counts)."""
		# sum(values()) replaces the Python-2-only iteritems() loop and
		# behaves identically on both Python 2 and 3.
		return sum(self.values())
	def prob_key(self, key) :
		"""Relative frequency of `key`: count(key) / N.

		Raises KeyError for an unseen key and ZeroDivisionError on an
		empty table (unchanged from the original behaviour).
		"""
		return float(self[key])/self.N()
	def entropy(self) :
		"""Shannon entropy (in bits) of the empirical distribution."""
		h = 0
		# Hoisted: calling prob_key per key recomputed N each time (O(n^2)).
		n = float(self.N())
		for x in self :
			p_x = self[x]/n
			# math.log(p, 2) inlined: the module-level log2 helper was
			# broken (called an undefined `log`).
			h += p_x * math.log(p_x, 2)
		return -h
	def topK(self, k) :
		"""Return the k most frequent (item, count) pairs, highest count first."""
		# key-based sorted() replaces the Python-2-only cmp= sort.
		return sorted(self.items(), key=lambda kv : kv[1], reverse=True)[:k]

# Frequency table of contiguous word tuples.
# CONVENTION: freqNGram(k) counts tuples of length k+1, so callers pass
# n-1 to build an n-gram table (see nGram_SGT_prob) and 0 for unigrams.
class freqNGram(freq) :
	def __init__(self,n) :
		# n: tuple width minus one (see class comment)
		self.ngram = n
	def add(self, grams) :
		"""Count every contiguous (ngram+1)-length tuple of `grams`.

		All len(grams) - ngram windows are counted; sequences shorter
		than the tuple width contribute nothing.
		"""
		width = self.ngram + 1
		for i in range(len(grams) - self.ngram) :
			g = tuple(grams[i:i+width]) # tuple is hashable, list is not
			# dict.get replaces the Python-2-only has_key() check
			self[g] = self.get(g, 0) + 1

# Maximum-likelihood n-gram language model over a list-of-words corpus.
# Counting is delegated to countWordsCaching (defined later in this file).
class nGramModel :
	def __init__(self,n,corpus) :
		# n: model order; corpus: list of words handed to the counting cache
		self.nGram = n
		self.C = countWordsCaching(corpus)
	def P(self,words) : # P(w1 ... wn) = P(w1) * P(w2|w1) * ... * P(wn|w1...wn-1)
		# Chain rule over the whole sequence; requires a non-empty input.
		assert(0<len(words))
		result = 1
		for i in range(len(words)) :
			# condition word i on all the words before it
			result *= self.CP(words[i],words[:i])
		return result
	def CP(self,w,words) : # P(wn | w1 ... wn-1), Conditional Probability
		# NOTE(review): this keeps the FIRST self.nGram words of the
		# history; a Markov n-gram model would normally condition on the
		# LAST n-1 words (words[-(self.nGram-1):]).  Looks like a bug —
		# confirm intent before changing.
		wordsUpToN = words[:self.nGram]
		return self.MLE(w,wordsUpToN)
	def MLE(self,w,words) : # Maximum Likelihood Estimate, [ms98, p.197]
		# MLE: C(w1..wn) / C(w1..wn-1), both counts from the cache.
		w1wn = list(words) # a copy
		w1wn.append(w)
		w1wn_1 = words # a reference
		cw1wn = self.C.count(w1wn)
		cw1wn_1 = self.C.count(w1wn_1)
		# The history must occur in the corpus; count(()) is len(corpus),
		# so this only fires for an unseen non-empty history.
		# NOTE(review): assert is stripped under -O; raising would be safer.
		assert(0<cw1wn_1)
		result = float(cw1wn)/float(cw1wn_1)
		return result

# Counts occurrences of a word sequence inside a corpus, memoizing results.
class countWordsCaching :
	def __init__(self,corpus) : # takes a corpus as a list of words
		self.corpus = corpus
		# Cache maps tuple(words) -> count; tuples because lists are not
		# hashable.  The empty sequence occurs len(corpus) times by definition.
		self.cache = {():len(corpus)}
		self.hits = 0 # cache stats
		self.miss = 0
	def count(self,words) :
		"""Number of times `words` occurs as a contiguous run in the corpus."""
		tw = tuple(words)
		if tw in self.cache :  # `in` replaces the Python-2-only has_key()
			self.hits+=1
			return self.cache[tw]
		# not in cache: scan the corpus, then cache the result
		self.miss+=1
		n = len(words)
		wl = list(words)  # normalize so tuple input compares equal to list slices
		result = 0
		# BUGFIX: the original scanned the module-level global `corpus`
		# instead of self.corpus, and its range(len-n) missed the final
		# window at position len-n (off-by-one).
		for i in range(len(self.corpus)-n+1) :
			if self.corpus[i:i+n]==wl :
				result+=1
		self.cache[tw] = result
		return result
	def stats(self) :
		"""Print cache hit/miss statistics."""
		# Guard against ZeroDivisionError when every lookup was a hit.
		ratio = float(self.hits)/self.miss if self.miss else float('inf')
		# Single-argument print() is valid on both Python 2 and 3
		# (the original bare print statement was Python-2-only).
		print("%s  stat: hits/miss %d %d %s" % (self.__class__.__name__, self.hits, self.miss, ratio))



import sys
import os

#
# ftp://ftp.informatics.susx.ac.uk/pub/users/grs2/SGT.c wrapper
#
def SGT_wrapper(freqOfFreq) :
	"""Run the external SGT_c binary (Gale & Sampson's Simple Good-Turing
	estimator) over a frequency-of-frequencies table.

	freqOfFreq: dict mapping 1-tuples (r,) -> Nr, i.e. the output of a
	freqNGram(0) built over a list of frequencies.
	Returns a dict mapping r (int) -> smoothed probability p(r), parsed
	from the child's "r p" output lines (r == 0 carries the unseen mass).

	NOTE(review): os.popen3 was removed in Python 3 — port to subprocess.
	Writing all input before reading any output can deadlock if the child
	fills its output pipe; fine for small tables, confirm for large ones.
	"""
	SGT_cmd = "SGT/SGT_c"
	result = {}
	toChild, fromChild, childError = os.popen3(SGT_cmd, 'r')
	# get r, Nr list sorted by r, write to stdout of SGT
	rs = freqOfFreq.keys()
	rs.sort()
	for r in rs :
		# keys are 1-tuples (r,): emit one "r Nr" line per frequency
		s = str(r[0])+" "+str(freqOfFreq[r])+"\n"
		toChild.write(s)
	toChild.close()
	# read values, add to r->p(r) dict
	ls = fromChild.readlines() 
	for l in ls :
		r_p = l.split()
		result[int(r_p[0])] = float(r_p[1])
	fromChild.close()
	childError.close()
	return result


def nGram_SGT_prob(words, corpus) :
	"""Estimate P(words) over `corpus` with Simple Good-Turing smoothing,
	recursively bisecting unseen n-grams ("bisectSGT").

	If the full n-gram occurs in the corpus, return its SGT-smoothed
	probability.  An unseen unigram returns 0.0 (the number of unseen
	species is unknown, so no mass can be assigned).  Otherwise the
	sequence is split in half, both halves are estimated recursively,
	and the product is scaled by P0/P0', where P0 is SGT's total unseen
	mass and P0' an independence-based estimate of the unseen-tuple mass.
	"""
 
	print "DEBUG: nGram_SGT_prob(", words,") = ",
	n = len(words)
	if n==0 : # base case 1
		return 1.0
	# construct n-gram freqs
	assert(0<n)
	# freqNGram(n-1) counts n-word tuples (see the class convention)
	fNGram = freqNGram(n-1)
	fNGram.add(corpus)
	# get freqs
	freqs = fNGram.values()
	#print "DEBUG:", fNGram
	# frequencies of frequencies, using freqs as 0-gram
	ffNGram = freqNGram(0)
	ffNGram.add(freqs)
	# call SGT.c
	probs = SGT_wrapper(ffNGram)
	#print "DEBUG"
	#for r in probs.iterkeys() :
	#	print r, probs[r]
	tw = tuple(words)
	if fNGram.has_key(tw) :
		# seen n-gram: look up the smoothed probability for its count r
		result = probs[fNGram[tw]]
	elif n==1 : # 0-gram, but not in freq table.
		# cannot estimate, unknown number of unseen species, return 0
		result = 0.0
	else:
		assert(0<n)
		# split words in two
		# NOTE(review): n/2 is integer division on Python 2 only;
		# Python 3 would need n//2 here.
		words0 = words[:n/2]
		words1 = words[n/2:]
		assert(words == words0+words1)
		# P0: SGT's total probability mass reserved for unseen n-grams
		P0 = probs[0]
		# compute P0' = \sum_{wi,wj \notin Bigrams} P(wi)P(wj) = 1 - \sum_{wi,wj \in Bigrams} P(wi)P(wj)
		P0p = 1
		# compute 0-gram frequencies by hand and sum them all. Using class nGramModel is too slow
		pw = {}
		N = 0
		for w in corpus :
			N += 1
			if pw.has_key(w) : pw[w] += 1
			else :		   pw[w] = 1
		# normalize counts into unigram probabilities
		for w in pw.iterkeys() :
			pw[w] = pw[w]/float(N)
		# subtract the independence probability of every SEEN tuple
		for wis in fNGram.iterkeys() :
			p = 1
			for wi in wis :
				p *= pw[wi]
			P0p -= p  # P0p -= P(w0)*P(w1)*...* P(wk)
		# got P0p, recursively compute the two parts probabilities
		p0 = nGram_SGT_prob(words0, corpus)
		p1 = nGram_SGT_prob(words1, corpus)
		# spread the unseen mass P0 in proportion to p0*p1 / P0p
		result = p0*p1 * P0/P0p

	print result
	return result

#
# Main
#

import re
import sys

# Script entry point: build a word corpus from a <corpus> XML file of
# <mensaje> messages, then print the bisect-SGT probability of the query.
# Usage: prog corpus.xml "list of words"
# NOTE(review): this runs at import time — consider wrapping it in an
# `if __name__ == "__main__":` guard.
if len(sys.argv) > 1:
	assert(sys.argv[1])
	fd = open(sys.argv[1], "r")					# open file
	if len(sys.argv)>2 and sys.argv[2] :				# words
		# encoding argv[] trick from http://betabug.ch/blogs/ch-athens/135
		words = sys.argv[2].decode("utf-8").split(" ")
	else :	words = "cristina gana".split(" ")

	corpus = [] # corpus is a list of words
	m = mensaje()
	doc = parse(fd)
	# walk the DOM: <corpus> root, one <mensaje> element per message
	for root in doc.childNodes :
		if root.localName=="corpus" :
			for message in root.childNodes :
				#print "DEBUG: ", message.localName
				if message.localName=="mensaje" :
					m.load_from_dom(message) # got the text, split into words
					# tokenize: lowercase runs of ASCII + Spanish accented letters
					ws = map(unicode.lower, re.findall(u'[a-zA-Z0-9áéíóúÁÉÍÓÚñÑüÜ]+',unicode(m.texto)))
					#print "DEBUG:", ws
					corpus.extend(ws)
	fd.close()
	sgt_prob = nGram_SGT_prob(words, corpus)
	print "Estimated prob:", sgt_prob

	# NOTE(review): sys.exit(0) is preferred over the site-provided exit()
	exit(0)

else:
	print "Usage:", sys.argv[0], "corpus.xml \"list of words\""
	print
	print "Example:", sys.argv[0], "clarin.xml \"cristina gana\", bisectSGT probability of the words for the corpus clarin.xml"


"""
TODO:
"""

"""
References
[1] William A. Gale and Geoffrey Sampson, "Good–Turing Frequency Estimation Without Tears"
"""

