#!/usr/bin/python
# coding: utf-8

from xml.dom.minidom import parse
from mensaje import mensaje

class nGramModel :
	"""An n-gram language model over a word-list corpus.

	Probabilities are raw Maximum Likelihood Estimates (no smoothing),
	so sequences containing an unseen context are undefined (see MLE).
	[ms98] = Manning & Schuetze, Foundations of Statistical NLP, 1998.
	"""
	def __init__(self, n, corpus):
		# n: model order (2 = bigram, 3 = trigram, ...; 0 or 1 = unigram)
		# corpus: a list of words; counting is delegated to the cache
		self.nGram = n
		self.C = countWordsCaching(corpus)
	def P(self, words):
		# P(w1 ... wn) = P(w1) * P(w2|w1) * ... * P(wn|w1...wn-1)
		# (chain rule; each factor's history is truncated by CP)
		if not words:
			raise ValueError("P() needs at least one word")
		result = 1.0
		for i in range(len(words)):
			result *= self.CP(words[i], words[:i])
		return result
	def CP(self, w, words):
		# P(wn | w1 ... wn-1), Conditional Probability.
		# An order-n model conditions only on the LAST n-1 words of the
		# history (Markov assumption). The previous code took the FIRST
		# n words (words[:self.nGram]), which is wrong for any history
		# longer than the model order.
		if self.nGram > 1:
			context = words[-(self.nGram - 1):]
		else:
			context = []	# order 0/1: unigram, empty context
		return self.MLE(w, context)
	def MLE(self, w, words):
		# Maximum Likelihood Estimate, [ms98, p.197]:
		#   P(w | words) = C(words + [w]) / C(words)
		w1wn = list(words)	# copy so the caller's list is not mutated
		w1wn.append(w)
		cw1wn = self.C.count(w1wn)
		cw1wn_1 = self.C.count(words)
		if cw1wn_1 <= 0:
			# context never seen in the corpus: the MLE is undefined
			# (raise instead of assert, which vanishes under python -O)
			raise ValueError("unseen context, MLE undefined: %r" % (list(words),))
		return float(cw1wn) / float(cw1wn_1)

class countWordsCaching :
	"""Counts occurrences of contiguous word sequences in a corpus.

	Results are memoized in a dict keyed by tuples (hashable, unlike
	the list arguments callers pass in).
	"""
	def __init__(self, corpus):
		# corpus: a list of words
		self.corpus = corpus
		# seed the cache: the empty sequence is counted once per word
		self.cache = {(): len(corpus)}
		self.hits = 0	# cache statistics
		self.miss = 0
	def count(self, words):
		# Return how many times `words` occurs as a contiguous run in
		# the corpus; memoized under tuple(words).
		tw = tuple(words)
		if tw in self.cache:	# dict.has_key() was removed in Python 3
			self.hits += 1
			return self.cache[tw]
		# not in cache: compute it, cache it
		self.miss += 1
		ws = list(words)	# normalize so tuple input compares to list slices
		n = len(ws)
		result = 0
		# fixed: iterate self.corpus, not the global `corpus` this class
		# happened to rely on, and include the last window (the old
		# range(len-n) skipped the occurrence ending at the final word)
		for i in range(len(self.corpus) - n + 1):
			if self.corpus[i:i + n] == ws:
				result += 1
		self.cache[tw] = result
		return result
	def stats(self):
		# Print cache hit/miss statistics; guard the miss==0 case that
		# used to raise ZeroDivisionError.
		ratio = float(self.hits) / self.miss if self.miss else float(self.hits)
		print("%s stat: hits/miss %d %d %f" % (self.__class__.__name__, self.hits, self.miss, ratio))

import sys

#
# Main
#


if len(sys.argv) > 1:
	assert(sys.argv[1])
	fd = open(sys.argv[1], "r")					# open file
	if len(sys.argv)>2 and sys.argv[2] : n = int(sys.argv[2])	# ngram
	else :	n = 0
	if len(sys.argv)>3 and sys.argv[3] :				# words
		# encoding argv[] trick from http://betabug.ch/blogs/ch-athens/135
		words = sys.argv[3].decode("utf-8").split(" ")
	else :	words = "cristina gana".split(" ")

	corpus = [] # corpus is a list of words
	m = mensaje()
	doc = parse(fd)
	for root in doc.childNodes :
		if root.localName=="corpus" :
			for message in root.childNodes :
				#print "DEBUG: ", message.localName
				if message.localName=="mensaje" :
					m.load_from_dom(message) # got the text, split into words
					corpus.extend(m.texto.lower().split(" ")) # too simple!?
	fd.close()

	model = nGramModel(n,corpus)
	print model.P(words)
	model.C.stats()

else:
	print "Usage:", sys.argv[0], "corpus.xml n-gram \"list of words\""
	print
	print "Example:", sys.argv[0], "clarin.xml 2 \"cristina gana\", probability of the words out of the 2-gram-model for the corpus clarin.xml"

