#!/usr/bin/python
# coding: utf-8

from xml.dom.minidom import parse
from mensaje import mensaje

# computes all frequencies and probabilities of n-grams in the given corpus (at startup)
class nGram :
	"""Frequency and probability table over (n+1)-word windows of a corpus.

	NOTE: nGram(n, ...) actually stores (n+1)-tuples as keys: nGram(0, c)
	holds unigrams and nGram(1, c) holds bigrams, mirroring the usage in
	tTestGetCollocations / lrTestReplaceCollocations below.
	"""
	def __init__(self, n, corpus) :
		# n: window size minus one (see class note); corpus: list of words
		self.ngram = n
		self.N = 0    # number of windows counted
		self.c = {}   # window tuple -> absolute count
		self.p = {}   # window tuple -> relative frequency
		for i in range(len(corpus) - self.ngram) :
			self.N += 1
			g = tuple(corpus[i:i + self.ngram + 1])  # tuple is hashable
			self.c[g] = self.c.get(g, 0) + 1
		for g in self.c :
			self.p[g] = self.c[g] / float(self.N)
	def count(self, ngram) :
		# Keys are (self.ngram+1)-tuples; the original asserted
		# len(ngram) == self.ngram, which no stored key could ever
		# satisfy, so every call that passed the assert raised KeyError.
		assert len(ngram) == self.ngram + 1
		return self.c[tuple(ngram)]
	def prob(self, ngram) :
		assert len(ngram) == self.ngram + 1
		return self.p[tuple(ngram)]
	def topK(self, k) :
		# the k most frequent windows, most frequent first
		ranked = sorted(self.c.items(), key=lambda kv: kv[1], reverse=True)
		return [g for g, _ in ranked[:k]]


# From http://neverland.ncssm.edu/~morrison/tcm05.html
def binomial(n,k) :
	"""Binomial coefficient C(n, k) by the multiplicative formula.

	After step j the running product p equals C(n, j+1), so
	p * (n - j) is always exactly divisible by (j + 1): floor
	division (//) is exact, whereas plain '/' silently degrades
	to float under true division.  int(k) lets the bound work for
	the decimal.Decimal k that b() passes in, while n may stay a
	Decimal to keep the caller's precision.
	"""
	p = 1
	for j in range(int(k)):
		p = p * (n - j) // (j + 1)
	return p

def b(k,n,x) :
	"""Binomial pmf C(n, k) * x**k * (1-x)**(n-k), evaluated in Decimal.

	Arguments are routed through str() into Decimal so that float
	inputs keep their printed value; the result is converted back
	to float the same way.
	"""
	D = decimal.Decimal
	kd, nd, xd = D(str(k)), D(str(n)), D(str(x))
	pmf = binomial(nd, kd) * (xd ** kd) * (1 - xd) ** (nd - kd)
	return float(str(pmf))


import math
import decimal # high presicion numbers
# as in [1] p.173
def L(k,n,x) :
	"""Likelihood x**k * (1-x)**(n-k) as in [1] p.173.

	Computed in Decimal (via str() round-trips) to postpone
	underflow, then returned as a plain float.
	"""
	D = decimal.Decimal
	kd, nd, xd = D(str(k)), D(str(n)), D(str(x))
	likelihood = (xd ** kd) * (1 - xd) ** (nd - kd)
	return float(str(likelihood))

# modified to cope with overflow
def logL(k,n,x) :
	"""Log-likelihood k*log(x) + (n-k)*log(1-x).

	Equivalent to log(L(k, n, x)) but immune to the underflow /
	overflow that the direct product suffers for large n.
	Requires 0 < x < 1 (callers guard the degenerate counts).
	"""
	return k * math.log(x) + (n - k) * math.log(1 - x)


# Using the t-test: from the top-k most frequent bigrams (w1,w2), return the top-j in the t ranking
def tTestGetCollocations(corpus, topk, topj) :
	"""Rank the topk most frequent bigrams by t-score and return the best
	topj of them as a dict mapping (w1, w2) -> t.

	t-test for collocations as in [1] p.165, with the fix from the
	errata (http://nlp.stanford.edu/fsnlp/errata.html): the sample
	variance comes from the null hypothesis p(w1)*p(w2), not from the
	observed bigram probability.
	"""
	zeroGram = nGram(0, corpus)  # unigram table (note nGram's n+1 quirk)
	uniGram = nGram(1, corpus)   # bigram table
	lTopK = uniGram.topK(topk)

	# compute t, [1] p.165
	t = {}
	N = zeroGram.N
	for w1w2 in lTopK :
		# H0 (independence): expected bigram probability is p(w1)*p(w2)
		p = zeroGram.p[w1w2[:-1]] * zeroGram.p[w1w2[1:]]
		pw1w2 = uniGram.p[w1w2]
		mu = p
		s_sq = p        # errata fix: variance from H0, not pw1w2
		x_bar = pw1w2
		t[w1w2] = (x_bar - mu) / math.sqrt(s_sq / N)

	# keep the topj bigrams with the highest t-score, as a dict
	# (the original return line was space-indented inside a tab-indented
	# body -- a TabError under Python 3)
	ranked = sorted(t.items(), key=lambda kv: kv[1], reverse=True)
	return dict(ranked[:topj])


# From the dict of collocations, replace "w1" "w2" by "w1_w2"
def tTestReplaceCollocations(corpus, collocations) :
	"""Return a new word list where each adjacent pair (w1, w2) present in
	`collocations` is fused into the single token "w1_w2".

	Greedy left-to-right scan; a word consumed by a fusion is not
	considered again.  Fixes the original loop, whose `i < len(corpus)-1`
	condition silently dropped the last word whenever it was not part of
	a fused pair.
	"""
	result = []
	i = 0
	n = len(corpus)
	while i < n :
		if i + 1 < n and (corpus[i], corpus[i+1]) in collocations :
			result.append(corpus[i] + "_" + corpus[i+1])
			i += 2
		else :
			result.append(corpus[i])
			i += 1
	return result


# Using the LRT ranking of the top-k bigrams, replace those ranked in [offset, offset+top-j) by word1_word2
# (works OK with an offset of 128 approx.)
def lrTestReplaceCollocations(corpus, topk, topj, offset) :
	# compute likelihood ratio test (lrt), [1] p.17
	zeroGram = nGram(0,corpus)
	uniGram = nGram(1,corpus)
	#print "DEBUG: 0Gram N ~= 1Gram N:", zeroGram.N, uniGram.N
	#print "DEBUG: unique 0Gram 1Gram:", len(zeroGram.c), len(uniGram.c)
	lTopK = uniGram.topK(topk)
	lrt = {}
	N = zeroGram.N
	for w1w2 in lTopK :
		#print "DEBUG:", w1w2
		c1 = zeroGram.c[w1w2[:-1]]
		c2 = zeroGram.c[w1w2[1:]]
		c12 = uniGram.c[w1w2]
		if c2==c12 : #avoiding p2 going to 0, and then logL(,,p2) going to \infty
			c2+=1
		if c1==c12 : #avoiding p1 going to 1, and then logL(,,p1) going to \infty
			c1+=1
		p = c2/float(zeroGram.N)
		p1 = c12/float(c1)
		p2 = (c2 - c12) / float(zeroGram.N - c1)

		logLambda = logL(c12,c1,p) + logL(c2-c12,N-c1,p) - logL(c12,c1,p1) - logL(c2-c12,N-c1,p2)
		lrtee = -2*logLambda
		#print "lrt["+str(w1w2)+"]",lrtee
		lrt[w1w2] = lrtee

	#get [offset, offset+top-j]
	lTopJ = lrt.items()
	lTopJ.sort(lambda x,y : cmp(x[1],y[1]), reverse=True)
	lTopJ = lTopJ[offset : offset+topj]
	# print top-j
	print "LRT test"
	print "-2logƛ C(w1) C(w2) C(w1w2)" 
	for w1w2t in lTopJ :
		print '%5.3g %6d %4d %4d %s %s' % (w1w2t[1], zeroGram.c[w1w2t[0][:-1]], zeroGram.c[w1w2t[0][1:]], uniGram.c[w1w2t[0]], unicode(w1w2t[0][0]), unicode(w1w2t[0][1]))

	#replace all top-j from top-k, by word1_word2
	dTopJ = dict(lTopJ) #construct a dict again
	print "DEBUG: len(corpus) before", len(corpus)
	i = 0
	length = len(corpus)-1
	while i<length :
		if dTopJ.has_key((corpus[i],corpus[i+1])) :
			w1_w2 =corpus[i]+"_"+corpus[i+1]
			#print "DEBUG: replacing", w1_w2
			corpus.pop(i)
			corpus.pop(i)
			corpus.insert(i,w1_w2)
			i +=1
			length -=1
		else:
			i +=1
	print "DEBUG: len(corpus) after", len(corpus)
	
	raw_input("pausa")
	print "DEBUG:"
	for w in corpus:
		if '_' in w : print w,

	return corpus

#
# Main
#
def main():

    import re
    import sys
    
    if len(sys.argv) > 1:
    
            """
            # Test to catch the bug in [1] p.165
            p = 3.615e-7
            pw1w2 = 5.5911e-7 
            N = 14307668
            mu = p
            s_sq = pw1w2
            x_bar = pw1w2
            print "(",x_bar," - ",mu,") / (math.sqrt(",s_sq,"/",N,"))"
            tee = (x_bar - mu) / (math.sqrt(s_sq/N))
            print tee
            exit(0)
            """
    
            assert(sys.argv[1])
            fd = open(sys.argv[1], "r")					# open file
            if len(sys.argv)>2 and sys.argv[2] :				# top-k bigrams
                    topk = int(sys.argv[2])
            else :	topk = 2048
            if len(sys.argv)>3 and sys.argv[3] :				# top-j bigrams under hypo-test
                    topj = int(sys.argv[3])
            else :	topj = 256
            if len(sys.argv)>4 and sys.argv[4] :				# offset for LRT
                    offset = int(sys.argv[4])
            else :	offset = 256
    
            corpus = [] # corpus is a list of words
            m = mensaje()
            doc = parse(fd)
            for root in doc.childNodes :
                    if root.localName=="corpus" :
                            for message in root.childNodes :
                                    #print "DEBUG: ", message.localName
                                    if message.localName=="mensaje" :
                                            m.load_from_dom(message) # got the text, split into words
                                            # Santiago's way
                                            ws = map(unicode.lower, re.findall(u'[a-zA-Z0-9áéíóúÁÉÍÓÚñÑüÜ]+',unicode(m.texto)))
                                            #print "DEBUG:", ws
                                            corpus.extend(ws)
            fd.close()
            
            #collocations = tTestGetCollocations(corpus, topk, topj)
            #print collocations
            #corpus = tTestReplaceCollocations(corpus, collocations)
            #print corpus
            #print "DEBUG:", corpus
            #corpus = lrTestReplaceCollocations(corpus, topk, topj, offset)
            #print "DEBUG:", corpus
    
    
    else:
            print "Usage:", sys.argv[0], "corpus.xml top-k top-j offset"
            print
            print "Example:", sys.argv[0], "clarin.xml 2048 256 256, --insert explanation--"

# script entry point: run main() only when executed directly, not when imported
if __name__ == "__main__":
    main()

"""
TODO:
"""

"""
References
	[1] Chris Manning and Hinrich Schütze, Foundations of Statistical Natural Language Processing, MIT Press. Cambridge, MA: May 1999
"""

