#!/usr/bin/python
import random

from math import log, exp
from copy import deepcopy

def getSequenceAndKeys(filename):
	"""Read a FASTA file and return (sequence, keys).

	Lines containing '>' (headers) are skipped; all remaining lines are
	stripped, concatenated and upper-cased.  *keys* is the set of
	distinct characters occurring in the sequence.

	Fix: the file handle was opened and never closed; a with-statement
	now guarantees it is released.
	"""
	sequence = ""
	with open(filename) as f:
		for line in f:
			if ">" not in line:
				sequence += line.strip()
	sequence = sequence.upper()

	# set() over the string collects the alphabet directly.
	return (sequence, set(sequence))


def getHiddenStates(num):
	"""Return the hidden-state labels 0 .. num-1 as a set."""
	return set(range(num))


def getInitialDoubleDistribution(hid, obs):
	"""Build one independent random log-distribution over *obs* for
	every state in *hid*, keyed by state."""
	return dict((state, getInitialDistribution(obs)) for state in hid)

	
def getInitialDistribution(states):
	"""Return a near-uniform random distribution over *states*,
	normalized and converted to log space."""
	weights = dict((state, random.randint(95, 105)) for state in states)
	return makelog(normalize(weights))


def normalize(dist):
	"""Scale the values of *dist* in place so they sum to 1; return it.

	The total is forced to float so integer counts divide correctly.
	"""
	total = float(sum(dist.values()))
	for key in dist:
		dist[key] /= total
	return dist


def makelog(dist):
	"""Replace every value of *dist* with its natural log, in place,
	and return the same dict."""
	for key, value in dist.items():
		dist[key] = log(value)
	return dist


def logsum(loga, logb):
	"""Return log(exp(loga) + exp(logb)) in a numerically stable way.

	The larger operand is factored out so exp() never overflows.

	Bug fix: the guard tested `loga == -inf` twice and never looked at
	`logb`, so logsum(-inf, x) incorrectly returned -inf for finite x.
	Both operands are now checked; when only one is -inf the swap makes
	exp(logb - loga) == 0 and the finite operand is returned.
	"""
	neg_inf = float("-inf")
	if loga == neg_inf and logb == neg_inf:
		return neg_inf
	if loga < logb:
		(loga, logb) = (logb, loga)
	return loga + log(1 + exp(logb - loga))
	

def logtotal(loga, logb):
	"""Accumulating log-sum.

	The callers in this file initialize their accumulators to 0 and use
	that value as a "nothing accumulated yet" sentinel, so a 0 operand
	makes the other operand pass through unchanged; otherwise the pair
	is combined with logsum.
	"""
	for value, other in ((loga, logb), (logb, loga)):
		if value == 0:
			return other
	return logsum(loga, logb)

	
def printExp(dist):
	"""Debug helper: print each key with exp() of its stored
	log-probability, then the running total (which should be close to
	1.0 for a normalized distribution), all on one line."""
	total = 0
	for s in dist.keys():
		# Trailing comma keeps Python 2's print on the same line.
		print s, ":", exp(dist[s]), ", ",
		total += exp(dist[s])
	print total


def printDoubleExp(dist):
	"""Debug helper: print a nested {state: {token: log-prob}} table,
	one printExp row per outer state."""
	for s in dist.keys():
		print s, " - ",
		printExp(dist[s])


# Forward-Backward Algorithm ----------------------------------------
def getAlphaValues(sequence, hid, PI, A, B):
	"""Forward pass of forward-backward.

	alpha[s][t] is the log-probability of emitting the first t+1
	observations and being in state s at time t, computed with the
	usual recursion over all predecessor states.
	"""
	length = len(sequence)
	alpha = {}
	# t = 0: prior plus the first emission.
	for state in hid:
		alpha[state] = [0] * length
		alpha[state][0] = PI[state] + B[state][sequence[0]]

	for t in range(1, length):
		for curr in hid:
			# 0 is the "empty" sentinel understood by logtotal.
			acc = 0
			for prev in hid:
				acc = logtotal(acc, alpha[prev][t - 1] + A[prev][curr])
			alpha[curr][t] = acc + B[curr][sequence[t]]

	return alpha
	
	
def getBetaValues(sequence, hid, A, B):
	"""Backward pass of forward-backward.

	beta[s][t] is the log-probability of the observations after time t
	given state s at time t, via
	beta_t(i) = logsum_j [ a_ij + b_j(o_{t+1}) + beta_{t+1}(j) ].

	Bug fix: the emission term must use the *next* observation,
	B[j][sequence[t+1]], not B[j][sequence[t]].  This matches the
	standard recursion and the chi computation elsewhere in this file,
	which also pairs B[j][sequence[t+1]] with beta[j][t+1].
	"""
	beta = dict()
	T = len(sequence) - 1
	for s in hid:
		beta[s] = [0] * len(sequence)
		beta[s][T] = 0.0  # log(1): the empty suffix has probability 1
	for t in range(T - 1, -1, -1):
		for i in hid:
			total = 0  # "empty accumulator" sentinel for logtotal
			for j in hid:
				partial = A[i][j] + B[j][sequence[t + 1]] + beta[j][t + 1]
				total = logtotal(total, partial)
			beta[i][t] = total

	return beta
	

# Viterbi -----------------------------------------------------------
def viterbi(sequence, hid, PI, A, B):
	"""Viterbi decode: most probable hidden-state path for *sequence*.

	Builds a trellis of per-time dicts holding "delta" (best log-score
	ending in each state) and "phi" (back-pointer per state), then
	back-traces from the best final state.  The back-trace assumes the
	hidden states are exactly {0, 1}, with 1 meaning "inside" a region;
	runs of state 1 are written as '1' characters into *result*.

	Returns (result, best_score) where result is a '0'/'1' string of
	length len(sequence).
	"""
	trellis = []
	phi = dict()
	delta = dict()
	# Initial column: prior only; no emission term is added here.
	for i in hid:
		delta[i] = PI[i]
		phi[i] = None
	d = dict()
	d["delta"] = delta
	d["phi"] = phi
	trellis.append(d)
	# One trellis column per observation (len(trellis) == len(sequence)+1).
	for t in sequence:    
		phi = dict()
		delta = dict()
		for i in hid:
			delta[i], phi[i] = computeInnerScores(hid, t, i, A, B, PI, trellis)
		d = dict()
		d["delta"] = delta
		d["phi"] = phi
		trellis.append(d)
	#decode
	# Pick the best-scoring state in the final column.
	best_score = float("-inf")
	mostProbableState = None
	for k, v in trellis[-1]["delta"].items():
		if v > best_score:
			best_score = v;
			mostProbableState = k
	annot = []
	state = "out"
	start = 0
	end = 0
	result = "0"*(len(trellis) - 1)
	# Walk the back-pointers from the last column toward the first,
	# recording each maximal run of state 1 as a segment (1-based
	# start/end) and splicing '1's into *result* for it.
	for i in range(len(trellis)- 1, 0, -1):
		if state == "out" and mostProbableState == 1:
			state = "in"
			end = i
		if state == "in" and mostProbableState == 0:
			state = "out"
			start = i + 1
			n = dict()
			n["start"] = start
			n["end"] = end
			n["length"] = end - start + 1
			annot.append(n)
			result = result[:n['start'] - 1] + '1'*n['length'] + result[n['end']:]
		mostProbableState = trellis[i]["phi"][mostProbableState]
	# NOTE(review): a region still "in" when the loop exits (i.e. one
	# touching the start of the sequence) is never written into
	# *result* -- confirm this is intentional.
	import logging
	logger = logging.getLogger("Viterbi")
	logger.debug(result)
	return (result, best_score)
def computeInnerScores(hid, char, s, A, B, PI, trellis):
	"""One Viterbi cell: best way to land in state *s* after emitting
	*char*, given the last trellis column.

	Returns (best log-score, argmax predecessor state); the predecessor
	stays None if no candidate beats -inf.  *PI* is accepted for
	signature compatibility but unused here.
	"""
	best_score = float("-inf")
	best_prev = None
	emission = B[s][char]  # constant across predecessors, hoisted out
	for prev in hid:
		candidate = emission + A[prev][s] + trellis[-1]["delta"][prev]
		if candidate > best_score:
			best_score = candidate
			best_prev = prev
	return best_score, best_prev
"""
# Viterbi -----------------------------------------------------------
def viterbi(sequence, hid, PI, A, B):
        # Initialization.
        hidden_sequence = [0L] * len(hid)
        score = [0L] * len(hid)
        
        for s in hid:
                hidden_sequence[s] = str(s)
                score[s] = PI[s] + B[s][sequence[0]]
        
        # Iterate across the trellis.
        for t in range(1, len(sequence)):
                new_score = [0L] * len(hid)
                for s in hid:
                        best = getMaxScoreViterbi(hid, score, A, B, s, sequence[t])
                        hidden_sequence[s] += str(best[0])
                        new_score[s] = best[1]
                score = new_score
        
        best_score = float("-inf")
        best_state = 0
        for s in hid:
                if score[s] > best_score:
                        best_score = score[s]
                        best_state = s
        return (hidden_sequence[best_state], best_score)
"""                
	
			
def getMaxScoreViterbi(hid, prev_score, A, B, j, token):
	"""Best predecessor for a Viterbi step that ends in state *j* after
	emitting *token*.

	Implements delta_t(j) = max_i [ delta_{t-1}(i) + a_ij ] + b_j(o_t)
	and returns (best_state, best_score).

	Bug fix: the emission log-probability must come from the
	destination state j (B[j][token]); the previous code read
	B[i][token], charging each candidate predecessor its own emission.
	"""
	best_score = float("-inf")
	best_state = 0
	emission = B[j][token]  # independent of i, hoisted out of the loop
	for i in hid:
		score = prev_score[i] + A[i][j] + emission
		if score > best_score:
			best_score = score
			best_state = i
	return (best_state, best_score)

# Baum-Welch --------------------------------------------------------
def getBestModel(sequence, obs, hid, A, B, PI):
	"""Baum-Welch (EM) training loop for the HMM.

	Starts from the supplied model (A, B, PI), or a random one when all
	three are falsy, then alternates an E-step (alpha, beta, chi, gamma)
	with an M-step (re-estimation) until the Viterbi score changes by
	less than .01 between iterations or about 100 iterations have run.
	Returns the final (A, B, PI), all in log space.

	NOTE(review): convergence is judged on the Viterbi path score rather
	than the forward likelihood -- confirm that is intentional.
	"""
	# Initialize model arbitrarily.
	if not A and not B and not PI:
	  A = getInitialDoubleDistribution(hid, hid)
	  B = getInitialDoubleDistribution(hid, obs)
	  PI = getInitialDistribution(hid)
	
	stop = 0
	count = 0
	oldscore = float("-inf")
	while stop != 1:
		# E-step: forward and backward log-probabilities for the model.
		alpha = getAlphaValues(sequence, hid, PI, A, B)
		beta = getBetaValues(sequence, hid, A, B)
		
		# Viterbi decode; its score drives the convergence test below.
		(hid_seq, score) = viterbi(sequence, hid, PI, A, B)
		
		# chi[(i,j)][t]: posterior of the (i -> j) transition at t;
		# gamma[i][t]: posterior of being in state i at t.
		chi = getChiValues(sequence, hid, A, B, alpha, beta, score)
		gamma = getGammaValues(hid, sequence, chi)
		
		
		# M-step: re-estimate the model from the posteriors.
		PI_bar = deepcopy(PI)
		for i in hid:
			PI_bar[i] = gamma[i][0]
		
		A_bar = deepcopy(A)
		for i in hid:
			for j in hid:
				# 0 is the "empty accumulator" sentinel for logtotal.
				numer = 0
				denom = 0
				for t in range(0, len(sequence)-1):
					numer = logtotal(chi[(i,j)][t], numer)
					denom = logtotal(gamma[i][t], denom)
				# log(expected i->j transitions / expected visits to i).
				A_bar[i][j] = numer - denom
				
		B_bar = deepcopy(B)
		for j in hid:
			for k in obs:
				numer = 0
				denom = 0
				# NOTE(review): the final position len(sequence)-1 is
				# excluded here; standard Baum-Welch sums emission counts
				# over every t -- confirm the truncation is intentional.
				for t in range(0, len(sequence)-1):
					if k == sequence[t]:
						numer = logtotal(gamma[j][t], numer)
					denom = logtotal(gamma[j][t], denom)
				B_bar[j][k] = numer - denom
				
		A = A_bar
		B = B_bar
		PI = PI_bar
		
		print score
		#printExp(PI)
		#printDoubleExp(B)
		#printDoubleExp(A)
		# NOTE(review): Metrics is re-imported on every iteration and is
		# otherwise unused while its call below stays commented out.
		import os
		os.sys.path.append("../")
		import Metrics
		#Metrics.PlotThePercentage(B)

		# Stop when the score has (nearly) stopped improving...
		if (abs(score - oldscore) < .01):
			stop = 1
			
		oldscore = score
		
		#if exp(score) == 1:
		#	stop = 1
		#	print hid_seq
		
		# ...or after ~100 iterations as a hard cap.
		if (count > 100):
			stop = 1
		
		count += 1
	
	print hid_seq
		
	return (A, B, PI)


def getChiValues(sequence, hid, A, B, alpha, beta, score):
	"""Pair-state posteriors for Baum-Welch.

	chi[(i, j)][t] is the log-posterior of transitioning i -> j at time
	t, normalized per time step over all (i, j) pairs.  The final time
	index is left at its fill value and is never read by the caller.
	*score* is accepted for signature compatibility but unused.
	"""
	length = len(sequence)
	chi = dict(((i, j), [0] * length) for i in hid for j in hid)

	for t in range(length - 1):
		norm = 0  # logtotal's "empty accumulator" sentinel
		for i in hid:
			for j in hid:
				value = alpha[i][t] + A[i][j] + B[j][sequence[t + 1]] + beta[j][t + 1]
				chi[(i, j)][t] = value
				norm = logtotal(norm, value)
		# Normalize the whole time slice by its log-total.
		for i in hid:
			for j in hid:
				chi[(i, j)][t] -= norm

	return chi

def getGammaValues(hid, sequence, chi):
	"""Per-state posteriors: gamma[i][t] is the log-posterior of being
	in state i at time t, obtained by log-summing chi over the
	destination state j.  The final time index keeps its fill value."""
	gamma = dict()
	length = len(sequence)
	for i in hid:
		row = [0] * length
		for t in range(length - 1):
			acc = 0  # logtotal's "empty accumulator" sentinel
			for j in hid:
				acc = logtotal(acc, chi[(i, j)][t])
			row[t] = acc
		gamma[i] = row

	return gamma
		
	


# =============================================================================


if __name__ == "__main__":
	# Driver: train a two-state HMM on the sequence read from a FASTA
	# file, starting from a randomly initialized model (A, B, PI None).
	filename = "easy.fasta"
	(sequence, obs) = getSequenceAndKeys(filename)
	hid = getHiddenStates(2)
	model = getBestModel(sequence, obs, hid, None, None, None)
















