# -*- coding: utf-8 -*-

from utils import util
from utils.util import printCPD
from lib.hmm import HiddenMarkovModel
from lib.probability import DictionaryProbDist
from methods import dict_translation
import codecs

# Module-wide debug flag: set True to trace every count update.
global _debug
_debug = False

# Index of each side inside an aligned (greeklish, greek) bigram pair,
# as produced by align2.
_GREEK = 1
_GREEKLISH = 0

#get the hidden states
# Greek bigram chunks seen so far -- the HMM hidden states.
hidden_states = []
#hidden_states = copy.deepcopy(chars.greek_ch)
#hidden_states.remove(u'')
#get the observations/samples
# Greeklish bigram chunks seen so far -- the HMM output symbols.
observations = []
#observations = copy.deepcopy(chars.greeklish_ch)
#observations.remove('')

# greek start chunk -> count of words beginning with it (HMM priors).
startProbability = {}

# (prev_chunk, next_chunk) -> count (HMM transition counts).
hiddenStateTransitions = {}
        
# (greek_chunk, greeklish_chunk) -> count (HMM emission counts).
hiddenStateToObservation = {}

# greek bigram -> corpus frequency; used to weight emission counts.
bigram_gr_count = {}

# NOTE(review): appears unused in this file -- confirm before removing.
_labelled_sequences = []
def split_word2(word):
    """Chop *word* into consecutive two-character chunks.

    On odd-length input the final chunk is a single character.
    The chunks are built with u''.join so they are always unicode,
    even for byte-string input (matches the original behaviour).
    """
    return [u''.join(word[i:i + 2]) for i in range(0, len(word), 2)]

def pad_word(parts_gr):
    """Pad a chunk list so every chunk is two characters wide.

    If the last chunk is a single character, a '!' is appended to it.
    Mutates *parts_gr* in place and returns it.
    """
    last = parts_gr[-1]
    if len(last) == 1:
        parts_gr[-1] = last + '!'
    return parts_gr

def align2(greeklish,greek):
    parts_gr = split_word2(greek)
    parts_grkl = split_word2(greeklish)
    count_gr = 0
    count_grkl = 0
    append_gr = True
    while len(parts_gr)!=len(parts_grkl):
        #print 'Different length!'
        #print parts_gr, parts_grkl
        if append_gr:
            parts_gr.append('!')
            count_gr = count_gr+1
        else :
            parts_grkl.append('!')
            count_grkl = count_grkl+1
        
        if count_gr > 3:
            while '!' in parts_gr: parts_gr.remove('!')
            append_gr = False
        if count_grkl > 3: 
            print 'Giving up on', parts_gr, parts_grkl
            return None
        
            
    sequence = []
    for i in range(0,len(parts_gr)):
        sequence.append((parts_grkl[i],parts_gr[i]))
    return sequence
         
def addWordGreek(sequence, count=1):
#    assert (type(sequence)==types.UnicodeType)
    #print 'adding: ', word_gr.lower().encode('utf-8'), word_grkl.encode('utf-8'), count
    if count<1: count=1
    try:
        start_probability(sequence, count)
        hidden_transition(sequence, count)
    except KeyError:
        print 'KeyError', sequence.encode('utf-8')
        raise 'KeyError'
          
       
def addWordGreeklish(sequence, count=1):
#    assert (type(word_gr)==types.UnicodeType)
#    print 'adding: ', word_gr.lower().encode('utf-8'), word_grkl.encode('utf-8'), count
    if count<1: count=1
    try:
#        sequence = align(word_grkl, word_gr)
        hidden_to_observation(sequence, count)
    except KeyError:
        print 'KeyError', sequence.encode('utf-8')
        raise 'KeyError'

def hidden_to_observation(sequence, count):
    """Accumulate emission counts for each (greek, greeklish) chunk pair.

    NOTE(review): the *count* parameter is never used -- each pair is
    weighted by the greek bigram's corpus frequency from
    bigram_gr_count instead (defaulting to 1 for unseen bigrams).
    """
    for grkl, gr in sequence:
        weight = bigram_gr_count.get(gr, 1)
        if _debug:
            print (u'Adding (%s,%s) - %d' % (gr, grkl, weight)).encode('utf-8')
        pair = (gr, grkl)
        hiddenStateToObservation[pair] = hiddenStateToObservation.get(pair, 0) + weight

        
#def hidden_to_observation(word_gr, word_grkl, count):
#    for i in range(0, len(word_gr)):
#        #if not hiddenStateToObservation[(word_gr[i], word_grkl[i])] : hiddenStateToObservation[(word_gr[i], word_grkl[i])] = 0
#        hiddenStateToObservation[(word_gr[i], word_grkl[i])] = hiddenStateToObservation.get((word_gr[i], word_grkl[i]), 0 ) + count    
        
        
def hidden_transition(sequence, count):
    """Accumulate transition counts between consecutive hidden states.

    Accepts either bare greek chunks (train_gr path) or
    (greeklish, greek) pairs (train2/align2 path).  Chunks that are not
    known hidden states break the chain: they neither receive a
    transition nor serve as a predecessor.
    """
    prev = None
    for item in sequence:
        # Bug fix: train2 passes (greeklish, greek) pairs; the original
        # compared the whole tuple against hidden_states, which never
        # matched, so train2 produced no transition counts at all.
        # Extract the greek side for pairs; bare chunks pass through.
        if isinstance(item, tuple):
            greek = item[_GREEK]
        else:
            greek = item
        if greek not in hidden_states:
            prev = None
            continue
        if prev:
            if _debug:
                print ('Adding (%s,%s) - %d' % (prev.encode('utf-8'), greek.encode('utf-8'), count))
            hiddenStateTransitions[(prev, greek)] = hiddenStateTransitions.get((prev, greek), 0) + count
        prev = greek

def start_probability(sequence, count):    
    start = sequence[0]#[_GREEK]
    if start in hidden_states:
        startProbability[start] =     startProbability.get(start, 0 ) + count
    else:
        print start.encode('utf-8'), 'not in hidden states'

def add_new_states_and_observations(sequence):
    """Register previously unseen symbols from an aligned sequence.

    Greeklish chunks go into the module-level observations list, greek
    chunks into hidden_states; duplicates are skipped.
    """
    for grkl, gr in sequence:
        if grkl not in observations:
            observations.append(grkl)
        if gr not in hidden_states:
            hidden_states.append(gr)

def train_grkl():
    """Feed every alignable pair from the greeklish dictionary file
    into the emission counts.

    Each line of methods/greeklish_dict.txt is "greeklish - greek - rank"
    (rank is unused here).  Returns the number of successfully aligned
    word pairs.
    """
    count = 0
    file_grkl = codecs.open('methods/greeklish_dict.txt', 'r', 'utf-8')
    try:
        for line in file_grkl:
            [word_grkl, word_gr, rank] = line.split(' - ')
            sequence = align2(word_grkl.lower(), word_gr.lower())
            if sequence:
                count = count + 1
                add_new_states_and_observations(sequence)
                addWordGreeklish(sequence)
    finally:
        # Bug fix: the file handle was never closed (resource leak).
        file_grkl.close()
    return count

def train_gr(words):
    """Feed every greek word into the start and transition counts.

    words -- a frequency distribution exposing samples() and count()
    """
    for word_gr in words.samples():
        chunks = pad_word(split_word2(word_gr.lower()))
        addWordGreek(chunks, words.count(word_gr))
        
def bigram_prob(part, count):
    """Add *count* occurrences of greek bigram *part* to the
    module-level bigram frequency table."""
    if part in bigram_gr_count:
        bigram_gr_count[part] += count
    else:
        bigram_gr_count[part] = count

def train(file, function=None):
    """Build a greeklish->greek transliteration HMM from a greek word list.

    Pipeline: collect greek bigram frequencies from *file*, load
    emission counts from the greeklish dictionary (train_grkl), add
    start/transition counts from the greek words (train_gr), then
    assemble the model from the accumulated module-level tables.

    file     -- path to the greek word-list file
    function -- unused; kept for signature parity with train2
    Returns a HiddenMarkovModel.
    """
    print 'Reading and refining words...'
    words = util.refineWords(util.read_words(file))
    print 'Adding words...'
    # First pass: greek bigram corpus frequencies; these later weight
    # the emission counts (see hidden_to_observation).
    for word_gr in words.samples():
        parts_gr = pad_word(split_word2(word_gr.lower()))
        for part in parts_gr:
            bigram_prob(part, words.count(word_gr))
        
    count = train_grkl()
    print 'added', count,'unique greeklish words'
    train_gr(words)

    # Normalize the raw count tables into probability distributions.
    A = util.cpd_from_dict(hiddenStateTransitions, normalize = True)
    B = util.cpd_from_dict(hiddenStateToObservation, normalize = True)
    pi = DictionaryProbDist(startProbability, normalize = True) 
    if(_debug):
        print 'Hidden state transitions'
        printCPD(A)
        print 'Hidden state to observations'
        printCPD(B)
        print 'start proabilities'
        print pi._prob_dict
        print 'bigram_probabilities'
        print bigram_gr_count
    if(_debug): print 'Creating model...'
        
    _model = HiddenMarkovModel(symbols=observations, states=hidden_states, transitions=A, outputs=B, priors=pi)
#    if (_debug): print 'Model:', model
        

    if (_debug): print 'Model:', _model
    return _model
            


# FIXME one should use the greeklish words as reference, and get the counts from the greek word list
# TODO maybe the greeklish count could be calculated by the greek count of a bigram
def train2(file, function=dict_translation.greeklish):
    """Build the HMM from a greek word list alone, generating the
    greeklish side of each training pair with *function*.

    For every greek word, *function* produces a greeklish rendering;
    the pair is aligned with align2 and fed into the start, transition
    and emission counts (weighted by the word's corpus frequency).

    file     -- path to the greek word-list file
    function -- callable mapping a greek word to its greeklish form
    Returns a HiddenMarkovModel.
    """
    print 'Reading and refining words...'
    words = util.refineWords(util.read_words(file))
    print 'Adding words...'
    count = 0
    for word_gr in words.samples():
        word_grkl = function(word_gr.lower())
        if(word_grkl): #TODO is this the correct behavior?    
            if _debug:
                print 'adding: ', word_gr.lower().encode('utf-8'), word_grkl.encode('utf-8'), words.count(word_gr)
            sequence = align2(word_grkl, word_gr.lower())
            if sequence:
                count = count+1
                add_new_states_and_observations(sequence)
                addWordGreek(sequence, words.count(word_gr))
                addWordGreeklish(sequence )#, words.count(word_gr))
    #special case hack                
#    hiddenStateTransitions[(u'ς',u'ς')] = 1
    print 'added', count,'unique words'
    # Normalize the raw count tables into probability distributions.
    A = util.cpd_from_dict(hiddenStateTransitions, normalize = True)
    B = util.cpd_from_dict(hiddenStateToObservation, normalize = True)
    pi = DictionaryProbDist(startProbability, normalize = True) 
    if(_debug):
        print 'Hidden state transitions'
        printCPD(A)
        print 'Hidden state to observations'
        printCPD(B)
        print 'start proabilities'
        print pi._prob_dict
    if(_debug): print 'Creating model...'
        
    _model = HiddenMarkovModel(symbols=observations, states=hidden_states, transitions=A, outputs=B, priors=pi)
#    if (_debug): print 'Model:', model
        

    if (_debug): print 'Model:', _model
    return _model

    

    


def test():
    words = [(u'ορέστης', 'orestis'),
             (u'άρης', 'aris'),
             (u'τάκης', 'takis'),
             ]
    for (word_gr, word_grkl) in words:
        addWordGreeklish(word_gr, word_grkl)
        addWordGreek(word_gr)
    
    res1 =  [(char,cnt) for (char, cnt) in startProbability.items() if cnt>0]
    res2 = [(char[0], char[1],cnt) for char, cnt in hiddenStateTransitions.items() if cnt>0]
    res3 = [(char[0], char[1],cnt) for char, cnt in hiddenStateToObservation.items() if cnt>0]
    
    print 'startProbability'
    for (c, cnt) in res1:
        print "%s - %s" % (c.encode('utf-8'), cnt)
    print 'hidden state transitions'
    for (c1, c2, cnt) in res2:
        print "(%s,%s) - %s" % (c1.encode('utf-8'),c2.encode('utf-8'), cnt)        
    print 'hidden state to observations'
    for (c1, c2, cnt) in res3:
        print "(%s,%s) - %s" % (c1.encode('utf-8'),c2.encode('utf-8'), cnt)        
    
    #print hiddenStateToObservation.values()
    
# Run the module's smoke test when executed directly.
if __name__ == '__main__':
    test()
    
        

