# -*- coding: utf-8 -*-

from utils import util
from utils.util import printCPD
from lib.hmm import HiddenMarkovModel
from lib.probability import DictionaryProbDist
from methods import dict_translation
import codecs

# Module-wide debug switch; flipped to True by test2().
global _debug  # NOTE(review): `global` at module level is a no-op
_debug = False

# Direction flags -- presumably select translation direction; not used in
# this chunk, verify against callers.
_GREEK = 1
_GREEKLISH = 0

#get the hidden states
# Greek bigram parts collected during training (HMM hidden states).
hidden_states = []
#hidden_states = copy.deepcopy(chars.greek_ch)
#hidden_states.remove(u'')
#get the observations/samples
# Greeklish bigram parts collected during training (HMM emission symbols).
observations = []
#observations = copy.deepcopy(chars.greeklish_ch)
#observations.remove('')

# start char/part -> count of words beginning with it (initial distribution).
startProbability = {}

# (prev_part, next_part) -> count: hidden-state transition counts.
hiddenStateTransitions = {}

# (greek_part, greeklish_part) -> count: emission counts.
hiddenStateToObservation = {}

# greek bigram -> corpus count; only filled from code that is currently
# commented out in train().
bigram_gr_count = {}

# NOTE(review): unused in this chunk of the file.
_labelled_sequences = []

def split_word1(word):
    """Return the overlapping character bigrams of *word*.

    A one-character word is returned as a single-element list; an empty
    word yields an empty list.
    """
    if len(word) == 1:
        return [word]
    return [u''.join(pair) for pair in zip(word, word[1:])]

def split_word2b(word):
    """Split *word* into consecutive non-overlapping two-character chunks.

    When the length is odd the final chunk is a single character.
    """
    return [u''.join(word[i:i + 2]) for i in range(0, len(word), 2)]

def pad_word(parts_gr):
    """Pad the final part of *parts_gr* with '!' in place when it is a
    single character, so every part is two characters long.

    Returns the (mutated) list.
    """
    last = parts_gr[-1]
    if len(last) == 1:
        parts_gr[-1] = last + '!'
    return parts_gr

def align2(greeklish,greek):
    """Align a greeklish word to its greek original, bigram by bigram.

    Both words are split into overlapping bigrams (split_word1).  While the
    two lists differ in length, the greek side is padded with '!' parts (up
    to 3 times); if the lengths still never match, the pads are stripped and
    the greeklish side is padded instead (again up to 3 times) before giving
    up.

    Returns a list of (greeklish_part, greek_part) pairs, or None when no
    alignment was found.
    """
    parts_gr = split_word1(greek)
    parts_grkl = split_word1(greeklish)
    count_gr = 0
    count_grkl = 0
    append_gr = True  # pad the greek side first, switch sides on failure
    while len(parts_gr)!=len(parts_grkl):
        #print 'Different length!'
        #print parts_gr, parts_grkl
        if append_gr:
            parts_gr.append('!')
            count_gr = count_gr+1
        else :
            parts_grkl.append('!')
            count_grkl = count_grkl+1
        
        if count_gr > 3:
            # Greek-side padding failed: undo it and pad greeklish instead.
            while '!' in parts_gr: parts_gr.remove('!')
            append_gr = False
        if count_grkl > 3: 
            print 'Giving up on', parts_gr, parts_grkl
            return None
        
            
    # Lengths match: zip the parts into (greeklish, greek) pairs.
    sequence = []
    for i in range(0,len(parts_gr)):
        sequence.append((parts_grkl[i],parts_gr[i]))
    return sequence
         
def addWordGreek(word, count=1):
    """Record start-character and bigram-transition counts for a greek word.

    word  -- the (lower-cased) greek word, as unicode
    count -- corpus frequency of the word, clamped to a minimum of 1

    Raises KeyError when one of the counter helpers does.
    """
    if count < 1:
        count = 1
    try:
        # First character feeds the initial-state distribution.
        start_probability(word[0], count)
        # Each (bigram, following-char) pair feeds the transition counts.
        for (bigram, nxt) in split3(word):
            hidden_transition(bigram, nxt, count)
    except KeyError:
        # FIX: the original `raise 'KeyError'` raised a TypeError (string
        # exceptions are not supported); re-raise the real exception.
        raise
    
def split3(word):
    """Return (bigram, following-char) transition pairs over *word*.

    Words shorter than three characters produce a single pair padded with
    '!'.  The trailing bigram has no successor and is therefore dropped.
    """
    if len(word) < 3:
        return [(word, '!')]
    return [(u''.join((word[i], word[i + 1])), word[i + 2])
            for i in range(len(word) - 2)]
            
        

          
       
def addWordGreeklish(sequence, count=1):
#    assert (type(word_gr)==types.UnicodeType)
#    print 'adding: ', word_gr.lower().encode('utf-8'), word_grkl.encode('utf-8'), count
    if count<1: count=1
    try:
#        sequence = align(word_grkl, word_gr)
        hidden_to_observation(sequence, count)
    except KeyError:
        print 'KeyError', sequence.encode('utf-8')
        raise 'KeyError'

def hidden_to_observation(hidden, obs, count):
    """Accumulate *count* into the emission counter for (hidden, obs)."""
    if _debug:
        print (u'Adding (%s,%s) - %d' % (hidden,obs,count)).encode('utf-8')
    key = (hidden, obs)
    hiddenStateToObservation[key] = hiddenStateToObservation.get(key, 0) + count

        
#def hidden_to_observation(word_gr, word_grkl, count):
#    for i in range(0, len(word_gr)):
#        #if not hiddenStateToObservation[(word_gr[i], word_grkl[i])] : hiddenStateToObservation[(word_gr[i], word_grkl[i])] = 0
#        hiddenStateToObservation[(word_gr[i], word_grkl[i])] = hiddenStateToObservation.get((word_gr[i], word_grkl[i]), 0 ) + count    
        
        
def hidden_transition(prev, next, count):
    """Accumulate *count* into the transition counter for (prev, next)."""
    if (_debug):
        print ('Adding (%s,%s) - %d' % (prev.encode('utf-8'), next.encode('utf-8'), count))
    key = (prev, next)
    hiddenStateTransitions[key] = hiddenStateTransitions.get(key, 0) + count

def start_probability(start, count):
    """Accumulate *count* into the word-initial counter for *start*."""
    previous = startProbability.get(start, 0)
    startProbability[start] = previous + count
    
def add_new_states_and_observations(sequence):
    """Register unseen parts from an aligned *sequence*: greeklish parts
    as observations, greek parts as hidden states (order preserved)."""
    for grkl_part, gr_part in sequence:
        if grkl_part not in observations:
            observations.append(grkl_part)
        if gr_part not in hidden_states:
            hidden_states.append(gr_part)

def train_grkl():
    """Read the greeklish dictionary, align each entry and feed the
    emission counters.

    Each line of 'methods/greeklish_dict.txt' is 'greeklish - greek - rank'.
    Returns the number of successfully aligned words.
    """
    count = 0
    file_grkl = codecs.open('methods/greeklish_dict.txt', 'r', 'utf-8')
    try:
        for line in file_grkl:
            [word_grkl, word_gr, rank] = line.split(' - ')
            sequence = align2(word_grkl.lower(), word_gr.lower())
            if sequence:
                count = count + 1
                add_new_states_and_observations(sequence)
                addWordGreeklish(sequence)
    finally:
        # FIX: the file handle was previously never closed.
        file_grkl.close()
    return count

def train_gr(words):
    """Feed every greek word (lower-cased, with its corpus count) into the
    start/transition counters via addWordGreek()."""
    for sample in words.samples():
        addWordGreek(sample.lower(), words.count(sample))
        
def bigram_prob(part, count):
    """Accumulate *count* into the greek bigram frequency table."""
    bigram_gr_count[part] = count + bigram_gr_count.get(part, 0)

def train(file, function=None):
    """Train and return a HiddenMarkovModel for greeklish->greek mapping.

    file     -- path to the greek word corpus, read via util.read_words
    function -- unused here; NOTE(review): confirm callers before removing

    Emission counts come from the aligned greeklish dictionary
    (train_grkl); transition and start counts from the greek corpus
    (train_gr).
    """
    print 'Reading and refining words...'
    words = util.simplifyWords(util.refineWords(util.read_words(file)))
    print 'Adding words...'
#    for word_gr in words.samples():
#        parts_gr = split_word1(word_gr.lower())
#        for part in parts_gr:
#            bigram_prob(part, words.count(word_gr))
        
    count = train_grkl()
    print 'added', count,'unique greeklish words'
    train_gr(words)

    # Normalize the raw counters into the HMM parameter distributions.
    A = util.cpd_from_dict(hiddenStateTransitions, normalize = True)  # transitions
    B = util.cpd_from_dict(hiddenStateToObservation, normalize = True)  # emissions
    pi = DictionaryProbDist(startProbability, normalize = True)  # start priors
    if(_debug):
        print 'Hidden state transitions'
        printCPD(A)
        print 'Hidden state to observations'
        printCPD(B)
        print 'start proabilities'
        print pi._prob_dict
        print 'bigram_probabilities'
        print bigram_gr_count
    if(_debug): print 'Creating model...'
        
    _model = HiddenMarkovModel(symbols=observations, states=hidden_states, transitions=A, outputs=B, priors=pi)
#    if (_debug): print 'Model:', model
        

    if (_debug): print 'Model:', _model
    return _model
            


# FIXME one should use the greeklish words as reference, and get the counts from the greek word list
# TODO maybe the greeklish count could be calculated by the greek count of a bigram

    
def test2():
    """Debug run: train only the greek-side counters from 'greekwords.txt'."""
    global _debug
    _debug=True
    print 'Reading and refining words...'
    words = util.refineWords(util.read_words('greekwords.txt'))
    print 'Adding words...'
    train_gr(words)

def test():
    words = [(u'ορέστης', 'orestis'),
             (u'άρης', 'aris'),
             (u'τάκης', 'takis'),
             ]
    for (word_gr, word_grkl) in words:
        addWordGreeklish(word_gr, word_grkl)
        addWordGreek(word_gr)
    
    res1 =  [(char,cnt) for (char, cnt) in startProbability.items() if cnt>0]
    res2 = [(char[0], char[1],cnt) for char, cnt in hiddenStateTransitions.items() if cnt>0]
    res3 = [(char[0], char[1],cnt) for char, cnt in hiddenStateToObservation.items() if cnt>0]
    
    print 'startProbability'
    for (c, cnt) in res1:
        print "%s - %s" % (c.encode('utf-8'), cnt)
    print 'hidden state transitions'
    for (c1, c2, cnt) in res2:
        print "(%s,%s) - %s" % (c1.encode('utf-8'),c2.encode('utf-8'), cnt)        
    print 'hidden state to observations'
    for (c1, c2, cnt) in res3:
        print "(%s,%s) - %s" % (c1.encode('utf-8'),c2.encode('utf-8'), cnt)        
    
    #print hiddenStateToObservation.values()
    
# Script entry point: run the greek-corpus debug training pass.
if __name__ == '__main__':
    test2()
    
        

