# -*- coding: utf-8 -*-
# stdlib
import sys

# third-party
from numarray import *
from nltk_lite.tag.hmm import HiddenMarkovModel
from nltk_lite.probability import DictionaryProbDist

# local
import monograms
import util
import chars
import table_conversion

list_hidden_trans = []
list_hiddenStateToObservations=[]
list_startProbability=[]
#get the hidden states
hidden_states = chars.greek_ch
hidden_states.remove(u'')
#get the observations/samples
observations = chars.greeklish_ch
observations.remove(u'')



def init(total_number=1):
    hidden_transitions = monograms.hiddenStateTransitions
    hiddenStateToObservations = monograms.hiddenStateToObservation
    startProbability = monograms.startProbability

    cnt1 = 0
    for c1 in hidden_states:
        
        list_hidden_trans.append([])
        for c2 in hidden_states:
            list_hidden_trans[cnt1].append(  float(hidden_transitions[(c1,c2)])/total_number)
        cnt1+=1
    del cnt1
    
    cnt1 = 0
    for c1 in hidden_states:
        
        list_hiddenStateToObservations.append([])
    
        for c2 in observations:
            list_hiddenStateToObservations[cnt1].append( float(hiddenStateToObservations[(c1,c2)])/total_number)
        cnt1+=1
    del cnt1

    for c1 in hidden_states:
        list_startProbability.append( float(startProbability[c1])/total_number)

def debug_print():
    print 'list_hidden_trans', list_hidden_trans
    print 'list_hiddenStateToObservations', list_hiddenStateToObservations
    print 'list_startProbability', list_startProbability
    
def printCPD(cpd):
        for condition in cpd.conditions():
            print condition.encode('utf-8'), repr(condition)
            print cpd[condition]._prob_dict
            print '---'

def demo_small():
    import util
    from nltk_lite.probability import  DictionaryProbDist
    words = [(u'ορέστης', 32),(u'γιάννης', 22),(u'τάκης', 12),(u'μαρίζα', 2), 
             (u'βασίλης', 2),(u'δημήτρης', 2),(u'θοδωρής', 2),(u'ξάπλα', 2),(u'υγεία', 2),
             (u'φρόσω', 2), (u'χαλί', 2), (u'έλα', 42), (u'ψάρι', 2), (u'αφού', 22), (u'κώλος', 6), 
             (u'υϊός', 4), (u'αΰλος', 2), (u'μαϋρα', 42),
             (u'πουραΐμη', 4), (u'ρούφα', 8), (u'αυτόςα', 42), (u'\u03c2', 42),]
    for (word, count) in words:
        monograms.addWordGreek(word, count)
        monograms.addWordGreeklish(word , table_conversion.greeklish(word), count)
    
    A = util.cpd_from_dict(monograms.hiddenStateTransitions, normalize = True)
    B = util.cpd_from_dict(monograms.hiddenStateToObservation, normalize = True)
    pi = DictionaryProbDist(monograms.startProbability, normalize = True) 
    
    
    
    print 'Hidden state transitions'
    printCPD(A)
    print 'Hidden state to observations'
    printCPD(B)
    print 'start proabilities'
    print pi._prob_dict
    model = HiddenMarkovModel(symbols=observations, states=hidden_states,
                              transitions=A, outputs=B, priors=pi)
    
    for test in ['fyssas',u'giannhs']:

        sequence = [(t, None) for t in test]
        print 'Testing with state sequence', test
        try:
            bp = model.best_path(sequence)
            bps = ''.join(bp)
            print 'best path', bp, bps.encode('utf-8')

        except KeyError, ke:
            print 'KeyError: ', ke[0].encode('utf-8'), ke
            
    print 'debug'    
    print 'done'

def train(filename, trans_function, datafile):
    global _debug
    if(_debug):     print 'Training monograms...'
    monograms.train(filename, trans_function)
    A = util.cpd_from_dict(monograms.hiddenStateTransitions, normalize = True)
    B = util.cpd_from_dict(monograms.hiddenStateToObservation, normalize = True)
    pi = DictionaryProbDist(monograms.startProbability, normalize = True) 
    if(_debug):
        print 'Hidden state transitions'
        printCPD(A)
        print 'Hidden state to observations'
        printCPD(B)
        print 'start proabilities'
        print pi._prob_dict
    if(_debug): print 'Creating model...'
        
    model = HiddenMarkovModel(symbols=observations, states=hidden_states, transitions=A, outputs=B, priors=pi)
    if (_debug): print 'Model:', model
    
    dump_model(model, datafile)
    return model

import cPickle
def dump_model(model, datafile):
    file =  open(datafile, 'wb')
    cPickle.dump(model, file, cPickle.HIGHEST_PROTOCOL)
    pass

def read_model(datafile):
    file =  open(datafile, 'rb')
    model = cPickle.load(file)
    return model

def convert(model, greeklishword):
    sequence = [(t, None) for t in greeklishword]
    bp = model.best_path(sequence)
    bps = u''.join(bp)
    return bps
    

def demo():
    model = train('D:\orestis\Sxoli\diplomatiki\data\words.ref.txt', table_conversion.greeklish)
    for test in ['orestis','giannis', 'mitsotakis']:

        sequence = [(t, None) for t in test]

#        try:
        bp = model.best_path(sequence)
        bps = ''.join(bp)
        print 'Testing with state sequence', test
        print 'best path', bp, bps.encode('utf-8')
        print 'probability =', model.probability(sequence)
        print 'log_probability =', model.log_probability(sequence)        
#        print 'tagging =    ', model.tag(sequence)
#        print 'p(tagged) =  ', model.probability(sequence)
#        print 'H =          ', model.entropy(sequence)
#        print 'H_exh =      ', model._exhaustive_entropy(sequence)
#        print 'H(point) =   ', model.point_entropy(sequence)
#        print 'H_exh(point)=', model._exhaustive_point_entropy(sequence)
#        except OverflowError, ex:
#            print ex.args
        print

def print_usage():
    print """  HMM implementation by Orestis Markou
    usage: python hmm.py [options] greeklish
    [options]
    -t, --train :  Trains the hmm. 
                   will use default values if not specified
    -g, --greekwordfile filename:  filename must be a word file in the format
                                   greekword - count - (frequency)
                                   default: greekwords.txt
                                   used only with -t
    -m, --method method: the method must be one of: table_conversion 
                                    default: table_conversion
                                    used only with -t                                    
    -d, --datafile filename: the filename will be the file that the hmm training
                             data will be loaded from, or stored if --train is used
                             default: data.hmm
    -h, --help: prints this text
    -v : verbose output
    """
import getopt

def main(argv): 
    greekwordfile = "greekwords.txt"                                    
    datafile = "data.hmm"
    translation_function = table_conversion.greeklish 
    _train = False
    if(not argv): print_usage(); sys.exit(2)
    try:                                
        opts, args = getopt.getopt(argv, "htg:m:d:v", ["help", "train", "greekwordfile=", "method=", "datafile="]) 
    except getopt.GetoptError:           
        print_usage()                          
        sys.exit(2) 
    for opt, arg in opts:                
        if opt in ("-h", "--help"):      
            print_usage()                     
            sys.exit()                  
        elif opt in ('-t', '--train'):                
            _train = True              
        elif opt in ("-g", "--greekwordfile"): 
            greekwordfile = arg   
        elif opt in ("-m", "--method"): 
            if arg == "table_conversion":
                print "arg: ", arg
                translation_function = table_conversion.greeklish
            else: print "unsupported translation function"; sys.exit(1)
        elif opt in ("-d", "--datafile"): 
            datafile = arg              
        elif opt == '-v':                
            global _debug
            _debug = 1    

    greeklish_words = args
    if(_train):
        model = train(greekwordfile, translation_function, datafile)
    else:
        model = read_model(datafile)
    
    for word in greeklish_words:
        print convert(model, word).encode('utf-8')
        
    

if __name__ == '__main__':
    main(sys.argv[1:])

        

