from sandbox.align import split_smart
from utils.util import printSeq
from sandbox.align import split_from_list
import cPickle
from lib.myhmm import MyHMM
from utils import util, classify
from utils import chars
import re
from sandbox.align import align3,split_word_gr,split_word_grkl
#from decimal import *

# Initial (empty) HMM parameters handed to MyHMM in train2(); the real
# probabilities are filled in during training.  The single '!' entry is
# the start/padding state (see pad_word, which pads with '!').
state_transitions = {}
start_probabilities = {'!':0}
observations_prob = {}
states = ['!']
observations = []

# Base directory for all training/test data files and cached pickled models.
datadir = '../data/'


def dump_model(model, datafile):
    """Pickle *model* to *datafile* using the highest cPickle protocol.

    The original leaked the file handle (and shadowed the builtin
    ``file``); ``with`` guarantees it is closed even if pickling fails.
    """
    with open(datafile, 'wb') as out:
        cPickle.dump(model, out, cPickle.HIGHEST_PROTOCOL)

def read_model(datafile):
    """Unpickle and return the model stored in *datafile*.

    Closes the file handle (the original leaked it).  Propagates IOError
    for a missing file and cPickle errors for a corrupt one; callers in
    doWork() rely on that to trigger retraining.
    """
    with open(datafile, 'rb') as inp:
        return cPickle.load(inp)

def doWork():
    try:
        model_phon = read_model(datadir+'model.phon.data')
        chain_phon = read_model(datadir+'chain.phon.data')
    except:
        print "Phon Model not found, training..."
        model_phon = train2(datadir+'greekwords.txt',datadir+'phonetic.txt')
        dump_model(model_phon, datadir+'model.phon.data')  
        chain_phon = classify.trainChain(datadir+'phonetic.txt')
        dump_model(chain_phon, datadir+'chain.phon.data')
    print 'Phonetic', model_phon        
    try:
        model_orth = read_model(datadir+'model.orth.data')
        chain_orth = read_model(datadir+'chain.orth.data')
    except:
        print "Orth Model not found, training..."
        model_orth = train2(datadir+'greekwords.txt',datadir+'orthographic.txt')
        dump_model(model_orth, datadir+'model.orth.data')
        chain_orth = classify.trainChain(datadir+'orthographic.txt')
        dump_model(chain_orth, datadir+'chain.orth.data')
    print 'Orthographic', model_orth      
    try:
        model_oth = read_model(datadir+'model.oth.data')
        chain_oth = read_model(datadir+'chain.oth.data')
    except:
        print "Others Model not found, training..."
        model_oth = train2(datadir+'greekwords.txt',datadir+'others.txt')
        dump_model(model_oth, datadir+'model.oth.data')
        chain_oth = classify.trainChain(datadir+'others.txt') 
        dump_model(chain_oth, datadir+'chain.oth.data')
    print 'Others', model_oth
    try:
        model_all = read_model(datadir+'model.all.data')
        chain_all = read_model(datadir+'chain.all.data')
    except:
        print "All Model not found, training..."
        model_all = train2(datadir+'greekwords.txt',datadir+'word_list.txt')        
        dump_model(model_all, datadir+'model.all.data')        
        chain_all = classify.trainChain(datadir+'word_list.txt')  
        dump_model(chain_all, datadir+'chain.all.data')        
    print 'All', model_all
    test_file = open(datadir+'test_words.txt', 'rb')
    res_file = open(datadir+'test_res.txt', 'wb')
    contents = test_file.read()
    if contents.startswith('\xef\xbb\xbf'):
        contents = contents[3:] #strip BOM       
    contents = unicode(contents, 'utf-8')
    pattern = re.compile(r'(\w+) - (\w+) - (\d+) - ', re.UNICODE)
    print "Testing"
    parts_phon = model_phon.observations
    parts_phon.sort(key=len, reverse=True)
    parts_orth = model_orth.observations
    parts_orth.sort(key=len, reverse=True)
    parts_oth = model_oth.observations
    parts_oth.sort(key=len, reverse=True)
    parts_all = model_all.observations
    parts_all.sort(key=len, reverse=True)
    for line in contents.splitlines():
        print ".",
        m = pattern.match(line)
        if m == None: continue
        grkl_word = m.group(1)
        
        
        sequence = split_smart(grkl_word.lower(),parts_phon, chars.dipthongs_split)
#        phon_prob = chain_phon.probability(sequence)
#        orth_prob = chain_orth.probability(sequence)
#        oth_prob = chain_oth.probability(sequence)
#        all_prob = chain_all.probability(sequence)        
#        max_prob = max([phon_prob,orth_prob,oth_prob,all_prob])
#        if max_prob==phon_prob:
        if(sequence):
            result = model_phon.viterbi(sequence)
            prob = model_phon.forward(sequence)            
            method= "phonetic"
            add_entry(res_file, grkl_word, sequence, result, prob, method)
#        elif max_prob==orth_prob:
        sequence = split_smart(grkl_word.lower(),parts_orth, chars.dipthongs_split)
        if(sequence):
            result = model_orth.viterbi(sequence)
            prob = model_orth.forward(sequence)            
            method= "orthographic"
            add_entry(res_file, grkl_word, sequence, result, prob, method)        
#        elif max_prob==oth_prob:
        sequence = split_smart(grkl_word.lower(),parts_oth, chars.dipthongs_split)
        if(sequence):
            result = model_oth.viterbi(sequence)
            prob = model_oth.forward(sequence)                        
            method= "others"            
            add_entry(res_file, grkl_word,  sequence,result, prob, method)        
#        else:
        sequence = split_smart(grkl_word.lower(),parts_all, chars.dipthongs_split)
        if(sequence):
            result = model_all.viterbi(sequence)
            prob = model_all.forward(sequence)            
            method = "all"
            add_entry(res_file, grkl_word, sequence, result, prob, method)
        
        res_file.write('\n')
    print "Finished"    

def add_entry(res_file, grkl_word, sequence, result, prob, method):
    """Append one ' - '-separated result record to the binary *res_file*.

    Fields: greeklish word, joined greek result, printed input sequence,
    printed decoded sequence, forward probability, method name.  The
    record ends with CRLF and is flushed so interrupted runs still leave
    usable output.

    Fix: every unicode field is now UTF-8 encoded before writing.  The
    original encoded only two of them; writing the others to a binary
    file relied on implicit ASCII encoding and would raise
    UnicodeEncodeError on any non-ASCII content.
    """
    fields = [
        grkl_word.encode('utf-8'),
        u''.join(result).encode('utf-8'),
        printSeq(sequence).encode('utf-8'),
        printSeq(result).encode('utf-8'),
        str(prob),
        method,
    ]
    res_file.write(' - '.join(fields))
    res_file.write('\r\n')
    res_file.flush()
        
        

    
    
    

def train2(greek_file,greeklish_file):
    print 'Reading and refining words...'
    greek_words = util.simplifyWords(util.refineWords(util.read_words(greek_file)))
    print 'Refined: Total %d words, %d unique' % (greek_words.N(), greek_words.B())
    n = greek_words.N()
    assert n > 0
    print 'Adding words...'
    hmm = MyHMM(states, observations, start_probabilities, state_transitions,observations_prob)
    
    obs_sequences = []
    grkl_file = open(greeklish_file, 'rb')
    contents = grkl_file.read()
    if contents.startswith('\xef\xbb\xbf'):
        contents = contents[3:] #strip BOM       
    contents = unicode(contents, 'utf-8')
    pattern = re.compile(r'(\w+) - (\w+) - (\d+) - ', re.UNICODE)
    print 'Aligning...',
    #print repr(contents)
    for line in contents.splitlines():
        #print repr(line)
        #print repr(line.split('-'))
        m = pattern.match(line)
        if m == None: continue
        grkl_word = m.group(1)
        
        gr_word = m.group(2)
        gr_word = gr_word.lower().translate(chars.simple_ch)
        
        sequence = align3(grkl_word.lower(), gr_word)
        if sequence is None: continue
        obs_sequences.append((sequence, 1))
    print 'Done!'
    print 'Training observations...',
    hmm.train_observations(obs_sequences, True, True)
    print 'Done!'
    
    print 'Splitting...',
    state_sequences = []
    parts_list = hmm.states
    parts_list.sort(key=len, reverse=True)
    for word in greek_words.samples():
        # FIXME the split word gr method is wrong!!!
        
        parts_gr = split_from_list(word.lower(), parts_list)
        if parts_gr == None: continue
        state_sequences.append((parts_gr,greek_words.count(word)))
        
    
    
    print 'Done!'
    print 'Training states...',
    hmm.train_states(state_sequences, True)
    print 'Done!'
    return hmm


def train2b():
    """Convenience wrapper: train the 'all words' model from the default
    data files.  Uses the module-level ``datadir`` constant instead of a
    second hard-coded '../data/' path (same value, single source of truth).
    """
    return train2(datadir+'greekwords.txt', datadir+'word_list.txt')

            
            
        
            
    
            
def split_word2b(word):
    """Split *word* into consecutive two-character chunks.

    A trailing odd character becomes a final one-character chunk; an
    empty word yields an empty list.  Chunks are unicode strings.
    """
    return [u''.join(word[start:start + 2])
            for start in range(0, len(word), 2)]

def pad_word(parts_gr):
    """Pad the last part of *parts_gr* with the '!' filler state if it is
    a single character, so every part has length two.

    Mutates the list in place and returns it.  Fix: an empty list is now
    returned unchanged instead of raising IndexError.
    """
    if parts_gr and len(parts_gr[-1]) == 1:
        parts_gr[-1] += '!'
    return parts_gr


if __name__ == '__main__':
    # Script entry point: load/train all four models and run the test set.
    doWork()
#    hmm = train2b()
#    print hmm.viterbi(['or','es','ti','s'])
    