import classify
import converter 
import re
import utils
import char_inputs

__doc__ = """Module used for batch evaluation of different parameters."""

datadir = '../data_eval/'


def _suite_run(suffix, word_dict):
    """Build one parameter set; only 'suffix' and 'word_dict' vary per run."""
    return {
        # inputs
        'number_of_clusters': 9,
        'distance': classify.eucl_distance,
        'char_weights': char_inputs.weights,
        'test_ratio': (1.0 / 15.0),
        'extra_weight': 10,
        'greekwords': datadir + 'greekwords2.txt',
        'greeklish_words': datadir + 'word_list3.txt',
        'word_dict': word_dict,

        # prefix for output files so they can be distinguished
        'suffix': suffix,

        # outputs
        'test_filename': datadir + 'test_words.txt',
        'result_filename': datadir + 'results.txt',
        'model_format': datadir + 'model_%s.data',
        'cluster_format': datadir + 'cluster_%s.txt',
    }


# One run with the word dictionary enabled, one without it.
suite_parameters = [
    _suite_run('cl_9_w10_dict', datadir + 'word_dict.txt'),
    _suite_run('cl_9_w10_none', None),
]
import copy

def multiple_runs(parameters, times=10, offset=0):
    """Expand each parameter set into one copy per run index.

    parameters -- list of parameter dicts (see ``suite_parameters``).
    times -- exclusive upper bound of the run index.
    offset -- first run index to generate.

    Returns a new list of shallow-copied dicts whose 'suffix' has the run
    index appended ('<suffix>_<i>'); the input dicts are left untouched.
    """
    return [
        dict(base, suffix=base['suffix'] + '_' + str(run_idx))
        for run_idx in range(offset, times)
        for base in parameters
    ]


def evaluate(parameters):
    """Run one full train/evaluate cycle for a single parameter set.

    parameters -- one dict from ``suite_parameters`` (after multiple_runs
    has appended the run index to 'suffix').

    Side effects (all output paths suffixed with '.'+parameters['suffix']):
      * writes the sampled test-word file,
      * writes per-cluster word/model files (via classify / converter),
      * writes a raw results file with one line per (test word, converter)
        pair plus a 'Correct is:<idx>' line naming the closest-centroid
        cluster for that word.
    Returns nothing.
    """
    print parameters
    
    number_of_clusters = parameters['number_of_clusters']
    distance = parameters['distance']
    greekwords = parameters['greekwords']
    greeklish_words = parameters['greeklish_words']
    word_dict = parameters['word_dict']
    char_weights = parameters['char_weights']
    test_ratio = parameters['test_ratio']
    extra_weight=parameters['extra_weight']


    # Suffix every output path so batched runs do not clobber each other.
    test_filename = parameters['test_filename']+'.'+parameters['suffix']
    result_filename = parameters['result_filename']+'.'+parameters['suffix']
    model_format = parameters['model_format']+'.'+parameters['suffix']
    cluster_format = parameters['cluster_format']+'.'+parameters['suffix']


    
    # Reserve test_ratio of the words for testing; train on the remainder
    # (the returned filename is the '.processed' training list).
    greeklish_words = extract_test_words(greeklish_words,test_filename,test_ratio)
    if word_dict is None:
        word_dict = {}
    else:
        word_dict = utils.dict_from_file(word_dict)
    # Cluster the training words; one centroid (and cluster file) per cluster.
    centroids = classify.classify_words(greeklish_words,    
                   distance , number_of_clusters, 
                   cluster_format,
                   char_weights, extra_weight, word_dict)
    
    # Train one Converter per cluster from that cluster's model/word files.
    converters=[]
    for i in range(0,len(centroids)): 
        conv = converter.Converter(model_format%str(i),greekwords, cluster_format%str(i))
        converters.append(conv)
    # NOTE(review): test_file and res_file are never closed - presumably
    # relying on process exit; consider with/close.
    test_file = open(test_filename, 'rb')
    res_file = open(result_filename, 'wb')
    contents = test_file.read()
    if contents.startswith('\xef\xbb\xbf'):
        contents = contents[3:] #strip BOM       
    contents = unicode(contents, 'utf-8')
    # Test lines look like '<greeklish> - <greek> - <number> - '.
    pattern = re.compile(r'(\w+) - (\w+) - (\d+) - ', re.UNICODE)
    n=0
    for line in contents.splitlines():
        print ".",  # progress dot per word; the count is printed every 10 words
        n+=1
        if n%10==0: print n

        m = pattern.match(line)
        if m == None: continue
        grkl_word = m.group(1)
        # Index of the centroid nearest this word = the converter that
        # "should" handle it; recorded for collect_results to score against.
        conv_idx = classify.argmin(centroids,distance,classify.vector(grkl_word.lower(),char_weights))
        # Record every converter's attempt, not just the chosen one.
        for i in range(0,len(converters)):
            conv = converters[i]
            result = conv.convert(grkl_word, join=False)
            prob = conv.probability(grkl_word)       
            method= str(i)
            if result:
                add_entry(res_file, grkl_word, conv.sequence(grkl_word), result, prob, method)
        res_file.write('Correct is:'+str(conv_idx))  
        res_file.write('\r\n') 
        res_file.write('----')
        res_file.write('\r\n') 

import random
import codecs

        
def extract_test_words(word_list_filename, test_list_filename, test_ratio, suffix='.processed'):
    """Randomly split a UTF-8 word list into a test file and a training file.

    word_list_filename -- input word list, one word per line, UTF-8 with an
        optional BOM.
    test_list_filename -- path the sampled test words are written to (CRLF,
        UTF-8, to match the rest of the pipeline).
    test_ratio -- fraction (0..1) of the words reserved for testing.
    suffix -- appended to word_list_filename for the training output file.

    Returns the training ('processed') file name.  The split is random
    (random.shuffle), so repeated calls produce different partitions.
    """
    # BUG FIX: the input file was previously never closed.
    word_file = open(word_list_filename, 'rb')
    try:
        contents = word_file.read()
    finally:
        word_file.close()
    if contents.startswith('\xef\xbb\xbf'):
        contents = contents[3:] #strip BOM
    contents = unicode(contents, 'utf-8')
    word_list = contents.splitlines()

    random.shuffle(word_list)
    num_of_test_elements = int(test_ratio*len(word_list))

    test_list = word_list[:num_of_test_elements]
    processed_list = word_list[num_of_test_elements:]

    # BUG FIX: the output files were closed only on the happy path; close
    # them even if a write fails.
    test_file = codecs.open(test_list_filename, 'wb', 'utf-8')
    try:
        for word in test_list:
            test_file.write(word)
            test_file.write('\r\n')
    finally:
        test_file.close()

    processed_file = codecs.open(word_list_filename+suffix, 'wb', 'utf-8')
    try:
        for word in processed_list:
            processed_file.write(word)
            processed_file.write('\r\n')
    finally:
        processed_file.close()

    return word_list_filename+suffix
    
    

        
def add_entry(res_file, grkl_word, sequence, result, prob, method):
    """Append one ' - '-separated result record for *grkl_word* to *res_file*.

    Fields, in order: greeklish word, joined converted word (UTF-8),
    printed input sequence, printed result sequence (UTF-8), probability,
    converter method id.  The record is CRLF-terminated and flushed.
    """
    fields = (
        grkl_word,
        u''.join(result).encode('utf-8'),
        utils.printSeq(sequence),
        utils.printSeq(result).encode('utf-8'),
        str(prob),
        method,
    )
    # Write field-by-field (not one joined string) so mixed str/unicode
    # fields behave exactly as before.
    first = True
    for field in fields:
        if not first:
            res_file.write(' - ')
        res_file.write(field)
        first = False
    res_file.write('\r\n')
    res_file.flush()
    
def collect_results(test_filename,res_filename,output_filename,parameters):
    """Score a raw results file against the test-word solutions.

    test_filename -- test words file ('<greeklish> - <greek> - <n> - ' lines).
    res_filename -- raw per-converter results written by evaluate().
    output_filename -- human-readable summary written here: the parameters,
        every missed word pair, and total/correct/percentage counters.
    parameters -- echoed into the summary for traceability.
    """
#    file_write = codecs.open('results.txt', mode = 'wb', encoding='utf-8')
    # NOTE(review): test_file and res_file are never closed.
    test_file = open(test_filename, 'rb')
    res_file = open(res_filename, 'rb')
    output_file = codecs.open(output_filename, 'wb', 'utf-8')    
    solutions = {}
    contents = test_file.read()
    if contents.startswith('\xef\xbb\xbf'):
        contents = contents[3:] #strip BOM       
    contents = unicode(contents, 'utf-8')
    
    # Build the greeklish -> expected-greek map from the test file.
    pattern = re.compile(r'(\w+) - (\w+) - (\d+) - ', re.UNICODE)
    for line in contents.splitlines():
#        print line.encode('utf-8')
        m = pattern.match(line)
        if m == None: continue
        grkl_word = m.group(1)
        gr_word = m.group(2)
        # Normalise the reference word: lowercase plus the simple_ch
        # translation table (presumably accent stripping - see char_inputs).
        gr_word = gr_word.lower().translate(char_inputs.simple_ch)
        solutions[grkl_word]=gr_word
    output_file.write('------------')
    output_file.write('\r\n')
    output_file.write(str(len(solutions)))
    output_file.write(' solutions')
    output_file.write('\r\n')
    
    contents = res_file.read()
    if contents.startswith('\xef\xbb\xbf'):
        contents = contents[3:] #strip BOM       
    contents = unicode(contents, 'utf-8')
    
    # One add_entry() line per converter attempt:
    # '<greeklish> - <greek> - <seq> - <seq> - <prob> - <method>'.
    pattern = re.compile(r'(?P<greeklish>\w+) - (?P<greek>\w+) - ([-!\w]+) - ([-!\w]+) - (?P<prob>.+?) - (?P<method>\w+)', re.UNICODE)
    results = []  
    # 'word' accumulates the attempts for the current test word until the
    # terminating 'Correct is:<idx>' line is reached.
    word = []
    for line in contents.splitlines():
#        print line.encode('utf-8')
        if line =='':continue
        if line.startswith('--'):continue       
        new = False
        m = pattern.match(line)
        # A non-matching line must be the 'Correct is:' terminator.
        if m == None: new=True;
        if not new:
            #add to the word
            word.append({
                         'greeklish':m.group('greeklish'),
                        'greek':m.group('greek'),                         
                        'prob':float(m.group('prob')),
                        'method':m.group('method'),
                         })
        else:
            #process the previous word
            assert line.startswith('Correct is:')
            # The index of the "correct" (closest-centroid) converter.
            method = line[len('Correct is:'):]
            
#            max_prob=1e-300
#            method = None
            result = None
            grkl = None
            gr = None
            # Keep only the attempt produced by the correct converter and
            # compare it to the expected solution.
            for d in word:
                if method==d['method']:
                    max_prob=d['prob']
                    method=d['method']
                    result=solutions[d['greeklish']]==d['greek']
                    grkl = d['greeklish']
                    gr = d['greek'] 
            if result is not None:
                results.append((grkl, gr, result, method))
            else:
                # NOTE(review): 'd' is the leaked loop variable from above; if
                # 'word' is empty when a 'Correct is:' line arrives this raises
                # NameError - confirm every word has at least one attempt.
                results.append((d['greeklish'], 'None', False, method))
            word = []
    total=0
    correct=0
    output_file.write('------------')
    output_file.write('\r\n')
    output_file.write('Using parameters:')
    output_file.write('\r\n')
    output_file.write(str(parameters))
    output_file.write('\r\n')
    
    # Write every miss as '<greeklish>-<greek>' and count the hits.
    for r in results:
        if r[2]:
            correct+=1
        else:
            output_file.write(r[0])
            output_file.write('-')
            output_file.write(r[1])
            output_file.write('\r\n')              
        total+=1
    output_file.write('------------')
    output_file.write('\r\n')  
    output_file.write('Total:')
    output_file.write(str(total))
    output_file.write('\r\n')      
    output_file.write('Correct:')
    output_file.write(str(correct))
    output_file.write('\r\n')      
    output_file.write('Percentage:')
    output_file.write(str((float(correct)/float(total))*100))
    output_file.write('\r\n')  
    output_file.close()    

def run_suite():
    """Evaluate every configured parameter set and summarise its results."""
    for params in multiple_runs(suite_parameters, 3, offset=2):
        evaluate(params)
        tag = '.' + params['suffix']
        collect_results(params['test_filename'] + tag,
                        params['result_filename'] + tag,
                        datadir + 'final_results.txt' + tag,
                        params)
        
import os
def calculate_distances():
    """Build a Levenshtein-distance histogram for every evaluation run.

    Scans ``basedir``, pairing each run's 'final_results.txt.<suffix>'
    (misses written by collect_results) with its 'test_words.txt.<suffix>'
    solutions file, and writes the per-distance miss counts to
    'distances.<suffix>'.
    """
    runs = {}
    basedir = '../data_eval/new align - dict/'
    # Group files by run suffix (carried in the file extension).
    for f in os.listdir(basedir):
#        print f, os.path.isfile(basedir+f)
        
        if os.path.isfile(basedir+f):
            root, ext = os.path.splitext(f)
            
#            print root, ext
            if ext not in runs: runs[ext] = {}
#            root = os.path.basename(root)
            if root == 'final_results.txt':
                runs[ext]['results']=root+ext
            elif root == 'test_words.txt':
                runs[ext]['solutions']=root+ext
                
    for run in runs:
        print run,
        # NOTE(review): none of the three files opened below is ever closed.
        output_file = codecs.open(basedir+'distances'+run, 'wb', 'utf-8')  
        test_file = open(basedir+runs[run]['solutions'], 'rb')
        res_file = open(basedir+runs[run]['results'], 'rb')
        contents = res_file.read() #.decode('utf-8')
        # The miss list sits between the echoed parameter dict (last '}')
        # and the trailing '------------' separator - see collect_results.
        contents = contents[contents.rindex('}')+3:contents.rindex('------------')] 
        results = {}
        # Miss lines have the form '<greeklish>-<converted greek>'.
        for line in contents.splitlines():
#            print line
            grkl, gr = line.split('-')
            results[grkl]=gr
        
        contents = test_file.read() #.decode('utf-8')
        solutions = {}
        for line in contents.splitlines():
            grkl, gr, d1, d2 = line.split(' - ')
            solutions[grkl]=gr
        
        # Histogram: distance value -> number of missed words at that distance.
        fin_results = {}
        for grkl in results:
            # NOTE(review): on a miss this only prints; the next line still
            # indexes solutions[grkl] and raises KeyError - confirm intended.
            if grkl not in solutions: print 'ERROR',grkl,'not in solutions in run',run
            dist = utils.lev_distance(results[grkl], solutions[grkl])
            fin_results[dist]=fin_results.get(dist,0)+1
        
        
        
        keys = fin_results.keys()
        keys.sort()
        # NOTE(review): distances with zero count are skipped entirely, so the
        # output line number does not equal the distance value; calc_avg_distances
        # indexes these files by line - confirm the histograms are gap-free.
        for dist in keys:
#            output_file.write(str(dist))
#            output_file.write(' - ')
            output_file.write(str(fin_results[dist]))        
            output_file.write('\n')    
                
        print ' Done'
        
def calc_avg_distances():
    """Average per-run distance histograms into percentage distributions.

    For every run suffix found in ``basedir``, sums the per-distance counts
    across that run's 'distances.<suffix>_<i>' files and writes each bucket
    as a percentage of the grand total to 'avg_distances.<suffix>'.
    """
    basedir = '../data_eval/new align - nodict/'
    runs = {}
    for f in os.listdir(basedir):
        if not os.path.isfile(basedir + f):
            continue
        root, ext = os.path.splitext(f)
        # Strip the per-run index ('_0'..'_9') so repeats of one configuration
        # group together.  NOTE(review): assumes a single-digit run index -
        # confirm for runs >= 10.
        ext2 = ext[:-2]
        if ext2 not in runs:
            runs[ext2] = []
        if root == 'distances':
            runs[ext2].append(root + ext)

    for run in runs:
        output_file = codecs.open(basedir + 'avg_distances' + run, 'wb', 'utf-8')
        try:
            distances = {}
            for f in runs[run]:
                # BUG FIX: the input files were never closed.
                res_file = open(basedir + f, 'rb')
                try:
                    # Line i of a distances file holds the count at distance i.
                    # (int() tolerates the trailing newline; the old
                    # int(line[:-1]) dropped a digit when the last line had
                    # no newline.)
                    for idx, line in enumerate(res_file):
                        count = int(line)
                        distances[idx] = distances.get(idx, 0) + count
                        distances['total'] = distances.get('total', 0) + count
                finally:
                    res_file.close()

            # BUG FIX: snapshot the grand total BEFORE normalising.  The old
            # code divided by distances['total'] inside the loop while also
            # rescaling that entry, so once 'total' happened to be visited
            # (dict order is arbitrary) every later bucket was divided by 100
            # instead of the real total.
            total = float(distances['total'])
            for key in distances:
                distances[key] = float(distances[key]) * 100.0 / total

            print(distances)
            for dist in sorted(distances.keys()):
                output_file.write(str(distances[dist]))
                output_file.write('\n')
        finally:
            # BUG FIX: the output file was never closed.
            output_file.close()

                
    
if __name__ == '__main__':
    # BUG FIX: the guard previously contained only commented-out calls and
    # blank lines, which is a SyntaxError (an if-suite needs a statement).
    # Uncomment the entry point you want to run.
#    run_suite()
#    calculate_distances()
#    calc_avg_distances()
    pass