﻿import sys
#sys.path.append(r"C:\Program Files (x86)\IronPython 2.7\Lib\ ")
import re
import collections
import codecs
import os
import unicodedata
import wer
import time
import json
import string

# WARNING: test-only hack. reload(sys) restores sys.setdefaultencoding
# (removed by site.py at startup) so the Python 2 default str<->unicode
# codec can be forced to UTF-8, avoiding implicit-conversion
# UnicodeDecodeError exceptions throughout this script.
reload(sys)
sys.setdefaultencoding("utf-8")


# All dictionary/corpus paths below are relative to the project trunk.
# NOTE(review): machine-specific absolute path — adjust per environment.
os.chdir(r"I:\Andres\_Tesis\_TesisV0.1\scSVN\trunk")

def words(text): return re.findall(u'[a-zñáéíóú123456789]+', text.lower()) 
# NEWORDS: frequency of corpus tokens that ARE present in the reference
# dictionary NWORDS (default count starts at 1).
NEWORDS = collections.defaultdict(lambda: 1)
# model2: frequency of plausible-looking corpus tokens NOT in the dictionary
# — suspected OCR errors (default count starts at 0).
model2 = collections.defaultdict(lambda: 0)
def train2(features):
    """Tally tokens from a corpus: bump NEWORDS for tokens already present in
    NWORDS, and model2 for plausible tokens missing from it (suspected OCR
    errors).  Mutates the module-level counters as a side effect."""
    global model2
    for token in features:
        # Skip very short tokens and obvious garbage.
        if len(token) <= 2 or is_not_a_word(token):
            continue
        if token in NWORDS:
            NEWORDS[token] += 1
        else:
            model2[token] += 1
    
def train():
    """Build the word-frequency model from the bundled dictionary files.

    Returns a defaultdict mapping word -> weight (default weight 1).  Words
    from the domain-specific list and the names list get large weight boosts
    because those dictionaries are more specific to the corpus.

    BUGFIX: the original opened three files via codecs.open(...).read() and
    never closed them; each read now uses a `with` block.
    """
    with codecs.open('dictionaries\\specific.txt', 'r', 'utf-8') as fin:
        prefeatures = words(fin.read())
    with codecs.open('dictionaries\\names.txt', 'r', 'utf-8') as fin:
        names = words(fin.read())
    with codecs.open('dictionaries\\aspell.txt', 'r', 'utf-8') as fin:
        aspell = words(fin.read())
    model = collections.defaultdict(lambda: 1)
    # The words in prefeatures/names weigh more because those dictionaries
    # are more specific to this corpus.
    for f in aspell:
        model[f] += 1
    for f in prefeatures:
        model[f] += 20000
    for f in names:
        model[f] += 10000
    return model

#NWORDS = train(words(file('big.txt').read()))
#NWORDS = train(words(codecs.open('big.txt','r','utf-8').read()))
#t1 = time.time()
#NWORDS = train()
#print "Dictionary Load Time: "+ str(time.time() - t1)

#NWORDS = ''
# Candidate characters for edit-distance replacements/insertions: Spanish
# lowercase letters plus digits 1-9.  Note '0' is excluded here, though it
# does appear in class_of's o/O confusion class.
alphabet = u'abcdefghijklmnopqrstuvwxyzñáéíóú123456789'


def edits0(word):
    """All variants of *word* produced by replacing exactly one character with
    a member of its OCR confusion class (see class_of).

    BUGFIX: the original line read `splits = splits = [...]` — a duplicated
    assignment left over from editing; the redundant target is removed.
    """
    splits = [(word[:i], word[i], word[i+1:]) for i in range(len(word))]
    replaces = [a + r + c for a, b, c in splits for r in class_of(b)]
    return set(replaces)
    
def edits1(word):
    """All strings at edit distance 1 from *word*: one deletion,
    transposition, replacement, or insertion over `alphabet`."""
    pieces = [(word[:i], word[i:]) for i in range(len(word) + 1)]
    results = set()
    for head, tail in pieces:
        if tail:
            results.add(head + tail[1:])                       # deletion
        if len(tail) > 1:
            results.add(head + tail[1] + tail[0] + tail[2:])   # transposition
        for ch in alphabet:
            if tail:
                results.add(head + ch + tail[1:])              # replacement
            results.add(head + ch + tail)                      # insertion
    return results

def edits01(word):
    """Dictionary words reachable by two successive confusion-class
    substitutions (edits0 applied twice, filtered through NWORDS)."""
    found = set()
    for intermediate in edits0(word):
        for candidate in edits0(intermediate):
            if candidate in NWORDS:
                found.add(candidate)
    return found
   
   
   
def known_edits2(word):
    """Dictionary words exactly two standard edits (edits1 twice) away from
    *word*."""
    found = set()
    for intermediate in edits1(word):
        for candidate in edits1(intermediate):
            if candidate in NWORDS:
                found.add(candidate)
    return found

def known(words): return set(w for w in words if w in NWORDS)


def correct(pword):
    """Return the most probable dictionary correction for *pword*.

    Candidate sets are tried from cheapest/most-likely to most expensive:
    the word itself, one confusion-class substitution, two substitutions,
    one standard edit, two standard edits, and finally the word unchanged.
    The candidate with the highest NWORDS frequency wins.
    """
    word = pword.lower()
    candidates = (known([word])
                  or known(edits0(word))
                  or known(edits01(word))
                  or known(edits1(word))
                  or known_edits2(word)
                  or [word])
    return max(candidates, key=NWORDS.get)


def correct_file2(fpath):
    """Read *fpath* (UTF-8) and return a list of (word, correction) pairs for
    every space-separated token longer than one character.

    BUGFIX: the original appended to `lw`, a name that was never defined,
    raising NameError on the first qualifying word.  The list is now created
    locally and returned.  Also renamed the loop handle to avoid shadowing
    the builtin `file`.
    """
    pairs = []
    with codecs.open(fpath, 'r', 'utf_8') as fin:
        for line in fin:
            for each_word in line.split(' '):
                if len(each_word) > 1:
                    pairs.append((each_word, correct(each_word)))
    return pairs


def remove_accents(s):
    """Strip acute accents from the uppercase Spanish vowels in *s*.

    Only the uppercase forms (Á É Í Ó Ú) are handled; lowercase accented
    vowels pass through unchanged.
    """
    for accented, plain in ((u"Á", "A"), (u"É", "E"), (u"Í", "I"),
                            (u"Ó", "O"), (u"Ú", "U")):
        s = s.replace(accented, plain)
    return s
def pre_proccess(pword):
    """Apply hard-coded fixes for tokens the OCR reliably mangles.

    Two kinds of rules:
      * exact token substitutions (e.g. "Lev" -> "Ley");
      * ordinal-marker repair: a digit followed by '?' or '"' (an OCR-mangled
        masculine ordinal) becomes digit + 'º', optionally keeping a trailing
        comma.
    Returns the token unchanged when no rule applies.
    """
    substitutions = {
        "Lev": "Ley",
        "Vita": "VIta",
        "Iros": "1ros",
        "Ilda": "IIda",
        "Ildo": "IIdo",
        "ei": "el",
        "ai": "al",
    }
    if pword in substitutions:
        return substitutions[pword]
    n = "º"
    if len(pword) == 2 and pword[0].isdigit() and pword[1] in ('?', '"'):
        return pword[0] + n.decode('utf-8')
    if (len(pword) == 3 and pword[0].isdigit()
            and pword[1] in ('?', '"') and pword[2] == ","):
        return pword[0] + n.decode('utf-8') + pword[2]
    return pword

    
def is_not_a_word(str):
    """Heuristic garbage detector for OCR tokens.

    Returns 1 (garbage / skip it) when the token:
      * is 40+ characters long, or 2 characters or shorter (rule 'L');
      * contains 4+ identical word characters in a row (rule 'R');
      * is purely numeric; or
      * is at least half non-alphabetic characters.
    Returns 0 otherwise.

    NOTE(review): the parameter shadows the builtin `str`; kept for interface
    compatibility with existing callers.
    """
    # Rule 'L': over-long strings are garbage; len <= 2 strings are ignored.
    if len(str) >= 40 or len(str) <= 2:
        return 1
    # Rule 'R': 4 identical characters in a row is garbage.
    if re.search(r"(\w)\1{3,}", str):
        return 1
    # BUGFIX: the original guarded this with `len(str)==2 and (...)`, which is
    # dead code — len <= 2 already returned above — so due to operator
    # precedence only the `or str.isdigit()` part could ever fire.  Keep just
    # that live part.
    if str.isdigit():
        return 1
    # Mostly non-alphabetic tokens are garbage.
    count = 0
    for c in str:
        if not c.isalpha():
            count = count + 1
    if count >= (len(str) / 2):
        return 1
    return 0

#Recibe un caracter y retorna la clase de similaridad

def class_of(ch):
    """Return the OCR visual-similarity class containing character *ch*.

    Each space-separated group below lists glyphs the OCR commonly confuses
    with one another.  If *ch* belongs to no group, it is its own class.
    """
    groups = u"fiíjklrtIJLT1! abdgoópqOQ690 eécCG vxyVYX sS5 zZ EFPRK úu"
    for group in groups.split():
        if ch in group:
            return group
    return ch

#fix code, line over 80 caracters                
def process_word(word):
    """Spell-correct a single token while preserving its capitalisation.

    Order of operations: hard-coded pre_proccess fixes win outright; garbage
    tokens (is_not_a_word) pass through untouched; otherwise the corrected
    form is re-capitalised to match the original (all-caps corrections also
    have their accents stripped).  Any actual change is echoed to stdout.
    """
    rword = pre_proccess(word)
    if rword == word and not is_not_a_word(word):
        checked = correct(word)
        if checked != word.lower():
            if word.isupper():
                rword = remove_accents(checked.upper())
            elif word[0].isupper():
                rword = checked[0].upper() + checked[1:]
            else:
                rword = checked
    if rword != word:
        print(word + " -> " + rword)
    return rword
    
#fpath=r"D:\Andres\_Tesis\trash\E27.dir\1.txt"    
#fpath=r"D:\Andres\_Tesis\trash\F15N3054664.txt"    
def correct_file(fpath):
    """Spell-correct a UTF-8 text file word by word, writing the result to a
    sibling file named '<name>.corrected.txt'.

    Words are split on the separator characters listed below; the separators
    themselves are copied through unchanged.

    BUGFIX: the output handle was opened with codecs.open and only closed on
    the happy path; both files are now managed by a single `with` statement,
    and the erratic indentation of the original loop body is normalised.
    """
    fpath_out = fpath.replace('.txt', '.corrected.txt')
    separators = [' ', '"', '\t', '\n', '\r', ',', '.', ';', ':', '(', ')', u'\u2014']
    with codecs.open(fpath, 'r', 'utf_8') as fin, \
         codecs.open(fpath_out, 'w', 'utf_8') as fout:
        for line in fin:
            line_out = ''
            pstr = ''
            for c in line:
                if c in separators:
                    line_out = line_out + process_word(pstr) + c
                    pstr = ''
                else:
                    pstr = pstr + c
            # Flush whatever token remains at end of line.
            line_out = line_out + process_word(pstr)
            fout.write(line_out)
def correct_directory():
    global model2    
    for i in [4,5,6,7,8,9,10]:
        d=r'H:\Andres\_Tesis\BoletinesOrdenados\\'+ str(i)        
        ficheros = os.listdir(d)
        for f in ficheros:
            if f[-4:]=='.txt':
                train2(words(codecs.open(d+"\\"+f,'r','utf-8').read()))            
    with open('dictErros.json', 'w') as f: f.write(json.dumps(model2))
    """
    l=0
    for w in model2:
        l=l+model2[w]
    l=l*0.01
    ac=0
    parcial=0 
    
    for w in sorted(model2, key=model2.get, reverse=True):
        parcial=model2[w]*1.0 / l
        ac=ac+parcial
        spaces=" ";
        print (w),
        for j in range(28-len(w)):
            spaces=spaces+" "
        print spaces +"|  ",
        print string.zfill(str(model2[w]),4),"  |  ",
        print string.zfill(str(round(parcial,6)),8),"  |  ",
        print string.zfill(str(round(ac,6)),9),"  |  ";
    """

    
    
    
    
    # Adds a list of words to the dictionay from a plane txt
def add_to_dictionary():
    """Merge the word list from dictionaries\\cities.txt into the existing
    frequency dictionary (dictionaries\\dictClean.json) and write the result
    to dictFinal.json.  Added words get a +10000 weight so they rank like the
    other domain-specific lists.

    BUGFIX: the JSON input handle was opened and never closed; all files are
    now managed with `with` blocks.
    """
    with open('dictionaries\\dictClean.json') as json_data:
        base = json.load(json_data)
    merged = collections.defaultdict(lambda: 1)
    with codecs.open('dictionaries\\cities.txt', 'r', 'utf-8') as fin:
        to_add = words(fin.read())
    for w in to_add:
        merged[w] += 10000
    for w in base:
        merged[w] += base[w]
    with open('dictFinal.json', 'w') as fout:
        fout.write(json.dumps(merged))
    
        
        
def correct_directory_html():
    """Scan the 'BoletinesOrdenados' corpora (subdirectories 4..10), tally
    suspected OCR errors into the global model2, and write an HTML report
    (ErrorsAnalysisByWord.html): one row per error word with its count, its
    share of all errors (in %), and the cumulative share.

    BUGFIXES: the report's closing cell tag was written as "</tc>" (invalid)
    instead of "</td>"; the final table tag was "<table>" instead of
    "</table>"; and the output handle was never closed (now a `with` block).
    """
    global model2
    for i in [4, 5, 6, 7, 8, 9, 10]:
        # NOTE(review): hard-coded, machine-specific corpus root.
        d = r'H:\Andres\_Tesis\BoletinesOrdenados\\' + str(i)
        for fname in os.listdir(d):
            if fname[-4:] == '.txt':
                with codecs.open(d + "\\" + fname, 'r', 'utf-8') as fin:
                    train2(words(fin.read()))
    total = 0
    for w in model2:
        total = total + model2[w]
    total = total * 0.01  # scale so per-word counts divide into percentages
    ac = 0
    with codecs.open("ErrorsAnalysisByWord.html", 'w', 'utf_8') as fout:
        fout.write("<html><body><table>")
        for w in sorted(model2, key=model2.get, reverse=True):
            parcial = model2[w] * 1.0 / total
            ac = ac + parcial
            fout.write("<tr>")
            fout.write("<td>")
            fout.write(w)
            fout.write("</td><td>")
            fout.write(str(model2[w]))
            fout.write("</td><td>")
            fout.write(str(parcial))
            fout.write("</td><td>")
            fout.write(str(ac))
            fout.write("</td></tr>")
        fout.write("</table></body></html>")
    
    
        
#correct_directory()		

#############################################TEST##############################################
    

                    
le=['utf_8','utf_16','iso-8859-1','iso-8859-2','iso-8859-3','iso-8859-4','iso-8859-9','iso-8859-13','iso-8859-15']	
def check_encoding():
    """Find the first encoding in `le` that can decode the file '2.txt'.

    Prints progress for each attempt and returns the winning encoding name.
    If every encoding fails, the last one tried is printed and returned
    (original behavior preserved); returns None when `le` is empty.

    BUGFIX: the original used a bare `except:` that swallowed every error
    (including KeyboardInterrupt) and leaked the open file handle; the
    handler is narrowed to decode/lookup/IO failures and the file is managed
    with a `with` block.
    """
    best_enc = None
    for best_enc in le:
        try:
            with codecs.open('2.txt', 'r', best_enc) as fin:
                text = fin.read()
        except (UnicodeError, LookupError, IOError, ValueError):
            print('not: ' + best_enc)
        else:
            print('pass')
            print(text)
            break
    print(best_enc)
    return best_enc

	
#check_encoding()
#check_file2(r'd:\1.txt')	
#check_file("2.txt")
#correct_file(r"H:\Andres\_Tesis\trash\F15N3132762.txt")
#correct_file(r"H:\Andres\_Tesis\trash\F15N3138822.txt")
#correct_file(r"H:\Andres\_Tesis\trash\F15N3132762.txt")
