# -*- coding: utf-8 -*-

import sys
import re
import os
import glob
import time
import trunk.data
from io import open
from trunk.src.read_dictionary import read_dictionary, read_tags

#from trunk.src.treestuff import suffix_tree
#from trunk.src.treestuff.all_comsubstr import return_sub

class Lemmatizer:
    
    def __init__(self):
        """Load all lexical resources and reset the lemmatization statistics."""
        data_dir = trunk.data.__path__[0]
        tags = read_tags(os.path.join(data_dir, 'tags.txt'))
        self.dict_lemmas = read_dictionary(os.path.join(data_dir, 'dictionary.txt'), tags)
        self.dict_list = dict()  # lemmas grouped by length (filled by organize_dict)
        self.verb_table = self.get_verbs()        # irregular verb form -> lemma
        self.spanish_table = self.read_spanish()  # known Spanish words

        # rule dictionaries filled by extract_patterns
        self.prefixsuffixpatterns = dict()
        self.prefixpatterns = dict()
        self.suffixpatterns = dict()
        # wordform -> lemma mappings harvested from the training corpus
        self.mappings = dict()

        # lemmatization statistics
        self.verbtablecounter = 0      # lemmas retrieved from the verb table
        self.lemmatizedcounter = 0     # wordforms lemmatized by substring comparison
        self.nonlemmatizedcounter = 0  # wordforms left unchanged (match too short / no option held)
        self.wordislemmacounter = 0    # wordforms that are themselves dictionary lemmas
    
    def read_write(self, function, threshold):
        
            """Lemmatize the evaluation corpus and write the results to file.

            @param function: selector for the lemmatization method -- "l" uses
                self.lemmatize (length-bucketed search), "l2" uses
                self.newlemmatize (full-dictionary search)
            @param threshold: relative substring-length threshold forwarded to
                the chosen lemmatization function

            Writes Evaluation.txt (each "\\t" input line followed by its
            lemmatized counterpart) and Lemmatization.txt (one
            wordform<TAB>lemma pair per line).
            NOTE: Python 2 only (ur'' literal, unicode())."""
            
            mypath = os.path.join(trunk.data.__path__[0], "usp", "Evaluation_corpus")
            os.chdir(mypath)
            evaluation = 'Evaluation.txt'
            lemmafile = 'Lemmatization.txt' # file where we write what wordforms we lemmatized to what output
            
            # NOTE(review): two independent ifs, but "l" can never equal "l2",
            # so this behaves like if/elif.
            if function == "l":
                function = self.lemmatize
            if function == "l2":
                function = self.newlemmatize

            #files = ["14.final", "16.final", "20.final", "22.final", "23.final"]
            files = ["gold_standard.txt"]
            with open(os.path.join(mypath, evaluation), 'w', encoding='utf8') as g:
                with open(os.path.join(mypath, lemmafile), 'w', encoding='utf8') as l:
                    for file in files:
                        print(file)
                        with open(file, encoding="utf8") as f:
                                    for line in f:
                                        new_line = line #lemmatized line
                                        if line.startswith("\\t"):
                                            g.write(line)
                                            #print line.strip()
                                            
                                            # '\z' has no escape meaning: the output line literally
                                            # starts with backslash-z -- presumably a tier marker,
                                            # TODO confirm against the corpus format.
                                            new_line = '\z'
                                            
                                            # strip digits/punctuation (incl. inverted ?/!) after dropping
                                            # the "\t " prefix and the trailing characters of the line
                                            line = re.sub(ur'[\d:/,.?!+\xbf\xa1\(\)]+', "", line.strip()[3:len(line)-2].lower()) #line we want to lemmatize on
                                            if line:
                                                words = line.split() #words without commas, points, etc
                                            else:
                                                # NOTE(review): an empty cleaned line aborts the whole
                                                # file, not just this line -- probably meant continue;
                                                # confirm before changing.
                                                break
                                            
                                            #print(line)
                                            
                                            for word in words: # LEMMATIZE
                                                lemma = function(word.lower(), threshold)
                                                #print "word: "+word+" lemma: "+lemma
                                                
                                                # record "wordform<TAB>lemma" (lemma without its POS tag)
                                                l.write(word.lower() + "\t" + lemma.split("/")[0].lower() + "\n")
                                                new_line += " " + lemma
                                            new_line += "\n"
                                        #print new_line.strip()
                                        # non-"\t" lines are echoed unchanged (new_line still == line)
                                        g.write(unicode(new_line))
            
    def organize_dict(self):
        """Method for organizing lemmas by their length"""
        for lemma in self.dict_lemmas:
            try:
                self.dict_list[len(lemma)][lemma] = self.dict_lemmas[lemma]
            except:
                self.dict_list[len(lemma)] = dict()
                self.dict_list[len(lemma)][lemma] = self.dict_lemmas[lemma]
            
    
    
    def lemmatize(self, word, threshold):
        
        """Lemmatize ``word`` and return it as ``"lemma/POS"``.

        Lookup cascade: (1) the word is itself a dictionary lemma,
        (2) the irregular-verb table, (3) the Spanish word list,
        (4) longest-common-substring search over dictionary lemmas whose
        length lies between 50% and 150% of len(word).

        @param word: lowercased wordform
        @param threshold: fraction of len(word) that a common substring must
            exceed for a dictionary lemma to count as a match
        NOTE: Python 2 only (str/unicode decode/encode round-trips)."""
        
        #word = word.decode("utf-8")

        
        lemma, pos = (word, "???")
        
        try: #word is in dictionary
            pos = self.dict_lemmas[word][0] #takes first tag from dict
            self.wordislemmacounter += 1
            
        except: #word is not in dict
            
            try: #look in verb table
                lemma = self.verb_table[word]
                
                self.verbtablecounter += 1
                pos = self.dict_lemmas[lemma][0]
                # verb table stores UTF-8 byte strings (see get_verbs)
                lemma = lemma.decode("utf-8")  

            except: 
                if word in self.spanish_table:
                    pos = "ESP"
                
                else:
                    #find largest substring shared by word and the lemmas from dict
                    min_len = max(int(len(word) * 0.5), 1) #min length, must at least be 1
                    max_len = int(len(word) * 1.5) #max length
                    #print word, "\tlen:", len(word), "\tmin:", min_len, "\tmax:", max_len
                    
                    sublen = len(word) * threshold #a candidate match must be longer than this
                    sublen_words = set()
                    
                    for x in range(min_len, max_len+1):
                        try: #max word length might be longer than existing words
                            for dict_lemma in self.dict_list[x]:
                                dict_lemma = dict_lemma.decode('utf-8')
                                #sub = return_sub([dict_lemma, word])
                                sub = self.return_sub(dict_lemma, word)
                                if sub != None:
                                    if len(sub) > sublen: #new longest substring
                                        sublen = len(sub)
                                        sublen_words = [dict_lemma]
                                        sublen_words = set(sublen_words)
                                    elif len(sub) == sublen: #new word with same max substring length as before
                                        sublen_words.add(dict_lemma)
                            
                            if sublen_words:
                                lemma = min(sublen_words, key=len) #get smallest word from set
                            
                                #print("No match")
                            #print("Longest substring", lemma)
                        except:
                            # bare except: also hides real errors, presumably only
                            # meant to catch a missing length bucket (KeyError)
                            pass
                        
                        # NOTE(review): incremented once per length bucket, i.e. several
                        # times per word; newlemmatize increments once per word.
                        # Looks like this belongs outside the for-x loop -- confirm.
                        self.lemmatizedcounter += 1
                        try:
                            try:
                                # join multi-word lemmas with underscores
                                # NOTE(review): reuses (clobbers) loop variable x
                                x = lemma.split()
                                lemma = x[0]
                                for xs in x[1:]:
                                    lemma += "_" + xs
                            except:
                                pass
                            # dictionary keys are UTF-8 byte strings in py2
                            pos = self.dict_lemmas[lemma.encode("utf-8")][0]
                        except:
                            pass
        
        if lemma == word:
            self.nonlemmatizedcounter += 1
        new = lemma + "/" + pos
        return new        
    
    
    def newlemmatize(self, word, threshold):
        
        """Lemmatize ``word`` and return it as ``"lemma/POS"``.

        Same cascade as ``lemmatize``, but step (4) scans the ENTIRE
        dictionary instead of only the length buckets between 50% and 150%
        of len(word) -- slower, but considers every lemma.

        @param word: lowercased wordform
        @param threshold: fraction of len(word) that a common substring must
            exceed for a dictionary lemma to count as a match
        NOTE: Python 2 only (str/unicode decode/encode round-trips)."""
        
        #word = word.decode("utf-8")

        
        lemma, pos = (word, "???")
        
        try: #word is in dictionary
            pos = self.dict_lemmas[word][0] #takes first tag from dict
            self.wordislemmacounter += 1
            
        except: #word is not in dict
            
            try: #look in verb table
                lemma = self.verb_table[word]
                
                self.verbtablecounter += 1
                pos = self.dict_lemmas[lemma][0]
                # verb table stores UTF-8 byte strings (see get_verbs)
                lemma = lemma.decode("utf-8")  

            except: 
                if word in self.spanish_table:
                    pos = "ESP"
                
                else:
                    #find largest substring shared by word and the lemmas from dict
                    
                    #print word, "\tlen:", len(word), "\tmin:", min_len, "\tmax:", max_len
                    
                    sublen = len(word) * threshold #a candidate match must be longer than this
                    sublen_words = set()
                    
                    try: #max word length might be longer than existing words
                        for dict_lemma in self.dict_lemmas:
                            dict_lemma = dict_lemma.decode('utf-8')
                            #sub = return_sub([dict_lemma, word])
                            sub = self.return_sub(dict_lemma, word)
                            if sub != None:
                                if len(sub) > sublen: #new longest substring
                                    sublen = len(sub)
                                    sublen_words = [dict_lemma]
                                    sublen_words = set(sublen_words)
                                elif len(sub) == sublen: #new word with same max substring length as before
                                    sublen_words.add(dict_lemma)
                        
                        if sublen_words:
                            lemma = min(sublen_words, key=len) #get smallest word from set
                        
                            #print("No match")
                        #print("Longest substring", lemma)
                    except:
                        # bare except: silently abandons the search on any error
                        pass
                    
                    self.lemmatizedcounter += 1
                    try:
                        try:
                            # join multi-word lemmas with underscores
                            x = lemma.split()
                            lemma = x[0]
                            for xs in x[1:]:
                                lemma += "_" + xs
                        except:
                            pass
                        # dictionary keys are UTF-8 byte strings in py2
                        pos = self.dict_lemmas[lemma.encode("utf-8")][0]
                    except:
                        pass
        
        if lemma == word:
            self.nonlemmatizedcounter += 1
        new = lemma + "/" + pos
        return new        
    
    def get_verbs(self):
        """Load the irregular-verb table.

        Each line of verb_table.txt holds a lemma, a tab, and a
        space-separated list of inflected forms.  Returns a dict mapping each
        UTF-8 encoded verb form to its UTF-8 encoded lemma.
        """
        table = dict()
        path = os.path.join(trunk.data.__path__[0], 'verb_table.txt')
        with open(path, encoding='utf-8') as f:
            for raw in f:
                fields = raw.strip().split("\t")
                target = fields[0].encode('utf-8')
                for form in fields[1].split():
                    table[form.encode('utf-8')] = target
        return table
    
    def read_spanish(self):
        """Load the Spanish word list as a set of stripped lines."""
        path = os.path.join(trunk.data.__path__[0], 'spanish.txt')
        with open(path, encoding='utf-8') as f:
            return set(entry.strip() for entry in f)
                
        
    def read_traincorp(self, threshold=0.1):
        """Lemmatize the training corpus and record wordform->lemma mappings.

        Reads every ``*.final`` file in the ``usp/tb`` data directory and
        lemmatizes all wordforms on ``\\t`` lines.  For each wordform whose
        lemma differs from the form itself and that is covered by neither the
        irregular-verb table nor the Spanish table, a ``wordform<TAB>lemma``
        line is written to ``mappings.txt`` in the data directory.

        @param threshold: relative substring-length threshold forwarded to
            ``lemmatize``.  BUG FIX: the original called ``function(word)``
            without this required argument, so every call raised TypeError;
            the default keeps the no-argument call signature working.
        """
        corpus_dir = os.path.join(trunk.data.__path__[0], "usp", "tb")
        os.chdir(corpus_dir)
        function = self.lemmatize
        mappingspath = os.path.join(trunk.data.__path__[0], "mappings.txt")

        # Strip digits, punctuation and inverted question/exclamation marks.
        # Compiled once here instead of re.sub per line; u'' literal replaces
        # the py2-only ur'' while keeping the identical pattern.
        cleanup = re.compile(u'[\\d:/,.?!+\\xbf\\xa1\\(\\)]+')

        with open(mappingspath, 'w', encoding="utf8") as m:
            for filename in glob.glob("*.final"):
                with open(filename, encoding="utf8") as f:
                    for line in f:
                        if not line.startswith("\\t"):
                            continue
                        # drop the "\t " prefix and trailing characters, then clean
                        line = cleanup.sub("", line.strip()[3:len(line) - 2].lower())
                        if not line:
                            # preserves original behavior: an empty cleaned line
                            # skips the remainder of the file
                            break
                        for word in line.split():
                            lemma = function(word, threshold).split("/")[0]
                            if (lemma != word and word not in self.verb_table
                                    and word not in self.spanish_table):
                                s = word + "\t" + lemma + "\n"
                                print(s)
                                m.write(s)
    
    def read_mappings(self):
        """Populate ``self.mappings`` from mappings.txt.

        Each line of the file holds a wordform and its lemma separated by a
        tab; pairs are stored in the internal ``mappings`` dictionary.
        """
        path = os.path.join(trunk.data.__path__[0], "mappings.txt")
        with open(path, encoding="utf8") as src:
            for entry in src:
                parts = entry.split("\t")
                #print(parts[0], parts[1].rstrip())
                self.mappings[parts[0]] = parts[1].rstrip()
                
        
            
                        
    
    def extract_patterns(self, threshold):    
        '''
        Extracts patterns from the training corpus by assigning wordforms to lemmas from the
        dictionary and checking how a wordform turns into a lemma. If this pattern is seen frequently,
        we save it as a rule. There is one dictionary containing prefixes and suffixes (where the 
        key is a tuple of a prefix and a suffix), but also those 
        containing only either prefixes or suffixes. The value is a list specifying in a shorthand 
        what actions should be taken with the wordform to turn it into the lemma, and the value is the number of
        times this same pattern is seen: 
        i.e. { [strip p, replace s 'xyz'] : 2 }
        actions:
        - insert    place    string    ->    insert a string at the front, after the prefix, before the suffix or at the back
        - strip    object    -> deletes either a prefix or suffix
        - replace    object    string    -> replace a prefix or a suffix with a different string
        
        place: front, back, after (the prefix), before (the suffix)
        object: p(refix), s(uffix)
        
        The arguments of the action are delimited by a tab so they can be extracted more easily.
        '''
        for (word, lemma) in self.mappings.items():
            
            # Python 2: work on UTF-8 byte strings so the offsets line up with
            # return_sub's encoded return value.
            word = word.encode("utf-8")
            lemma = lemma.encode("utf-8")
            
            #print(word, lemma)
            sub = self.return_sub(word, lemma)
            
            actions = []
            
            mode = ""
            prefix = ""
            suffix = ""
            prefsuf = ""  # NOTE(review): assigned but never used

            start = word.find(sub)  # match span inside the wordform
            end = start + len(sub)
            
            #print(word, lemma, sub, start, end)
            
            
            if start == 0:
                mode = "suffix" #if match is at the start of the wordform, we extract rule as a suffix-rule
                suffix = word[end:len(word)]
                #print("Suffix ", suffix)
                if suffix not in self.suffixpatterns:
                    self.suffixpatterns[suffix] = dict()
            elif end == len(word):
                mode = "prefix" #if match goes until the end of the wordform, we extract rule as a prefix-rule
                prefix = word[0:start]
                #print("Prefix ", prefix)
                if prefix not in self.prefixpatterns:
                    self.prefixpatterns[prefix] = dict()
            else:
                mode = "prefsuf" #if neither of the above holds, we extract a prefixsuffix-rule
                prefix = word[0:start]
                suffix = word[end:len(word)]
                if (prefix, suffix) not in self.prefixsuffixpatterns:
                    self.prefixsuffixpatterns[(prefix, suffix)] = dict()
                # print("PrefSuf ", prefix, suffix)
                
            startsub = lemma.find(sub) #the index in the word where the match starts in the lemma
            endsub = startsub + len(sub) #the index + 1 in where the match ends in the lemma
             
            """Check what changes have happened to the regions of the word when transforming it into the lemma and save rules accordingly"""

            if mode == "prefix":
                
                startpref = lemma.find(prefix)
                
                if word not in lemma:
                    # "in" on byte strings: substring containment check
                    if prefix not in (lemma[0:(startsub + 1)]): #if the prefix can no longer be found before the match
                        if startsub == 0:
                            actions.append("strip" + "\t" + "p") #if the prefix has not been substituted, save rule to strip this suffix
                        else:
                            actions.append("replace" + "\t" + "p" + "\t" + lemma[0:(startsub)]) #if the prefix has been substituted, save substitution rule
                    else:
                        if startpref == 0:
                            if len(prefix) != startsub:
                                actions.append("insert" + "\t" + "after" + "\t" + lemma[len(prefix):(startsub)]) #if there is some string inserted between the prefix and the match, save rule to insert string
                        else:
                            if startpref + len(prefix) != startsub:
                                actions.append("insert" + "\t" + "after" + "\t" + lemma[(startpref + len(prefix)):(startsub)]) #if there is some string inserted between the prefix and the match, save rule to insert string
                            actions.append("insert" + "\t" + "front" + "\t" + lemma[0:(startpref)]) #if there is some string before the prefix, save rule to insert

                if endsub != len(lemma):
                    actions.append("insert" + "\t" + "back" + "\t" + lemma[endsub:len(lemma)]) #if there is some string concatenated to the end of the match, save rule to add at the back
                    
                # tuple so the action sequence can be used as a dict key
                actions = tuple(actions)
                
                if actions in self.prefixpatterns[prefix]:
                    self.prefixpatterns[prefix][actions] += 1
                else:
                    self.prefixpatterns[prefix][actions] = 1
                    
            elif mode == "suffix":

                startsuf = lemma.find(suffix)
                endsuf = startsuf + len(suffix)
                
                if word not in lemma:
                    if suffix not in (lemma[endsub:len(lemma)]):
                        if endsub == len(lemma):
                            actions.append("strip" + "\t" + "s")    #if suffix is no longer part of the lemma, strip it
                        else:
                            actions.append("replace" + "\t" + "s" + "\t" + lemma[endsub:len(lemma)]) #if suffix has been substituted, replace it with the newfound string
                    else:
                        if endsuf == len(lemma):
                            if endsub != startsuf:
                                actions.append("insert" + "\t" + "before" + "\t" + lemma[endsub:(startsuf)]) #if there is a string s between the end of the match and the start of the suffix, save the rule to insert s between the end of the match and the start of the string
                        else:
                            if endsub != startsuf:
                                actions.append("insert" + "\t" + "before" + "\t" + lemma[endsub:(startsuf)]) #if there is a string s between the end of the match and the start of the suffix, save the rule to insert s between the end of the match and the start of the string
                            actions.append("insert" + "\t" + "back" + "\t" + lemma[endsuf:len(lemma)]) #if there is a string s after the suffix, save rule to insert s at the back

                if startsub != 0:
                    actions.append("insert" + "\t" + "front" + "\t" + lemma[0:(startsub)]) #if the match does not start at the beginning of the lemma, save the string s preceding it to be appended to the front
                if word in lemma:
                    if endsub != len(lemma):
                        actions.append("insert" + "\t" + "back" + "\t" + lemma[endsub:len(lemma)]) #if there is some string concatenated to the end of the match, save rule to add at the back
                
                actions = tuple(actions)
                
                if actions in self.suffixpatterns[suffix]:
                    self.suffixpatterns[suffix][actions] += 1
                else:
                    self.suffixpatterns[suffix][actions] = 1
                    
                    
            elif mode == "prefsuf":
                                
                startpref = lemma.find(prefix)
                
                if prefix not in (lemma[0:(startsub + 1)]): #if the prefix can no longer be found before the match
                    if startsub == 0:
                        actions.append("strip" + "\t" + "p") #if the prefix has not been substituted, save rule to strip this suffix
                    else:
                        actions.append("replace" + "\t" + "p" + "\t" + lemma[0:(startsub)]) #if the prefix has been substituted, save substitution rule
                else:
                    if startpref == 0:
                        if len(prefix) != startsub:
                            actions.append("insert" + "\t" + "after" + "\t" + lemma[len(prefix):(startsub)]) #if there is some string inserted between the prefix and the match, save rule to insert string
                    else:
                        if startpref + len(prefix) != startsub:
                            actions.append("insert" + "\t" + "after" + "\t" + lemma[(startpref + len(prefix)):(startsub)]) #if there is some string inserted between the prefix and the match, save rule to insert string
                        actions.append("insert" + "\t" + "front" + "\t" + lemma[0:(startpref)]) #if there is some string before the prefix, save rule to insert
            
                startsuf = lemma.find(suffix)
                endsuf = startsuf + len(suffix)
                
                if suffix not in (lemma[endsub:len(lemma)]):
                    if endsub == len(lemma):
                        actions.append("strip" + "\t" + "s")    #if suffix is no longer part of the lemma, strip it
                    else:
                        actions.append("replace" + "\t" + "s" + "\t" + lemma[endsub:len(lemma)]) #if suffix has been substituted, replace it with the newfound string
                else:
                    if endsuf == len(lemma):
                        if endsub != startsuf:
                            actions.append("insert" + "\t" + "before" + "\t" + lemma[endsub:(startsuf)]) #if there is a string s between the end of the match and the start of the suffix, save the rule to insert s between the end of the match and the start of the string
                    else:
                        if endsub != startsuf:
                            actions.append("insert" + "\t" + "before" + "\t" + lemma[endsub:(startsuf)]) #if there is a string s between the end of the match and the start of the suffix, save the rule to insert s between the end of the match and the start of the string
                        actions.append("insert" + "\t" + "back" + "\t" + lemma[endsuf:len(lemma)]) #if there is a string s after the suffix, save rule to insert s at the back
                        
                actions = tuple(actions)
                
                if actions in self.prefixsuffixpatterns[(prefix, suffix)]:
                    self.prefixsuffixpatterns[(prefix, suffix)][actions] += 1
                else:
                    self.prefixsuffixpatterns[(prefix, suffix)][actions] = 1
                
                        
        # Prune: keep only patterns attested more than `threshold` times.
        # NOTE(review): surviving counts are reset to 0 rather than preserved.
        tempsufdict = dict()
        tempprefdict = dict()
        tempprefsufdict = dict()
    
    
        for s in self.suffixpatterns:
    
            for pattern in self.suffixpatterns[s]:
                # `and s` drops the empty-suffix bucket
                if self.suffixpatterns[s][pattern] > threshold and s:
                    if s not in tempsufdict:
                        tempsufdict[s] = dict()
                    tempsufdict[s][pattern] = 0
        
        self.suffixpatterns = tempsufdict
        
        print(self.suffixpatterns.items())
        
        for p in self.prefixpatterns:    
            for pattern in self.prefixpatterns[p]:
                if self.prefixpatterns[p][pattern] > threshold and p:
                    if p not in tempprefdict:
                        tempprefdict[p] = dict()
                    tempprefdict[p][pattern] = 0
                    
        self.prefixpatterns = tempprefdict
        
        print(self.prefixpatterns.items())
                    
        # NOTE(review): keys here are (prefix, suffix) tuples, so the loop names
        # (s, p) are swapped; also this loop lacks the truthiness guard the two
        # loops above have -- confirm both are intentional.
        for (s,p) in self.prefixsuffixpatterns:
            #print ("Prefix+suffix", (s,p), lemm.prefixsuffixpatterns[(s,p)])
            for pattern in self.prefixsuffixpatterns[(s,p)]:
                if self.prefixsuffixpatterns[(s,p)][pattern] > threshold:
                    if (s,p) not in tempprefsufdict:
                        tempprefsufdict[(s,p)] = dict()
                    tempprefsufdict[(s,p)][pattern] = 0
                    
        self.prefixsuffixpatterns = tempprefsufdict
        
        print(self.prefixsuffixpatterns.items())

    

    def return_sub(self, s1, s2):
            """computes longest common substring
            Code taken from Wikipedia"""
            m = [[0] * (1 + len(s2)) for i in xrange(1 + len(s1))]
            longest, x_longest = 0, 0
            for x in xrange(1, 1 + len(s1)):
                for y in xrange(1, 1 + len(s2)):
                    if s1[x - 1] == s2[y - 1]:
                        m[x][y] = m[x - 1][y - 1] + 1
                        if m[x][y] > longest:
                            longest = m[x][y]
                            x_longest = x
                    else:
                        m[x][y] = 0
            return s1[x_longest - longest: x_longest].encode("utf-8")              

def main(argv):
    """Command-line entry point.

    argv[1] -- action: "lemmatize", "patterns" or "newlemmatize"
    argv[2] -- substring-length threshold, e.g. "0.3"
    """
    action = argv[1]
    # BUG FIX: argv entries are strings; the threshold must be numeric,
    # otherwise len(word) * threshold later repeats the string instead of
    # producing a length bound.
    threshold = float(argv[2])
    lemm = Lemmatizer()

    if action == "lemmatize":
        lemm.organize_dict()
        lemm.read_write("l", threshold)
    elif action == "patterns":
        lemm.organize_dict()
        lemm.read_traincorp()
        lemm.extract_patterns(9)  # pattern-frequency cutoff fixed at 9
    elif action == "newlemmatize":
        lemm.read_write("l2", threshold)
    else:
        sys.stdout.write("Falscher Befehl")  # German: "wrong command"


if __name__ == '__main__':
    
    #main(sys.argv)
    # Ad-hoc driver: bypasses main() and runs newlemmatize ("l2") over the
    # evaluation corpus with a fixed threshold of 0.3.
    lemm = Lemmatizer()
    lemm.organize_dict()
    lemm.read_write("l2", 0.3)
    
    # The bare string below is a kept-for-reference recipe of evaluation runs;
    # it has no runtime effect.
    """python Lemmatizer.py lemmatize 0.1
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.1
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.2
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.2
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.3
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.3
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.35
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.35
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.4
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.4
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.45
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.45
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.5
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.5
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.55
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.55
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.6
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.6
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.65
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.65
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.70
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.70
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.75
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.75
    python compare_lemmatization.py evaluate
    python Lemmatizer.py lemmatize 0.8
    python compare_lemmatization.py evaluate
    python Lemmatizer.py newlemmatize 0.8
    python compare_lemmatization.py evaluate"""

    #lemm.read_traincorp()
    #lemm.read_mappings()
    #lemm.extract_patterns(9)
    

