# -*- coding: utf-8 -*-

import re
import sys
import codecs
import Transcode as Transcode
from collections import defaultdict


class breakLoop(Exception):
    """Sentinel exception raised solely to break out of nested loops."""



class PanlexRecord:
    """This class is used to represent monolingual PanLex records.

    A record holds one expression (``ex``) with its field type (``extype``)
    and optional language (``lang``), plus flat lists of (field, value)
    tuples: ``metaatts`` (attributes written before the expression) and
    ``subatts`` (attributes written after it)."""
    #___________________________________________________________________________

    # field types in PanLex format
    FIELD_SOURCE_MEANING = "mi"
    FIELD_TOPIC = "dm"
    FIELD_DEFINITION = "df"
    FIELD_EXPRESSION = "ex"
    FIELD_WORD_CLASS = "wc"
    FIELD_FACT_OTHER = "md"

    # dictionary of field types
    FIELDS = {FIELD_SOURCE_MEANING : True,
             FIELD_TOPIC : True,
             FIELD_DEFINITION : True,
             FIELD_EXPRESSION : True,
             FIELD_WORD_CLASS : True,
             FIELD_FACT_OTHER : True
             }

    # dictionary of super-field types (fields that start a new tuple group)
    SUPERFIELDS = {FIELD_SOURCE_MEANING : True,
             FIELD_TOPIC : True,
             FIELD_DEFINITION : True,
             FIELD_EXPRESSION : True,
             }

    # dictionary of original-language field types
    # (comment previously duplicated "super-field types")
    ORIGLANGFIELDS = {FIELD_SOURCE_MEANING : True,
             FIELD_TOPIC : True,
             FIELD_DEFINITION : True,
             FIELD_EXPRESSION : True,
             FIELD_FACT_OTHER : True
             }

    def __init__(self, ex="", extype = "ex", lang="", debug=False):
        # ``debug`` is accepted for interface compatibility but currently unused.
        # (The original assigned self.lang twice; the duplicate is removed.)
        self.ex = ex
        self.extype = extype
        self.lang = lang
        self.subatts = []   # flat list of (field, value) tuples following the expression
        self.metaatts = []  # flat list of (field, value) tuples preceding the expression
        self._clear()



    def _clear(self):
        """Hook for subclasses to (re)initialize extra state; no-op here."""
        pass



    def addEx(self, tuplist):
        """Set this record's expression from ``tuplist[0]`` — a (type, value)
        or (type, lang, value) tuple — and store any remaining tuples as
        sub-attributes."""
        if len(tuplist[0]) > 3: sys.stderr.write("WARNING: attempt to add expression with more than 3 sub-attributes\n")
        self.extype = tuplist[0][0]
        self.ex = tuplist[0][-1]
        if len(tuplist[0]) == 3:
            self.lang = tuplist[0][1]
        self.subatts.extend(tuplist[1:])


    def addMetaAttr(self, tuplist):
        """Store the (field, value) tuples in ``tuplist`` as meta-attributes.

        BUGFIX: the original appended the whole list, nesting it one level
        deeper than addEx() stores subatts; write() then iterated tuples
        instead of strings and raised TypeError on ``att + "\\n"``."""
        if len(tuplist) > 1: sys.stderr.write("WARNING: attempt to add meta-attribute with sub-attributes\n")
        self.metaatts.extend(tuplist)



    def addSubAttr(self, tuplist):
        """Store the (field, value) tuples in ``tuplist`` as sub-attributes.

        BUGFIX: uses extend (not append) so subatts stays a flat list of
        tuples, matching what addEx() stores and what write() expects."""
        if len(tuplist) > 1: sys.stderr.write("WARNING: attempt to add sub-attribute with sub-attributes\n")
        self.subatts.extend(tuplist)



    def write(self):
        """Serialize the record: meta-attributes, expression type, optional
        language, expression, then sub-attributes — one value per line."""
        parts = []
        for tup in self.metaatts:
            for att in tup:
                parts.append(att + "\n")
        parts.append("%s\n" % (self.extype))
        if self.lang: parts.append("%s\n" % (self.lang))
        parts.append("%s\n" % (self.ex))
        for tup in self.subatts:
            for att in tup:
                parts.append(att + "\n")
        # join instead of repeated += (linear instead of quadratic);
        # u"" keeps the Python 2 unicode return type of the original
        return u"".join(parts)




    



class PanlexSourceRecord(PanlexRecord):
    """This class is used to represent source-language PanLex records."""
    #___________________________________________________________________________

    def _clear(self):
        # invoked from PanlexRecord.__init__; resets the target-expression list
        self.targetExs = []

    #___________________________________________________________________________

    def addTargetEx(self, targetEx, debug=False):
        """Wrap ``targetEx`` (a list of field tuples) in a PanlexRecord and
        append it to this record's target expressions."""
        self._debug = debug
        record = PanlexRecord()
        record.addEx(targetEx)
        self.targetExs.append(record)


    #___________________________________________________________________________

    def writeall(self):
        """Serialize this record followed by each of its target expressions,
        preceded by a blank-line separator."""
        pieces = ["\n", self.write()]
        for record in self.targetExs:
            pieces.append(record.write())
        return "".join(pieces)
    



class PanlexMatchRecord:
    """This class is used to store match records, used when determining where records occurred
    in the original dictionary"""
    #___________________________________________________________________________

    def __init__(self, start, end, lang=''):
        # character span of the match within the source text, plus a language tag
        self.start, self.end, self.lang = start, end, lang




class PanlexMatchEdgeRecord:
    """This class is used to store match edge records, used when constructing the match lattice for each span"""
    #___________________________________________________________________________

    def __init__(self, start, end, langdir, dist, llcontext, lrcontext, rlcontext, rrcontext, lines, match):
        # edge endpoints, language direction and distance between them
        self.start, self.end = start, end
        self.langdir, self.dist = langdir, dist
        # context around the left and right endpoints of the edge
        self.llcontext, self.lrcontext = llcontext, lrcontext
        self.rlcontext, self.rrcontext = rlcontext, rrcontext
        # line breaks the edge crosses, and the span this edge belongs to
        self.lines, self.match = lines, match




class PanlexMatchEdgeSetRecord:
    """This class is used to store stats for edge record sets, for a given span"""
    #___________________________________________________________________________

    # names of the per-edge attribute counters maintained by this record
    _COUNTED = ("lines", "langdir", "dist", "llcontext", "lrcontext", "rlcontext", "rrcontext")

    def __init__(self):
        self.edges = 0
        self.expressions = 0
        # one frequency counter per tracked edge attribute, plus path counts
        for name in self._COUNTED + ("paths",):
            setattr(self, name, defaultdict(int))


    def increment(self, j, k, lines, langdir, dist, llcontext, lrcontext, rlcontext, rrcontext):
        """Register one edge between expressions j and k with the given attribute values."""
        self.edges += 1
        values = (lines, langdir, dist, llcontext, lrcontext, rlcontext, rrcontext)
        for name, value in zip(self._COUNTED, values):
            getattr(self, name)[value] += 1
        self.paths[(j, k)] += 1


    def decrement(self, edge, j, k):
        """Remove one edge's contribution; counters that reach zero are deleted."""
        self.edges -= 1

        for name in self._COUNTED:
            counter = getattr(self, name)
            key = getattr(edge, name)
            counter[key] -= 1
            if not counter[key]:
                del counter[key]

        self.paths[(j, k)] -= 1
        if not self.paths[(j, k)]:
            del self.paths[(j, k)]




class PanlexDict:
    """This class is used to manipulate PanlexRecord sets."""
    #___________________________________________________________________________

    # IDs for the three basic PanLex formats
    VARIOUS_VARIOUS = 0
    ONE_VARIOUS = 1
    ONE_ONE = 2
    ENCODING = "utf-8"
    RECORDWINDOWSIZE = 100   # half-width (chars) of the match "neighborhood" around a hit
    MULTILINEPENALTY = 100   # distance penalty for edges that cross a line break
    SOURCELANG = 's'
    TARGLANG = 't'


    def __init__(self, debug=False):
        # BUGFIX: these assignments were tab-indented (TabError under Python 3,
        # fragile under Python 2); normalized to spaces.
        self._debug = debug
        self.sourceLang = ""
        self.targetLangs = []
        self.langno = 0
        self.records = []


    #___________________________________________________________________________

    def addSimpleRecord(self, source, target, retrec=False):
        """Add a record with one source expression and one target expression.

        If ``retrec`` is true, return the serialized record string."""
        record = PanlexSourceRecord(source)
        record.addTargetEx([(u"ex", target)])
        self.records.append(record)
        if retrec: return record.writeall()


    #___________________________________________________________________________

    def parse(self, infile, debug=False):
        """Parse a PanLex-format file into PanlexSourceRecords in self.records.

        The file begins with a header (language count, then one language name
        per line), a blank line, then blank-line-separated records made of
        alternating field-name / field-value lines."""
        self._debug = debug
        # Read everything as unicode via codecs (the original decoded only the
        # first header line, leaving the language names as raw bytes).
        infp = codecs.open(infile, "r", self.ENCODING)
        try:
            langs = int(infp.readline().rstrip("\n"))
            if langs:
                self.sourceLang = infp.readline().rstrip("\n")
                self.langno += 1
                langs -= 1
            for i in range(langs):
                self.targetLangs.append(infp.readline().rstrip("\n"))
                self.langno += 1

            infp.readline() # read blank line between header and records

            try:
                rem = infp.read()
            except Exception:
                # BUGFIX: sys.stderr.write() takes a single string; the original
                # passed two arguments and raised TypeError instead of reporting.
                sys.stderr.write("Unexpected error: {0}\n".format(sys.exc_info()[0]))
                raise
        finally:
            infp.close()

        for recordstr in rem.split("\n\n"):
            reclist = []
            tuplist = []
            recordstr = recordstr.strip("\n")
            # skip empty chunks (e.g. trailing newlines at end of file), which
            # previously caused pop() from an empty list
            if not recordstr: continue
            fields = recordstr.split("\n")
            # fields alternate name/value; walk backwards pairing them up
            while fields:
                value = fields.pop()
                field = fields.pop()
                if field not in PanlexRecord.FIELDS:
                    # three-element tuple: (field, language, value)
                    tup = (fields.pop(),field,value)
                else:
                    tup = (field,value)
                tuplist.append(tup)
                # a super-field closes the current tuple group
                if tup[0] in PanlexRecord.SUPERFIELDS:
                    tuplist.reverse()
                    reclist.append(tuplist)
                    tuplist = []
            self.store(reclist)


    #___________________________________________________________________________

    def store(self, reclist):
        """Convert ``reclist`` (tuple groups in reverse record order) into one
        PanlexSourceRecord: groups before the first source expression become
        meta-attributes; groups after it become target expressions."""
        record = PanlexSourceRecord()
        sourceExFound = False
        for tuplist in reclist[::-1]:
            if not sourceExFound:
                if tuplist[0][0] == PanlexSourceRecord.FIELD_EXPRESSION:
                    sourceExFound = True
                    record.addEx(tuplist)
                else:
                    record.addMetaAttr(tuplist)
            else:
                record.addTargetEx(tuplist)
        self.records.append(record)

    #___________________________________________________________________________

    def write(self, outfile, debug=False):
        """Write the header and all records to ``outfile`` in PanLex format."""
        self._debug = debug
        outfp = codecs.open(outfile, "w", self.ENCODING)
        try:
            outfp.write(self.writeHeader())
            for record in self.records:
                outfp.write(record.writeall())
        finally:
            outfp.close()


    #___________________________________________________________________________

    def writeHeader(self):
        """Return the file header: language count, then one language per line."""
        out = u"{0}\n".format(self.langno)
        if self.sourceLang: out += u"{0}\n".format(self.sourceLang)
        for lang in self.targetLangs: out += u"{0}\n".format(lang)
        return(out)


    #___________________________________________________________________________

    def robustPunct(self,string):
        """Turn ``string`` into a match-robust regex: escape the regex
        metacharacters ()+*. then let quotes/apostrophes match any character."""
        string = re.sub(r"([\(\)\.\*\+])",r"\\\1",string)
        regexp = re.compile(u"(’|\'|\")",re.UNICODE|re.MULTILINE|re.IGNORECASE)
        return regexp.sub(".",string)

    #___________________________________________________________________________

    def removeOutOfSeq(self,matchlocs,debug=False):
        # Work back through the list of span lists, removing any obvious anomalies in terms of the
        # linear sequencing in the file. Specifically, work through each poslist in ascending order of
        # span candidates, and prune any candidates from preceding poslists where the start of the span
        # is after the rightmost end of the spans for the current poslist OR for succeeding poslists,
        # the end of the span is before the leftmost start of the spans for the current poslist
        # (Removed the unused prevmatchmin/prevmatchmax locals.)

        for i, poslist in sorted(enumerate(matchlocs),key=lambda item: len(item[1])):
            if poslist:
                minstart, maxend = poslist[0]
            else:
                break
            for start,end in poslist:
                if start < minstart: minstart = start
                if end > maxend: maxend = end
            for j in range(0,len(matchlocs)):
                if j < i:
                    # earlier record: drop spans starting after this record's rightmost end
                    for k in range(len(matchlocs[j])-1,-1,-1):
                        start,end = matchlocs[j][k]
                        if start > maxend:
                            del matchlocs[j][k]
                elif j > i:
                    # later record: drop spans ending before this record's leftmost start
                    for k in range(len(matchlocs[j])-1,-1,-1):
                        start,end = matchlocs[j][k]
                        if end < minstart:
                            del matchlocs[j][k]




    def checkforEmptySpans(self,match_edge_stats):
        """Delete spans whose edge count (or any path count) dropped below 1;
        return the number of spans removed."""
        removed_spans = 0

        for i in match_edge_stats:
            for match in match_edge_stats[i]:
                try:
                    if match_edge_stats[i][match].edges < 1:
                        del match_edge_stats[i][match]
                        removed_spans += 1
                        print("Removing {0}-{1}".format(i,match))
                        # breakLoop abandons iteration over the dict we just mutated
                        raise breakLoop
                    for path in match_edge_stats[i][match].paths:
                        if match_edge_stats[i][match].paths[path] < 1:
                            print("Removing {0}-{1}".format(i,match))
                            del match_edge_stats[i][match]
                            removed_spans += 1
                            raise breakLoop
                except breakLoop:
                    break
        return removed_spans



    def printformat(self,val):
        """Prepare a value for printing: on Python 2, encode unicode text to
        UTF-8 bytes; anything else is returned unchanged."""
        # ``str is bytes`` is true only on Python 2; the short-circuit keeps the
        # ``unicode`` name from raising NameError on Python 3.
        if str is bytes and type(val) == unicode:
            return(val.encode("utf-8"))
        else:
            return(val)


    def typeTokenCountDict(self,matchlattice,match_edge_stats,attr,include=None):
        """Print per-value type/token coverage percentages of ``attr`` over all
        spans and return (best type ratio, best token ratio, exclude list).

        ``matchlattice`` is currently unread but kept for interface stability.
        ``include`` lists values that must never be placed on the exclude list
        (replaces the mutable-default ``include=[]`` of the original)."""
        if include is None: include = []
        types = tokens = 0
        matchedtokens = defaultdict(int)
        matchedtypes = defaultdict(int)
        excludevals = []
        for i in match_edge_stats:
            types += 1
            found_type = {}
            for match in match_edge_stats[i]:
                counts = match_edge_stats[i][match].__dict__[attr]
                for val in counts:
                    matchedtokens[val] += counts[val]
                    tokens += counts[val]
                    if val not in found_type:
                        matchedtypes[val] += 1
                        found_type[val] = True
        # first key in iteration order (was .keys()[0], which fails on Python 3
        # dict views); same result on Python 2
        best_val = next(iter(matchedtypes))
        for val in matchedtypes:
            # ``//`` preserves the Python 2 integer-division behavior of ``/``
            print("{0}: tokens: {1}%, types: {2}%".format(self.printformat(val),100*matchedtokens[val]//tokens,100*matchedtypes[val]//types))
            if matchedtypes[val] >= matchedtypes[best_val] and matchedtokens[val] > matchedtokens[best_val]:
                best_val = val
            if matchedtypes[val] < types:
                if val not in include: excludevals.append(val)
        return(float(matchedtypes[best_val])/types,float(matchedtokens[best_val])/tokens,excludevals)



    def pruneLattice(self,excludevals,matchlattice,match_edge_stats,attr):
        """Delete lattice edges whose ``attr`` value is in ``excludevals`` and
        return (edges removed, spans removed).

        BUGFIX: the original definition was missing ``self`` and referenced
        undefined names (matchlattice, match_edge_stats, attr) — it could never
        run; they are now explicit parameters."""
        removed_edges = 0
        # snapshot the keys: edges are deleted while iterating
        for (i,j,ji,k,ki) in list(matchlattice.keys()):
            if matchlattice[(i,j,ji,k,ki)].__dict__[attr] in excludevals:
                match_edge_stats[i][matchlattice[(i,j,ji,k,ki)].match].decrement(matchlattice[(i,j,ji,k,ki)],j,k)
                del matchlattice[(i,j,ji,k,ki)]
                removed_edges += 1
        removed_spans = self.checkforEmptySpans(match_edge_stats)
        return(removed_edges,removed_spans)



    #___________________________________________________________________________

    def locateInFile(self,infile,debug=True):
        """find original occurrences of each record from ''records'' in infile"""
        print("\n\n**************** {0} ****************\n\n".format(infile))
        # read the whole file as unicode; close the handle when done
        fp = codecs.open(infile,"r",self.ENCODING)
        text = fp.read()
        fp.close()
        textlen = len(text)
        exlist = []
        matchlocs = []  # list of spans where a given record can be found
        match_node_stats = []  # list of detailed match stats for individual record components

        # generate list for each record, containing a list of the strings stored in that record,
        # sorted in ascending order of string length (longer strings less likely to match in multiple
        # places, so more efficient to search for those first, and other strings in their immediate vicinity)
        for record in self.records:
            strlist = [(record.ex,self.SOURCELANG)]
            for targex in record.targetExs:
                strlist.append((targex.ex,self.TARGLANG))
            # BUGFIX: sort by the length of the *string* (item[0]); the original
            # key len(item) was a no-op since every item is a 2-tuple
            strlist.sort(key=lambda item: len(item[0]))
            exlist.append(strlist)

        # for each list of strings, locate the various occurrences of the set of component strings in ''infile''
        for strlist in exlist:
            poslist = []  # list to store locations of positions of different occurrences of the strings

            longex, lang = strlist.pop()  # NB assumes strlist is sorted in *ascending* order of length
            pos = len(strlist)

            match_node_stats.append({})

            regexp = re.compile(self.robustPunct(longex),re.UNICODE|re.MULTILINE|re.IGNORECASE)

            # check through each occurrence of a match (for the longest component of the record),
            # generating file-position "neighborhoods" of locations where the record can potentially be found
            for match in regexp.finditer(text):
                newstart,newend = (max(0,match.start()-self.RECORDWINDOWSIZE),min(textlen,match.end()+self.RECORDWINDOWSIZE))

                # if the start of the current match occurs within the span of the previous match, merge the two
                if poslist and newstart < poslist[-1][-1]:
                    start, oldend = poslist.pop()
                    newstart = start
                    match_node_stats[-1][(newstart,newend)] = match_node_stats[-1][(start,oldend)]
                    if oldend != newend:
                        del match_node_stats[-1][(start,oldend)]
                    poslist.append((newstart,newend))
                    match_node_stats[-1][(newstart,newend)][pos].append(PanlexMatchRecord(match.start(),match.end(),lang))
                else:
                    poslist.append((newstart,newend))
                    if (newstart,newend) not in match_node_stats[-1]:
                        match_node_stats[-1][(newstart,newend)] = defaultdict(list)
                    match_node_stats[-1][(newstart,newend)][pos].append(PanlexMatchRecord(match.start(),match.end(),lang))

            # for the remainder of record components, search within the positional spans to see
            # if a match for that component can be found, removing any spans where any given component
            # *isn't* found
            for pos, (ex, lang) in enumerate(strlist[::-1]):
                regexp = re.compile(self.robustPunct(ex),re.UNICODE|re.MULTILINE|re.IGNORECASE)

                # add match location covering full file to poslist if empty
                if not poslist:
                    poslist.append((0,-1))
                    # BUGFIX: must be a defaultdict(list) like every other entry;
                    # a plain list made the [pos] lookup below raise IndexError
                    match_node_stats[-1][(0,-1)] = defaultdict(list)

                for i in range(len(poslist)-1,-1,-1):
                    start,end = poslist[i]
                    matchfound = False
                    for match in regexp.finditer(text,start,end):
                        matchfound = True
                        match_node_stats[-1][(start,end)][pos].append(PanlexMatchRecord(match.start(),match.end(),lang))
                    if not matchfound:
                        del poslist[i]
                        del match_node_stats[-1][(start,end)]

            # store all positional spans where all record components matched in matchlocs
            matchlocs.append(poslist)

        if debug: print("ORIG: {0}".format(matchlocs))

        self.removeOutOfSeq(matchlocs)

        matchlattice = {}
        match_edge_stats = {}

        linecounter = re.compile("\n",re.UNICODE|re.MULTILINE|re.IGNORECASE)

        def dist(start,startlang,end,endlang):
            # distance between two match endpoints, normalized so start <= end;
            # crossing a line break incurs MULTILINEPENALTY
            # (hoisted out of the innermost loop; it was redefined per iteration)
            d = end - start
            if d < 0:
                d *= -1
                if "\n" in text[end:start]: d += self.MULTILINEPENALTY
                return(d,end,start,endlang+startlang)
            else:
                if "\n" in text[start:end]: d += self.MULTILINEPENALTY
                return(d,start,end,startlang+endlang)

        # Generate match lattice
        # for each record
        for i in range(len(matchlocs)):
            poslist = matchlocs[i]
            match_edge_stats[i] = {}
            # for each span where that record matched
            for match in poslist:
                match_edge_stats[i][match] = PanlexMatchEdgeSetRecord()
                # for each expression associated with the record
                for j, ex in enumerate(match_node_stats[i][match]):
                    match_edge_stats[i][match].expressions += 1

                    # for each location within that expression matched in the given span
                    for ji, m1 in enumerate(match_node_stats[i][match][ex]):
                        for k, ex2 in enumerate(match_node_stats[i][match]):

                            # for each other expression associated with that record
                            # only compute over upper diagonal of match matrix (as distances are symmetric)
                            if j < k:

                                # for each location for that expression
                                for ki,m2 in enumerate(match_node_stats[i][match][ex2]):

                                    (d1,d1start,d1end,d1langdir) = dist(m1.end,m1.lang,m2.start,m2.lang)
                                    (d2,d2start,d2end,d2langdir) = dist(m2.end,m2.lang,m1.start,m1.lang)

                                    # keep the shorter of the two candidate edges
                                    if d1 > 0 and d1 < d2:
                                        lines = len(linecounter.findall(text,d1start,d1end))
                                        matchlattice[(i,j,ji,k,ki)] = \
                                        PanlexMatchEdgeRecord(d1start,d1end,d1langdir,d1,"",text[d1start],text[d1end-1],"",lines,match)
                                        match_edge_stats[i][match].increment(j,k,lines,d1langdir,d1,"",text[d1start],text[d1end-1],"")
                                    elif d2 > 0 and d2 < d1:
                                        lines = len(linecounter.findall(text,d2start,d2end))
                                        matchlattice[(i,j,ji,k,ki)] = \
                                        PanlexMatchEdgeRecord(d2start,d2end,d2langdir,d2,"",text[d2start],text[d2end-1],"",lines,match)
                                        match_edge_stats[i][match].increment(j,k,lines,d2langdir,d2,"",text[d2start],text[d2end-1],"")

        # NOTE(review): ``pruned`` is never set to True, so this loop runs the
        # constraint statistics exactly once; the commented code below suggests
        # edge pruning (see pruneLattice) was planned here — confirm intent.
        while True:
            pruned = False
            best_constraint = []
            for sconstraint,sconstraint_desc,include in [("lines","line number",[]),("langdir","preferred ordering",["tt"]),("lrcontext","left context",[]),("rlcontext","right context",[])]:
                best_constraint.append((self.typeTokenCountDict(matchlattice,match_edge_stats,sconstraint,include=include),sconstraint_desc))
            best_constraint.sort(reverse=True)
            # print statement converted to function call (Python 3 syntax error otherwise)
            print(best_constraint)
            # for types,tokens,_exclude,sconstraint_desc in best_constraint:
            #     if debug: print("Remove {0} edge(s) and {1} span(s) through the {2} constraint".format(types,tokens,sconstraint_desc))
            if not pruned: break

        for (i,j,ji,k,ki) in sorted(matchlattice.keys()):
            edge = matchlattice[(i,j,ji,k,ki)]
            print("({0},{1},{2}) -> ({3},{4}): <{5},{6},{7},{8}> {9}".format(i,j,ji,k,ki,edge.start,edge.end,edge.dist,edge.langdir,self.printformat(text[edge.start:edge.end])))

        if debug: print("FINAL: {0}".format(matchlocs))

        multiple_matches = 0
        multiple_edges = 0
        for i in match_edge_stats:
            if len(match_edge_stats[i]) > 1:
                multiple_matches += 1
            for match in match_edge_stats[i]:
                if match_edge_stats[i][match].edges > 1:
                    multiple_edges += 1

        print("{0} records with multiple matches, {1} records have multiple edges".format(multiple_matches,multiple_edges))

        if not multiple_matches and not multiple_edges:
            prematch_strings = []
            inmatch_strings = []
            postmatch_strings = []
            # text on the same line immediately before / after the matched span
            prematch = re.compile("(.*)$",re.UNICODE)
            postmatch = re.compile("^(.*)",re.UNICODE)
            for (i,j,ji,k,ki) in sorted(matchlattice.keys()):
                edge = matchlattice[(i,j,ji,k,ki)]
                match = prematch.search(text[:edge.start])
                prematch_strings.append(match.group(0))

                inmatch_strings.append(text[edge.start:edge.end])

                match = postmatch.search(text[edge.end:])
                postmatch_strings.append(match.group(0))
            print("PRE:")
            for string in prematch_strings:
                print("\t>{0}<".format(self.printformat(string)))
            print("IN:")
            for string in inmatch_strings:
                print("\t>{0}<".format(self.printformat(string)))
            print("POST:")
            for string in postmatch_strings:
                print("\t>{0}<".format(self.printformat(string)))
                
