import itertools, re

from _token_util import *

__justjoin = ''.join  # cached bound method for fast string joining (appears unused in this chunk)

# Normalization switches forwarded to tokenseq_normalized() by every entry
# point below (detect_cloneclass, count_*).  Presumably they control whether
# local-variable names / literal values are truncated -- TODO confirm in _token_util.
LOCAL_TRUNCATE = False
LITERAL_TRUNCATE = False

def build_token_uniqueness_functions(filePrepTokenSeqs):
    """Scan all files and build two closures describing identifier/literal rarity.

    Returns a tuple (bigtoken_frequency, frequent_truncated):
    - bigtoken_frequency(bigToken) -> int: the smallest per-file occurrence
      count among the identifiers/literals inside bigToken; 0 for braces,
      reserved words, or empty tokens.
    - frequent_truncated(token, freq) -> str: the single token with every
      identifier/literal component appearing in more than `freq` files
      replaced by the wildcard token "val|".
    """
    # Splits a qualified identifier into components, e.g. "a.b" or "a->b".
    # (Bug fix: the pattern previously read "[.]|-&gt;" -- an HTML-escaped
    # "->" -- so the arrow separator could never match.)
    patIdSep = re.compile("[.]|->")
    id2files = dict() # identifier name -> list of fileId
    for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
        bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
        for bigToken in bigtokenSeq:
            if bigtoken_is_brace(bigToken) or bigtoken_is_reserved_word(bigToken):
                continue  # braces and keywords carry no distinguishing information
            for t in bigToken.split("\n"):
                if t.startswith("id|"):
                    # identifier token: count each qualified component separately
                    for separatedId in patIdSep.split(t[3:]):
                        v = id2files.setdefault(separatedId, [ fi ])
                        # files are visited in increasing fi order, so checking
                        # the last element suffices to avoid duplicates
                        if v[-1] != fi: v.append(fi)
                elif t.startswith("l_") and t.find("|") >= 0:
                    # literal token (e.g. "l_...|value"): counted as a whole
                    v = id2files.setdefault(t, [ fi ])
                    if v[-1] != fi: v.append(fi)
    
    def scorefunc(num): return num
    #def scorefunc(num): return int(math.log(num, 2)) + 1
    id2score = dict(( id, scorefunc(len(fs)) ) for id, fs in id2files.iteritems()) # identifier names -> count of files
    id2score_get = id2score.__getitem__
    # Upper bound for any score: a name can occur in at most all files.
    # (Bug fix: previously scorefunc(len(filePrepTokenSeq)) -- the token count
    # of the *last* file, leaked from the loop variable above -- instead of
    # the number of files.)
    allFileScore = scorefunc(len(filePrepTokenSeqs))
    def frequent_truncated(id_or_literal, freq):
        # Replace components more frequent than `freq` with the "val|" wildcard.
        if id_or_literal == "id|": return "val|"
        elif id_or_literal.startswith("id|"):
            r = []
            for separatedId in patIdSep.split(id_or_literal[3:]):
                u = id2score_get(separatedId)
                if u <= freq: r.append(separatedId)
            if not r: return "val|"
            return "id|" + ".".join(r)
        elif id_or_literal.startswith("l_") and id_or_literal.find("|") >= 0:
            u = id2score_get(id_or_literal)
            if u <= freq: return id_or_literal
            return "val|"
        else:
            # not an identifier/literal: returned unchanged
            return id_or_literal
        
    def bigtoken_frequency(bigToken):
        # Frequency of a bigtoken = the rarest identifier/literal inside it.
        if not bigToken or bigtoken_is_brace(bigToken) or bigtoken_is_reserved_word(bigToken): return 0
        u = allFileScore
        for t in bigToken.split("\n"):
            if t.startswith("id|") and len(t) > 3:
                v = min(map(id2score_get, patIdSep.split(t[3:])))
                u = min(u, v)
            elif t.startswith("l_") and t.find("|") >= 0:
                v = id2score_get(t)
                u = min(u, v)
        return u
    return bigtoken_frequency, frequent_truncated

def bigtoken_includes_identifier_or_literal(bigToken):
    """Return True iff any sub-token of bigToken is an identifier ("id|...")
    or a literal (starts with "l_" and contains a "|")."""
    def is_id_or_literal(sub):
        if sub.startswith("id|"):
            return True
        return sub.startswith("l_") and "|" in sub
    return any(is_id_or_literal(sub) for sub in bigToken.split("\n"))

def bigtoken_index_subseq_prev_frequency_iter(bigtokenSeq, minLen, bigtoken_frequency):
    """Yield candidate clone windows as (tiFirst, freq, btsubseq, prev) tuples.

    For each definition-block body, collects the bigtokens that contain an
    identifier/literal and have nonzero frequency, then emits every window of
    minLen such tokens together with its frequency (the max token frequency
    inside the window) and the preceding token; additional, rarer windows are
    emitted by re-filtering under stricter frequency thresholds.  Windows
    sharing the same (tiLast, tiFirst) are deduplicated, keeping the one with
    the largest preceding index.
    """
    # memoize per-sequence frequencies; deliberately shadows the parameter
    bigtoken_frequency = dict(( bt, bigtoken_frequency(bt) ) for bt in bigtokenSeq).__getitem__
    r = []
    for beginPos, endPos in bigtokenseq_defblock_body_range_iter(bigtokenSeq):
        # "interesting" tokens only.  NOTE: Python 2 filter() returns a list,
        # which the slicing/indexing below relies on.  BWI instances appear to
        # behave like their bigtoken string (they are passed to string-based
        # predicates and used as dict keys) -- TODO confirm in _token_util.
        bwiSeq = filter(lambda bwi: bigtoken_includes_identifier_or_literal(bwi) and bigtoken_frequency(bwi) != 0,
                (BWI.build(i, bigtokenSeq[i]) for i in xrange(beginPos, endPos)))
        # NOTE(review): this range stops one short of len(bwiSeq) - minLen + 1,
        # so the final possible window is never emitted -- confirm intentional.
        for i in xrange(0, len(bwiSeq) - minLen):
            bwiSubseq = bwiSeq[i : i + minLen]
            assert len(bwiSubseq) == minLen
            # the interesting token just before the window; sentinel at the start
            bwiPrev = BWI.build(-1, '') if i == 0 else bwiSeq[i - 1]
            # a window's frequency is that of its most frequent token
            freq = max(map(bigtoken_frequency, bwiSubseq))
            r.append(( bwiPrev.index, bwiSubseq[-1].index, bwiSubseq[0].index, freq, 
                    map(BWI.to_original, bwiSubseq), bwiPrev.to_original() ))
            # Additionally emit progressively rarer windows: re-collect minLen
            # tokens from position i onward whose frequency is strictly below
            # the previous window's frequency, until no such window exists.
            while True:
                prevFreq = freq
                if bigtoken_frequency(bwiSeq[i]) < prevFreq:
                    break # while True
                bwiSubseq = []
                for bwi in bwiSeq[i:]:
                    if bigtoken_frequency(bwi) < prevFreq:
                        bwiSubseq.append(bwi)
                    if len(bwiSubseq) == minLen: break # for bwi
                if len(bwiSubseq) < minLen:
                    break # while True
                # the closest preceding token that also passes the threshold
                bwiPrevCands = bwiSeq[:i]
                while bwiPrevCands and not (bigtoken_frequency(bwiPrevCands[-1]) < prevFreq):
                    bwiPrevCands.pop()
                bwiPrev = bwiPrevCands[-1] if bwiPrevCands else BWI.build(-1, '')
                freq = max(map(bigtoken_frequency, bwiSubseq))
                assert freq < prevFreq  # guarantees termination of the loop
                r.append(( bwiPrev.index, bwiSubseq[-1].index, bwiSubseq[0].index, freq, 
                        map(BWI.to_original, bwiSubseq), bwiPrev.to_original() ))
    
    if r:
        # sort so that equal (tiLast, tiFirst) windows are adjacent, with the
        # largest preceding index first (it survives the dedup below)
        def keyfunc(item):
            tiPrev, tiLast, tiFirst, _, _, _ = item
            return tiFirst, tiLast, -tiPrev
        r.sort(key=keyfunc)
        yield tuple(r[0][2:]) # tiFirst, freq, btsubseq, prev
        for prevItem, item in zip(r, r[1:]):
            # drop items with the same (tiLast, tiFirst) as their predecessor
            if prevItem[1:3] == item[1:3]: continue
            yield tuple(item[2:]) # tiFirst, freq, btsubseq, prev

def bigtoken_frequent_truncated(bigToken, freq, frequent_truncated):
    """Apply frequent_truncated(token, freq) to every sub-token of bigToken
    and reassemble the result.  Braces/reserved words must not be passed in."""
    assert not (bigtoken_is_brace(bigToken) or bigtoken_is_reserved_word(bigToken))
    truncatedParts = [frequent_truncated(tok, freq) for tok in bigToken.split("\n")]
    return "\n".join(truncatedParts)

def bigtoken_index_seq(bigtokenSeq, startPos, minLen, freq, 
        bigtoken_frequency):
    """Return the indices of the first minLen bigtokens at or after startPos
    whose frequency is nonzero and <= freq.

    The caller guarantees such tokens exist; if they do not, AssertionError
    is raised.
    """
    indices = []
    for i in range(startPos, len(bigtokenSeq)):
        bf = bigtoken_frequency(bigtokenSeq[i])
        # 0 marks an uninteresting token; > freq marks a too-common one
        if bf != 0 and bf <= freq:
            indices.append(i)
            if len(indices) == minLen: return indices
    # Bug fix: was a bare `assert False`, which is silently removed under
    # `python -O`; raise explicitly so the invariant is always enforced.
    raise AssertionError("fewer than %d valid bigtokens found from position %d"
            % (minLen, startPos))

def remove_nonmaximal_clones(buckets, validKeys):
    """Among buckets whose clone sets cover the same (fi, ti) positions, keep
    only the key with the largest frequency; clear the losers' buckets in place.

    Returns the sorted list of surviving keys.
    """
    def value_iter(bucket):
        # flatten all (fi, ti, freq) triples stored under the bucket's entries
        for _, values in bucket.iteritems():
            for fi_ti_freq in values: yield fi_ti_freq
            
    lowestUniquenessForEachCloneSet = dict() # tuple (fi, ti) -> (freq, clonesetKey)
    for k in validKeys:
        bucket = buckets[k]
        # identity of a clone set: its sorted, flattened (fi, ti) positions
        fiTis = [( fi, ti ) for fi, ti, _ in value_iter(bucket)]
        clonesetKey = tuple(itertools.chain(*sorted(fiTis)))
        maxfreq = max(freq for _, _, freq in value_iter(bucket))
        prevMaxfreq, _ = lowestUniquenessForEachCloneSet.get(clonesetKey, ( 0, None ))
        if maxfreq > prevMaxfreq:
            # new best representative for this clone set.
            # NOTE(review): a previously-recorded key's bucket is not cleared
            # here when it is superseded; downstream iterates only the keys
            # returned below, so this appears harmless -- confirm.
            lowestUniquenessForEachCloneSet[clonesetKey] = maxfreq, k
        else:
            # ties are not expected to occur; the loser's bucket is emptied
            assert maxfreq != prevMaxfreq
            bucket.clear()
    maxfreqKeys = [key for _, key in lowestUniquenessForEachCloneSet.itervalues()]; maxfreqKeys.sort()
    return maxfreqKeys

def detect_cloneclass(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Detect clone classes among the per-file preprocessed token sequences.

    Normalizes filePrepTokenSeqs in place, buckets every candidate window of
    minLen bigtokens by its frequency-truncated form, removes unpaired and
    nonmaximal entries, and returns a no-argument generator function yielding
    one clone class at a time (see cloneclass_iter below for the shape).

    log_write -- optional callable for progress messages.
    warning_write, workerThreads -- accepted but not used in this function.
    """
    for tokenSeq in filePrepTokenSeqs:
        tokenSeq[:] = tokenseq_normalized(tokenSeq, localTruncate=LOCAL_TRUNCATE, literalTruncate=LITERAL_TRUNCATE) # 2009/08/31
#        tokenSeq[:] = tokenseq_normalized(tokenSeq, localTruncate=False, literalTruncate=False)
    
    log_write and log_write("log> calculate token uniqueness...\n")
    bigtoken_frequency, frequent_truncated = build_token_uniqueness_functions(filePrepTokenSeqs)
    
    log_write and log_write("log> bucket sorting...\n")
    buckets = dict() # md5 -> bigtoken -> list of tuple int, int, int # fi, ti, freq
    #def pretty_bt(bt):
    #    bt = bt.replace("|","")
    #    bt = bt.replace("\n",",")
    #    return bt[:-1]
    for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
        #print "fi=",fi
        bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
        for ti, freq, btsubseq, lastToken in bigtoken_index_subseq_prev_frequency_iter(bigtokenSeq, minLen, bigtoken_frequency):
            # replace tokens more frequent than the window's own frequency
            # with wildcards so similar-but-renamed fragments hash together
            tbtsubseq = map(lambda bt: bigtoken_frequent_truncated(bt, freq, frequent_truncated), btsubseq)
            # debug code
            #for tbti, tbt in enumerate(tbtsubseq):
            #    if not(tbt.find("id|") >= 0 or tbt.find("l_") >= 0):
            #        print "here"
            #    assert tbt.find("id|") >= 0 or tbt.find("l_") >= 0
            buckets_setitem(buckets, tbtsubseq, lastToken, ( fi, ti, freq ))
    
    log_write and log_write("log> removing unpaired fragments...\n")
    buckets_del_nocloneclass_entries(buckets)
    validKeys = buckets.keys(); validKeys.sort()
    
    log_write and log_write("log> removing nonmaximal clone sets...\n")
    maxfreqKeys = remove_nonmaximal_clones(buckets, validKeys)
    
    # per-file tables mapping bigtoken indices back to original token indices
    # (the second return value is used as the conversion table below)
    fileIndexConvertTables = []
    fileBigtokenSeqs = []
    for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
        bigtokenSeq, fileBigtokenSeq = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
        fileBigtokenSeqs.append(bigtokenSeq)
        fileIndexConvertTables.append(fileBigtokenSeq)

    def cloneclass_iter():
        # NOTE(review): the trailing comment on `yield cc` says
        # (beginIndex, endIndex), but tokenIndexRanges actually stores
        # (begin, end - begin), i.e. (start, length) pairs -- confirm.
        for k in maxfreqKeys:
            bucket = buckets[k]
            cc = []
            for preced, values in sorted(bucket.iteritems()):
                cc_item = []
                for fi, ti, maxfreq in values:
                    #debug if fi == 1174: print "fi=%d,ti=%d,k=%s" % (fi, ti, k)
                    tbl = fileIndexConvertTables[fi]
                    bigtokenSeq = fileBigtokenSeqs[fi]
                    # recover the bigtoken index ranges of this clone fragment
                    bigtokenIndexRanges = indices_to_ranges(bigtoken_index_seq(bigtokenSeq, ti, minLen, maxfreq, 
                                bigtoken_frequency))
                    # convert bigtoken index ranges to original-token ranges
                    tokenIndexRanges = [(tbl[begin], tbl[end] - tbl[begin]) for begin, end in bigtokenIndexRanges]
                    cc_item.append(( fi, tokenIndexRanges ))
                if preced: cc.append(cc_item)
                else: cc.extend([ cf ] for cf in cc_item) # if the preced == '', each pair of the item should be regarded as a clone pair
            yield cc # list of list of ( fi, list of ( beginIndex, endIndex ) )
    
    return cloneclass_iter

def count_valid_tokens(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Count, per file, the bigtoken positions inside definition-block bodies
    that have a nonzero frequency.  Returns a generator function yielding
    (fileIndex, count) pairs.  Normalizes filePrepTokenSeqs in place."""
    for seq in filePrepTokenSeqs:
        seq[:] = tokenseq_normalized(seq, localTruncate=LOCAL_TRUNCATE, literalTruncate=LITERAL_TRUNCATE) # 2009/08/31
    
    bigtoken_frequency0, frequent_truncated = build_token_uniqueness_functions(filePrepTokenSeqs)
    
    def fi_validtokens_iter():
        for fi, prepSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(prepSeq)
            # memoize per-file frequencies; lookup is by bigtoken
            freq_of = dict(( bt, bigtoken_frequency0(bt) ) for bt in bigtokenSeq).__getitem__
            positions = set()
            for beginPos, endPos in bigtokenseq_defblock_body_range_iter(bigtokenSeq):
                for i in xrange(beginPos, endPos):
                    bwi = BWI.build(i, bigtokenSeq[i])
                    if freq_of(bwi) != 0:
                        positions.add(bwi.index)
            yield fi, len(positions)
    
    return fi_validtokens_iter
    
def count_candidates(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Count, per file, the candidate clone windows produced by
    bigtoken_index_subseq_prev_frequency_iter.  Returns a generator function
    yielding (fileIndex, count) pairs.  Normalizes filePrepTokenSeqs in place."""
    for seq in filePrepTokenSeqs:
        seq[:] = tokenseq_normalized(seq, localTruncate=LOCAL_TRUNCATE, literalTruncate=LITERAL_TRUNCATE) # 2009/08/31
    
    bigtoken_frequency, frequent_truncated = build_token_uniqueness_functions(filePrepTokenSeqs)
    
    def fi_candidates_iter():
        for fi, prepSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(prepSeq)
            candidates = bigtoken_index_subseq_prev_frequency_iter(bigtokenSeq, minLen, bigtoken_frequency)
            yield fi, sum(1 for _ in candidates)
    
    return fi_candidates_iter

def count_tokenset(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Count distinct original bigtokens with nonzero frequency, per file and
    overall.  The returned generator function yields (fileIndex, count) for
    each file, then a final (-1, totalDistinctCount).  Normalizes
    filePrepTokenSeqs in place."""
    for seq in filePrepTokenSeqs:
        seq[:] = tokenseq_normalized(seq, localTruncate=LOCAL_TRUNCATE, literalTruncate=LITERAL_TRUNCATE) # 2009/08/31
    
    bigtoken_frequency0, frequent_truncated = build_token_uniqueness_functions(filePrepTokenSeqs)
    
    def fi_validtokens_iter():
        allTokens = set()
        for fi, prepSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(prepSeq)
            # memoize per-file frequencies; lookup is by bigtoken
            freq_of = dict(( bt, bigtoken_frequency0(bt) ) for bt in bigtokenSeq).__getitem__
            fileTokens = set()
            for beginPos, endPos in bigtokenseq_defblock_body_range_iter(bigtokenSeq):
                for i in xrange(beginPos, endPos):
                    bwi = BWI.build(i, bigtokenSeq[i])
                    if freq_of(bwi) != 0:
                        fileTokens.add(bwi.original)
            yield fi, len(fileTokens)
            allTokens.update(fileTokens)
        yield -1, len(allTokens)
    
    return fi_validtokens_iter
    
