import itertools

from _token_util import *

__justjoin = ''.join # bound alias for str join; appears unused in the visible portion of this file

def bigtoken_indices_hop_subseq_prev_iter(bigtokenSeq, minLen):
    """Enumerate candidate clone fragments in bigtokenSeq by identifier "hopping".

    Starting from each position inside a definition block, a position set is
    repeatedly expanded with every other in-block position that shares an
    identifier ("id|..." sub-token) with the tokens collected so far; each
    expansion round is one "hop".  Whenever at least minLen collected
    positions lie at/after the start, the first minLen of them form a
    candidate fragment.

    Yields tuples ( tindices, hop, btsubseq, prev ):
      tindices - tuple of the fragment's minLen big-token indices,
      hop      - number of expansion rounds needed to reach the fragment,
      btsubseq - the fragment's big-tokens mapped through BWI.to_original,
      prev     - BWI.to_original of the nearest collected position before the
                 start (built from (-1, "") when there is none).
    Duplicate tindices are emitted once, preferring the candidate with the
    larger preceding index and, secondarily, the larger hop.
    """
    # bigToken -> set of identifier sub-tokens it contains.  Braces and
    # reserved words contribute none; tokens whose identifier set is empty
    # are left out of the table entirely.
    bigtokenToIdSetTable = dict()
    for bigToken in bigtokenSeq:
        if bigToken in bigtokenToIdSetTable: continue
        s = set()
        if bigtoken_is_brace(bigToken) or bigtoken_is_reserved_word(bigToken):
            pass
        else:
            # A big-token is a "\n"-joined bundle of sub-tokens; keep the
            # identifier ones and drop a bare "id|" marker.
            s.update(t for t in bigToken.split("\n") if t.startswith("id|"))
            s.discard("id|")
            if s:
                bigtokenToIdSetTable[bigToken] = s
    # identifier -> set of positions in bigtokenSeq where it occurs.
    id2PosSetTable = dict()
    for ti, bigToken in enumerate(bigtokenSeq):
        idSet = bigtokenToIdSetTable.get(bigToken, None)
        if idSet:
            for id in idSet:
                id2PosSetTable.setdefault(id, set()).add(ti)
    
    def hop_bwisubseq_bwiprev_iter(bigtokenSeq, minLen):
        # Raw (unsorted, possibly duplicated) candidate stream.
        for defblockBeginPos, defblockEndPos in bigtokenseq_defblock_body_range_iter(bigtokenSeq):
            for startPos in xrange(defblockBeginPos, defblockEndPos - minLen):
                curIncludingIdSet = bigtokenToIdSetTable.get(bigtokenSeq[startPos], None)
                if not curIncludingIdSet: continue
                # BUG FIX: copy before mutating.  The set obtained from
                # bigtokenToIdSetTable is the table's own entry; updating it
                # in place (at the bottom of the hop loop) corrupted the
                # table for every later startPos sharing that token.
                curIncludingIdSet = set(curIncludingIdSet)
                curPosSet = set([ startPos ])
                for curHop in xrange(1, defblockEndPos - startPos):
                    # Expand to every in-block position sharing any collected identifier.
                    newPosSet = set()
                    for id in curIncludingIdSet:
                        newPosSet.update(p for p in id2PosSetTable.get(id, []) if defblockBeginPos <= p < defblockEndPos)
                    newPosSet -= curPosSet
                    if not newPosSet: break # for curHop
                    curPosSet.update(newPosSet)
                    
                    # Positions before the start are context; the fragment is
                    # the first minLen collected positions at/after the start.
                    posses = sorted(curPosSet)
                    prevPosses = [p for p in posses if p < startPos]
                    subseqPosses = posses[len(prevPosses):]
                    if len(subseqPosses) >= minLen:
                        subseqPosses = subseqPosses[:minLen]
                        if prevPosses:
                            prev_1 = prevPosses[-1]
                            yield curHop, [BWI.build(p, bigtokenSeq[p]) for p in subseqPosses], BWI.build(prev_1, bigtokenSeq[prev_1])
                        else:
                            yield curHop, [BWI.build(p, bigtokenSeq[p]) for p in subseqPosses], BWI.build(-1, "")
                    
                    # Grow the identifier set from the newly added positions;
                    # stop at the fixed point (no new identifiers appear).
                    newIncludingIdSet = set()
                    for p in newPosSet:
                        newIncludingIdSet.update(bigtokenToIdSetTable.get(bigtokenSeq[p], []))
                    newIncludingIdSet -= curIncludingIdSet
                    if not newIncludingIdSet: break # for curHop
                    curIncludingIdSet.update(newIncludingIdSet)
    
    # Collect all candidates, sort so the preferred one per tindices comes
    # first, then emit exactly one candidate per distinct tindices.
    r = []
    for hop, bwiSubseq, bwiPrev in hop_bwisubseq_bwiprev_iter(bigtokenSeq, minLen):
        r.append(( bwiPrev.index, tuple(bwi.index for bwi in bwiSubseq), hop, 
                [BWI.to_original(bwi) for bwi in bwiSubseq], bwiPrev.to_original() ))
    if r:
        def keyfunc(item):
            tiPrev, tindices, hop, _, _ = item
            return tindices, -tiPrev, -hop # prefer larger tiPrev and larger hop
        r.sort(key=keyfunc)
        yield tuple(r[0][1:])
        for prevItem, item in zip(r, r[1:]):
            if prevItem[1] == item[1]: continue
            yield tuple(item[1:])
        
def remove_nonmaximal_clones(buckets, validKeys):
    """Keep, per clone set, only the bucket whose entries reach the largest hop.

    buckets maps key -> bucket, where a bucket maps a preceding-token key to a
    list of ( fi, indices, hop ) tuples.  Two buckets describe the same clone
    set when their sorted ( fi, indices[0] ) pairs coincide; each non-maximal
    duplicate bucket encountered after the maximal one is cleared in place.

    Returns the sorted list of bucket keys that carry a maximal hop.
    Side effect: buckets' non-maximal member dicts are emptied.
    (Uses .items()/.values() instead of Python-2-only .iteritems()/
    .itervalues(); equivalent under Python 2, also runs on Python 3.)
    """
    def value_iter(bucket):
        # Flatten the bucket's lists into a stream of ( fi, indices, hop ).
        for _, values in bucket.items():
            for fi_indices_hop in values:
                yield fi_indices_hop

    maxHopForEachCloneSet = dict()  # clonesetKey -> ( maxhop, bucket key )
    for k in validKeys:
        bucket = buckets[k]
        # Clone-set identity: the sorted ( file index, first token index ) pairs.
        fiTis = [( fi, indices[0] ) for fi, indices, _ in value_iter(bucket)]
        clonesetKey = tuple(itertools.chain(*sorted(fiTis)))
        maxhop = max(hop for _, _, hop in value_iter(bucket))
        prevMaxhop, _ = maxHopForEachCloneSet.get(clonesetKey, ( 0, None ))
        if maxhop > prevMaxhop:
            maxHopForEachCloneSet[clonesetKey] = maxhop, k
        else:
            # Hop ties between distinct buckets of one clone set are assumed
            # impossible; a strictly smaller hop means this bucket is a
            # non-maximal duplicate and is dropped in place.
            assert maxhop != prevMaxhop
            bucket.clear()
    return sorted(key for _, key in maxHopForEachCloneSet.values())

# When True, detect_cloneclass builds its buckets in two passes: an
# identification pass that stores no positions (value None), then a refill of
# only the keys surviving buckets_del_nocloneclass_entries — presumably to
# bound memory on large inputs (TODO confirm intent).
STEPWISE_BUCKET_SORTING = True

def detect_cloneclass(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Detect clone classes among the files' prep-token sequences.

    Returns a zero-argument callable producing an iterator of clone classes.
    Each clone class is a list of clone fragments; each fragment is a list of
    ( fi, ranges ) entries, where ranges are ( tbl[begin], tbl[end] - tbl[begin] )
    pairs converted from big-token index ranges via the file's index table.
    Every sequence in filePrepTokenSeqs is normalized in place first.
    log_write is an optional progress callback; warning_write and
    workerThreads are accepted but unused in this function.
    """
    for tokenSeq in filePrepTokenSeqs:
        tokenSeq[:] = tokenseq_normalized(tokenSeq, localTruncate=False, functionTruncate=True)
    
    if STEPWISE_BUCKET_SORTING:
        # Pass 1: register candidate buckets with value None (no positions
        # stored yet), so unpaired buckets can be discarded cheaply.
        log_write and log_write("log> bucket identifying...\n")
        buckets = dict() # md5 -> bigtoken -> list of tuple fi, indices, hop
    
        for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
            for indices, hop, btsubseq, lastToken in bigtoken_indices_hop_subseq_prev_iter(bigtokenSeq, minLen):
                buckets_setitem(buckets, btsubseq, lastToken, None)
    
        log_write and log_write("log> removing unpaired fragments...\n")
        buckets_del_nocloneclass_entries(buckets)
        validKeys = buckets.keys(); validKeys.sort()
        
        # Pass 2: re-scan the files and record ( fi, indices, hop ) only for
        # keys that survived the unpaired-fragment removal.
        log_write and log_write("log> bucket sorting...\n")
        buckets = dict((k, dict()) for k in validKeys) # md5 -> bigtoken -> list of tuple fi, indices, hop
        for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
            for indices, hop, btsubseq, lastToken in bigtoken_indices_hop_subseq_prev_iter(bigtokenSeq, minLen):
                v = buckets.get(bigtokenseq_hash(btsubseq), None)
                if isinstance(v, dict):
                    v.setdefault(lastToken, []).append(( fi, indices, hop ))
    else:
        # Single pass: store positions immediately, then drop unpaired entries.
        log_write and log_write("warn> no step-wise bucket sorting\n")
        log_write and log_write("log> bucket sorting...\n")
        buckets = dict() # md5 -> bigtoken -> list of tuple int, int, int # fi, ti, lowu
        for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
            for indices, hop, btsubseq, lastToken in bigtoken_indices_hop_subseq_prev_iter(bigtokenSeq, minLen):
                buckets_setitem(buckets, btsubseq, lastToken, ( fi, indices, hop ))
        
        log_write and log_write("log> removing unpaired fragments...\n")
        buckets_del_nocloneclass_entries(buckets)
        validKeys = buckets.keys(); validKeys.sort()

    log_write and log_write("log> removing nonmaximal clone sets...\n")
    maxhopKeys = remove_nonmaximal_clones(buckets, validKeys)
    
    # Cache, per file, the big-token sequence and the big-token-index ->
    # prep-token-index conversion table used for fragment reporting below.
    fileIndexConvertTables = []
    fileBigtokenSeqs = []
    for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
        # NOTE(review): the second return value is used as the conversion
        # table (consistent with tbl[...] lookups below), but its local name
        # fileBigtokenSeq suggests the two names were accidentally swapped
        # at this call site — behavior looks right, naming does not; confirm.
        bigtokenSeq, fileBigtokenSeq = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
        fileBigtokenSeqs.append(bigtokenSeq)
        fileIndexConvertTables.append(fileBigtokenSeq)

    def cloneclass_iter():
        # Materialize each surviving (maximal-hop) bucket into a clone class.
        for k in maxhopKeys:
            bucket = buckets[k]
            cc = []
            for preced, values in sorted(bucket.iteritems()):
                cc_item = []
                for fi, indices, _ in values:
                    tbl = fileIndexConvertTables[fi]
                    bigtokenSeq = fileBigtokenSeqs[fi] # NOTE(review): unused below
                    bigtokenIndexRanges = indices_to_ranges(indices)
                    # Convert each big-token range to a ( begin, extent ) pair
                    # in prep-token index units via the conversion table.
                    tokenIndexRanges = [(tbl[begin], tbl[end] - tbl[begin]) for begin, end in bigtokenIndexRanges]
                    cc_item.append(( fi, tokenIndexRanges ))
                if preced: cc.append(cc_item)
                else: cc.extend([ cf ] for cf in cc_item) # if the preced == '', each pair of the item should be regarded as a clone pair
            yield cc # list of clone fragments: list of ( fi, list of ( begin, extent ) )
    
    return cloneclass_iter

def count_valid_tokens(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Normalize each sequence in place, then return a zero-argument callable
    that yields ( fi, n ) pairs, n being the number of distinct big-token
    positions covered by clone-candidate fragments in file fi.
    """
    for seq in filePrepTokenSeqs:
        seq[:] = tokenseq_normalized(seq, localTruncate=False, functionTruncate=True)
    
    def fi_validtokens_iter():
        for fi, prepSeq in enumerate(filePrepTokenSeqs):
            bts, _ = build_bigtoken_and_index_conversion_table(prepSeq)
            covered = set()
            for candidate in bigtoken_indices_hop_subseq_prev_iter(bts, minLen):
                covered.update(candidate[0]) # candidate[0] is the indices tuple
            yield fi, len(covered)
    
    return fi_validtokens_iter

def count_candidates(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Normalize each sequence in place, then return a zero-argument callable
    that yields ( fi, count ) pairs, count being the number of clone-candidate
    fragments found in file fi.
    """
    for seq in filePrepTokenSeqs:
        seq[:] = tokenseq_normalized(seq, localTruncate=False, functionTruncate=True)
    
    def fi_candidates_iter():
        for fi, prepSeq in enumerate(filePrepTokenSeqs):
            bts, _ = build_bigtoken_and_index_conversion_table(prepSeq)
            total = sum(1 for _ in bigtoken_indices_hop_subseq_prev_iter(bts, minLen))
            yield fi, total
    
    return fi_candidates_iter

def count_tokenset(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Normalize each sequence in place, then return a zero-argument callable
    that yields ( fi, n ) per file — n being the count of distinct big-tokens
    appearing in that file's candidate fragments — followed by a final
    ( -1, total ) pair with the distinct count over all files.
    """
    for seq in filePrepTokenSeqs:
        seq[:] = tokenseq_normalized(seq, localTruncate=False, functionTruncate=True)
    
    def fi_validtokens_iter():
        globalTokens = set()
        for fi, prepSeq in enumerate(filePrepTokenSeqs):
            bts, _ = build_bigtoken_and_index_conversion_table(prepSeq)
            perFile = set()
            for _, _, btsubseq, _ in bigtoken_indices_hop_subseq_prev_iter(bts, minLen):
                perFile.update(btsubseq)
            yield fi, len(perFile)
            globalTokens |= perFile
        yield -1, len(globalTokens)
    
    return fi_validtokens_iter

