from _token_util import *

def bigtoken_index_subseq_prev_iter(bigtokenSeq, minLen):
    """Yield clone-fragment candidates found inside definition-block bodies.

    For every position inside each def-block body range (ranges supplied by
    bigtokenseq_defblock_body_range_iter) that holds an expression bigtoken,
    collect the first minLen expression bigtokens at or after that position
    (still within the block).  Positions with fewer than minLen expression
    bigtokens remaining are skipped.

    Yields:
        (startPos, exprSubseq, precedingExpr) where precedingExpr is the
        nearest expression bigtoken before startPos within the same block,
        or '' when none exists.

    NOTE(review): the scan stops at defblockEndPos - minLen (exclusive), so
    a candidate starting exactly minLen tokens before the block end is never
    considered — looks like a possible off-by-one, kept as-is to match the
    original behavior; confirm against the overall algorithm.
    """
    for blockBegin, blockEnd in bigtokenseq_defblock_body_range_iter(bigtokenSeq):
        for pos in xrange(blockBegin, blockEnd - minLen):
            # only an expression bigtoken can open a candidate fragment
            if not bigtoken_is_expr(bigtokenSeq[pos]):
                continue
            # gather the first minLen expression bigtokens at or after pos
            exprSubseq = []
            for tok in bigtokenSeq[pos:blockEnd]:
                if bigtoken_is_expr(tok):
                    exprSubseq.append(tok)
                    if len(exprSubseq) >= minLen:
                        break
            if len(exprSubseq) < minLen:
                continue  # too few expression bigtokens left in this block
            # nearest expression bigtoken before pos within the block, or ''
            precedingExpr = ''
            for tok in reversed(bigtokenSeq[blockBegin:pos]):
                if bigtoken_is_expr(tok):
                    precedingExpr = tok
                    break
            yield pos, exprSubseq, precedingExpr

def bigtoken_index_seq(bigtokenSeq, startPos, minLen):
    """Return the indices of the first minLen expression bigtokens at or
    after startPos in bigtokenSeq.

    Callers are expected to pass a startPos known to have at least minLen
    expression bigtokens after it (e.g. positions produced by
    bigtoken_index_subseq_prev_iter); if that invariant is violated the
    function fails with AssertionError.
    """
    exprIndices = []
    pos = startPos
    seqLen = len(bigtokenSeq)
    while pos < seqLen:
        if bigtoken_is_expr(bigtokenSeq[pos]):
            exprIndices.append(pos)
            if len(exprIndices) >= minLen:
                return exprIndices
        pos += 1
    assert False  # invariant violated: fewer than minLen expression bigtokens remain

def detect_cloneclass(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Detect clone classes among the given per-file preprocessed token sequences.

    Normalizes every token sequence in place, buckets each minLen-long
    candidate subsequence of expression bigtokens (keyed by the subsequence
    plus its preceding expression bigtoken), removes buckets that cannot
    form a clone class, and returns a zero-argument generator function that
    yields one clone class per surviving bucket key.

    Parameters:
        filePrepTokenSeqs -- list of token sequences; MODIFIED IN PLACE by
            tokenseq_normalized().
        minLen -- minimum number of expression bigtokens per code fragment.
        log_write, warning_write -- optional callables taking one string,
            used for progress/warning messages.
        workerThreads -- accepted for interface compatibility only; if > 0
            a warning is emitted and the work still runs single-threaded.

    Returns:
        A generator function; each yielded clone class is a list of clone
        items, each item a list of ( fileIndex, tokenIndexRanges ) tuples,
        where tokenIndexRanges is a list of (beginTokenIndex, length) pairs
        in the original (pre-bigtoken) token indexing.
    """
    if workerThreads > 0:
        warning_write and warning_write("warning> no extra threads will be used\n")
    # normalize in place so callers observe the same normalized sequences
    for tokenSeq in filePrepTokenSeqs:
        tokenSeq[:] = tokenseq_normalized(tokenSeq)
    
    log_write and log_write("log> bucket sorting...\n")
    buckets = dict() # md5 -> bigtoken -> list of tuple int, int
    fileIndexConvertTables = list()  # per file: bigtoken index -> original token index
    bigtokenSeqs = list()            # per file: the bigtoken sequence itself
    for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
        bigtokenSeq, fileIndexConvertTable = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
        bigtokenSeqs.append(bigtokenSeq)
        fileIndexConvertTables.append(fileIndexConvertTable)
        # record every candidate fragment under (subseq-key, preceding token)
        for ti, bigtokenSubseq, preced in bigtoken_index_subseq_prev_iter(bigtokenSeq, minLen):
            buckets_setitem(buckets, bigtokenSubseq, preced, ( fi, ti ))
    
    log_write and log_write("log> removing unpaired fragments...\n")
    # drop buckets that cannot yield a clone class (semantics from _token_util)
    buckets_del_nocloneclass_entries(buckets)
    # sort keys for deterministic output order (Python 2 list-returning keys())
    validKeys = buckets.keys(); validKeys.sort()
    
    def cloneclass_iter():
        # lazily convert each surviving bucket into a clone class
        for k in validKeys:
            bucket = buckets[k]
            cc = []
            for preced, values in sorted(bucket.iteritems()):
                cc_item = []
                for fi, ti in values:
                    tbl = fileIndexConvertTables[fi]
                    bigtokenSeq = bigtokenSeqs[fi]
                    # bigtoken index ranges for the fragment starting at ti
                    bigtokenIndexRanges = indices_to_ranges(bigtoken_index_seq(bigtokenSeq, ti, minLen))
                    # map bigtoken ranges back to original token (begin, length) pairs
                    tokenIndexRanges = [(tbl[begin], tbl[end] - tbl[begin]) for begin, end in bigtokenIndexRanges]
                    cc_item.append(( fi, tokenIndexRanges ))
                if preced: cc.append(cc_item)
                else: cc.extend([ cf ] for cf in cc_item) # if the preced == '', each pair of the item should be regarded as a clone pair
            yield cc # list of list of ( fi, beginTokenIndex, endTokenIndex )
    
    return cloneclass_iter

def count_valid_tokens(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Count, per file, how many bigtoken positions fall inside a
    definition-block body.

    Normalizes every token sequence in place first.  log_write,
    warning_write and workerThreads are accepted for interface
    compatibility with detect_cloneclass and are unused here.

    Returns:
        A generator function yielding (fileIndex, validPositionCount).
    """
    for seq in filePrepTokenSeqs:
        seq[:] = tokenseq_normalized(seq)
    
    def fi_validtokens_iter():
        for fileIndex, prepTokenSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _tbl = build_bigtoken_and_index_conversion_table(prepTokenSeq)
            # union of all def-block body position ranges (ranges may overlap)
            bodyPositions = set()
            for bodyBegin, bodyEnd in bigtokenseq_defblock_body_range_iter(bigtokenSeq):
                bodyPositions.update(xrange(bodyBegin, bodyEnd))
            yield fileIndex, len(bodyPositions)
    
    return fi_validtokens_iter

def count_candidates(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Count, per file, how many clone-fragment candidates exist.

    Normalizes every token sequence in place first.  log_write,
    warning_write and workerThreads are accepted for interface
    compatibility with detect_cloneclass and are unused here.

    Returns:
        A generator function yielding (fileIndex, candidateCount).
    """
    for tokenSeq in filePrepTokenSeqs:
        tokenSeq[:] = tokenseq_normalized(tokenSeq)
    
    def fi_candidates_iter():
        for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _ = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
            # count candidates without materializing them all in a list
            # (the candidate set can be large; the old len(list(...)) form
            # held every candidate tuple in memory just to count them)
            count = sum(1 for _item in bigtoken_index_subseq_prev_iter(bigtokenSeq, minLen))
            yield fi, count
    
    return fi_candidates_iter

def count_tokenset(filePrepTokenSeqs, minLen, 
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Count distinct bigtokens appearing in clone-fragment candidates.

    Normalizes every token sequence in place first.  log_write,
    warning_write and workerThreads are accepted for interface
    compatibility with detect_cloneclass and are unused here.

    Returns:
        A generator function yielding (fileIndex, distinctTokenCount) for
        every file, followed by one final (-1, totalDistinctTokenCount)
        covering all files combined.
    """
    for seq in filePrepTokenSeqs:
        seq[:] = tokenseq_normalized(seq)
    
    def fi_validtokens_iter():
        combinedTokens = set()
        for fileIndex, prepTokenSeq in enumerate(filePrepTokenSeqs):
            bigtokenSeq, _tbl = build_bigtoken_and_index_conversion_table(prepTokenSeq)
            perFileTokens = set()
            for _ti, subseq, _preced in bigtoken_index_subseq_prev_iter(bigtokenSeq, minLen):
                perFileTokens.update(subseq)
            yield fileIndex, len(perFileTokens)
            combinedTokens |= perFileTokens
        # sentinel entry: -1 marks the aggregate over all files
        yield -1, len(combinedTokens)
    
    return fi_validtokens_iter
