from collections import defaultdict

import apriori
from _token_util import *

def extract_grams(bigtokenSeq, beginPos, endPos, gramLen):
    # Slide a window of gramLen distinct expression bigtokens over
    # bigtokenSeq[beginPos:endPos].  Every time the window fills up, emit it
    # as one gram (a tuple) together with the positions its members came
    # from, then drop the oldest member and keep sliding.
    grams = []
    gramPositions = []
    window = []
    windowPositions = []
    for pos in xrange(beginPos, endPos):
        token = bigtokenSeq[pos]
        if not bigtoken_is_expr(token):
            continue  # only expression bigtokens participate in grams
        if token in window:
            continue  # a window never holds the same bigtoken twice
        window.append(token)
        windowPositions.append(pos)
        if len(window) == gramLen:
            grams.append(tuple(window))
            gramPositions.append(tuple(windowPositions))
            del window[0]
            del windowPositions[0]
    return grams, gramPositions

def bigtoken_beginindex_endindex_transaction_iter(bigtokenSeq, gramLen):
    # For each definition-block body in bigtokenSeq, yield its begin/end
    # positions together with the deduplicated, sorted grams found inside it
    # (the block's "transaction" for itemset mining).
    for beginPos, endPos in bigtokenseq_defblock_body_range_iter(bigtokenSeq):
        grams, _positions = extract_grams(bigtokenSeq, beginPos, endPos, gramLen)
        transaction = sorted(set(grams))
        yield beginPos, endPos, transaction

def detect_cloneclass(filePrepTokenSeqs, minLen, gramLen,
        log_write=None, warning_write=None, workerThreads=0): # will modify filePrepTokenSeqs
    """Detect clone classes across the given per-file prepared token sequences.

    Pipeline: normalize each token sequence in place, convert each to a
    "bigtoken" sequence, slice every definition block into gramLen-sized
    grams, turn the gram hashes into itemset transactions, mine frequent
    itemsets via the apriori module, and return a zero-argument generator
    function that yields the resulting clone classes.

    Parameters:
      filePrepTokenSeqs -- one prepared token sequence per file; each is
          normalized IN PLACE (local/literal truncation applied).
      minLen -- length threshold; must satisfy gramLen <= minLen (asserted).
      gramLen -- size of each gram window.
      log_write, warning_write -- optional callables taking a message string.
      workerThreads -- accepted for interface compatibility only; any value
          > 0 merely emits a warning (no extra threads are used here).

    Returns:
      cloneclass_iter -- generator function; each yielded clone class is a
          list of single-element lists [( fi, tokenIndexRanges )], one per
          supporting transaction (see note near the yield below).
    """
    assert gramLen <= minLen
    if workerThreads > 0:
        warning_write and warning_write("warning> no extra threads will be used\n")
    # Normalize every token sequence in place before any further processing.
    for tokenSeq in filePrepTokenSeqs:
        tokenSeq[:] = tokenseq_normalized(tokenSeq, localTruncate=True, functionTruncate=False, literalTruncate=True)
        
    log_write and log_write("log> building bigtoken orders...\n")
    # Per file: the bigtoken sequence, plus a table mapping bigtoken indices
    # back to indices in the original token sequence.
    fileIndexConvertTables = list()
    bigtokenSeqs = list()
    bigtokenSet = set()
    for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
        bigtokenSeq, fileIndexConvertTable = build_bigtoken_and_index_conversion_table(filePrepTokenSeq)
        bigtokenSeqs.append(bigtokenSeq)
        fileIndexConvertTables.append(fileIndexConvertTable)
        bigtokenSet.update(bigtokenSeq)
    # Stable global ordering of every bigtoken seen in any file.
    bigtokens = sorted(bigtokenSet)
    
    log_write and log_write("log> finding useless items for transactions...\n")
    # Rank each bigtoken by its position in the global ordering; a gram is
    # hashed order-insensitively (sorted ranks).  NOTE(review): hash() of the
    # tuple is taken, so distinct grams may collide; the commented-out
    # variant below is the collision-free form — confirm the collision risk
    # is acceptable.
    bigtokenToOrderFunc = dict(( bt, i ) for i, bt in enumerate(bigtokens)).__getitem__
    #def gramHash(gram): return tuple(sorted(map(bigtokenToOrderFunc, gram)))
    def gramHash(gram): return hash(tuple(sorted(map(bigtokenToOrderFunc, gram))))
    
    # gram hash -> set of file indices whose transactions contain it.
    hgramToFiSet = defaultdict(set)
    for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
        bigtokenSeq = bigtokenSeqs[fi]
        for tiBegin, tiEnd, transaction in bigtoken_beginindex_endindex_transaction_iter(bigtokenSeq, gramLen):
            if len(transaction) >= minLen - gramLen:
                hgrams = map(gramHash, transaction)
                for hgram in hgrams:
                    hgramToFiSet[hgram].add(fi)
    # Gram hashes that appear in at most one file are treated as useless and
    # filtered out of the transactions below.
    onlyOnceAppearSet = set(hgram for hgram, fiSet in hgramToFiSet.iteritems() if len(fiSet) <= 1)
    
    # Among gram hashes sharing the exact same file-index set, keep only the
    # first (in sorted order) as a representative; the rest are redundant
    # for support counting and are filtered out as well.
    hgramHavingSimilarOneSet = set()
    fiSetToHgramsTable = defaultdict(list)
    for hgram, fiSet in sorted(hgramToFiSet.iteritems()):
        fiSetToHgramsTable[frozenset(fiSet)].append(hgram)
    for _, hgrams in fiSetToHgramsTable.iteritems():
        hgramHavingSimilarOneSet.update(hgrams[1:])
    
    log_write and log_write("log> building transactions...\n")
    # transactionFiTis[i] records (file index, defblock begin, defblock end)
    # for transactions[i]; a transaction with no surviving gram hashes is
    # dropped and counted in countDisposed.
    transactionFiTis = list()
    transactions = list()
    countDisposed = 0
    for fi, filePrepTokenSeq in enumerate(filePrepTokenSeqs):
        bigtokenSeq = bigtokenSeqs[fi]
        for tiBegin, tiEnd, transaction in bigtoken_beginindex_endindex_transaction_iter(bigtokenSeq, gramLen):
            if len(transaction) >= minLen - gramLen:
                hgrams = filter(lambda hgram: hgram not in onlyOnceAppearSet and hgram not in hgramHavingSimilarOneSet, 
                        map(gramHash, transaction))
                if hgrams:
                    transactionFiTis.append(( fi, tiBegin, tiEnd ))
                    transactions.append(frozenset(hgrams))
                else:
                    countDisposed += 1
    assert len(transactionFiTis) == len(transactions)
    log_write and log_write("log> vaild transactions: %d\n" % len(transactions))
    log_write and log_write("log> dropped transactions: %d\n" % countDisposed)
    
    log_write and log_write("log> building frequent item sets...\n")
    # Presumably (itemset size, transactions, minimum support) — confirm
    # against the apriori module's signature.
    frequentItemsetToSupportTable = apriori.build_size_n_frequent_itemsets_depthfirst(minLen - gramLen, transactions, 2, 
            log_write=log_write)
    
    # No frequent itemsets: return an iterator that yields a single empty
    # clone class (note: one empty list, not an empty iteration).
    if not frequentItemsetToSupportTable:
        def cloneclass_iter():
            yield []
        return cloneclass_iter
    
    frequentItemsetToSupportingTransactionsTable = apriori.calc_itemset_to_supporting_transactions_table( \
            frequentItemsetToSupportTable.keys(), transactions)
    
    # Several itemsets can share the same supporting-transaction tuple;
    # keep only one representative itemset per tuple (first in sorted order)
    # so each clone class is reported once.
    supportingTransactionToOneFrequentItemset = dict()
    for frequentItemset, sts in sorted(frequentItemsetToSupportingTransactionsTable.iteritems()):
        tupleSts = tuple(sts)
        if tupleSts not in supportingTransactionToOneFrequentItemset:
            supportingTransactionToOneFrequentItemset[tupleSts] = frequentItemset
    
    def cloneclass_iter():
        # Reconstruct, for each clone class, the original-token index ranges
        # covered by the clone in every supporting transaction.
        for tupleSts, frequentItemset in sorted(supportingTransactionToOneFrequentItemset.iteritems()):
            cc = []
            for transactionIndex in tupleSts:
                cc_item = []
                fi, tiBegin, tiEnd = transactionFiTis[transactionIndex]
                bigtokenSeq = bigtokenSeqs[fi]
                transaction = transactions[transactionIndex]
                tbl = fileIndexConvertTables[fi]
                # Re-extract the grams of this defblock to recover positions.
                grams, positions = extract_grams(bigtokenSeq, tiBegin, tiEnd, gramLen)
                bigtokenIndexSet = set()
                for i, gram in enumerate(grams):
                    hgram = gramHash(gram)
                    # NOTE(review): membership is tested against the
                    # transaction's full surviving-gram set, not against
                    # frequentItemset (which is unused here) — confirm this
                    # is intended.
                    if hgram in transaction:
                        bigtokenIndexSet.update(positions[i])
                bigtokenIndices = sorted(bigtokenIndexSet)
                bigtokenIndexRanges = indices_to_ranges(bigtokenIndices)
                # Convert bigtoken index ranges to original-token ranges.
                # NOTE(review): the second element is tbl[end] - tbl[begin],
                # i.e. a token count, not an end index — verify against
                # consumers of this generator.
                tokenIndexRanges = [(tbl[begin], tbl[end] - tbl[begin]) for begin, end in bigtokenIndexRanges]
                cc_item.append(( fi, tokenIndexRanges ))
                cc.append(cc_item)
            yield cc # list of single-element lists of ( fi, tokenIndexRanges )
    
    return cloneclass_iter
   
