# Optional speed-up: enable the psyco JIT when it is installed (Python 2
# only).  Catch only ImportError -- the original bare `except` would also
# have hidden real failures inside psyco.full().
try:
    import psyco; psyco.full()
except ImportError:
    pass

import sys, itertools

try: __import__('_prepscript_util', globals(), locals(), [], -1)
except ImportError:
    raise SystemError("add the quartet's directory to the PYTHONPATH")

from _prepscript_util import calc_md5
from _token_util import *
import cldt_datamanip as dm

# Command-line help text; [1:-1] strips the leading and trailing newline of
# the triple-quoted literal.  Fixes the "into to" typo and pluralizes the
# "Options" heading.
__usage = """
Usage: cldt_tomaximal OPTIONS datafile
  Converts a clone data file of same-length clones into maximal clones.
Options
  -o output: output file
"""[1:-1]

def __iteritem_destroy(d):
    # Drain the dictionary: each step removes one arbitrary (key, value)
    # pair and yields it, so the source dict shrinks as the consumer
    # advances and its memory can be reclaimed incrementally.
    while len(d) > 0:
        item = d.popitem()
        yield item

def __read_prep_tokens(pathStr, checksumStr):
    # Read the third (tab-separated) column of every non-comment, non-empty
    # line of the preprocessed file "pathStr.prep" and return the normalized
    # token sequence.  checksumStr is accepted for interface symmetry but is
    # not consulted here (the caller verifies checksums separately).
    prepFile = open(pathStr + ".prep")
    tokenStrs = [line.rstrip().split("\t")[2]
                 for line in prepFile
                 if line and line[0] != '#']
    prepFile.close()
    return tokenseq_normalized(tokenStrs)

def __build_file_tables(fileDataTable):
    # For every registered file, derive three per-file lookup tables and
    # return them as a (convTbl, recovTbl, btseqTbl) tuple, each keyed by
    # fileID:
    #   convTbl:  bigtoken index -> token index
    #   recovTbl: token index -> bigtoken index
    #   btseqTbl: the file's bigtoken sequence
    convTbl = dict()
    recovTbl = dict()
    btseqTbl = dict()
    for fileID in sorted(fileDataTable):
        pathStr, _, checksumStr = fileDataTable[fileID]
        tokens = __read_prep_tokens(pathStr, checksumStr)
        bigtokenSeq, indexConversionTable = build_bigtoken_and_index_conversion_table(tokens)
        indexRecoverTable = build_index_recover_table(indexConversionTable)
        # every token must map back to some bigtoken
        assert len(tokens) == len(indexRecoverTable)
        convTbl[fileID] = indexConversionTable
        recovTbl[fileID] = indexRecoverTable
        btseqTbl[fileID] = bigtokenSeq
    return convTbl, recovTbl, btseqTbl

def build_index_recover_table(indexConversionTable):
    """Invert a bigtoken->token index table into a token->bigtoken table.

    indexConversionTable[bi] is the token index at which bigtoken bi starts;
    entries must be strictly increasing.  The returned list maps each token
    index (up to, but not including, the last table entry) back to the
    bigtoken that covers it.
    """
    indexRecoverTable = [] # token index -> bigtoken index
    for bi, (tiCur, tiNext) in enumerate(zip(indexConversionTable, indexConversionTable[1:])):
        assert tiNext > tiCur
        # bigtoken bi covers the token index range [tiCur, tiNext);
        # list repetition replaces the original generator-over-xrange idiom.
        indexRecoverTable += [bi] * (tiNext - tiCur)
    return indexRecoverTable

def convert_position(pos, indexConvertTable):
    # Map a (startIndex, length) span through indexConvertTable and return
    # the converted (startIndex, length) pair.  A span whose end falls past
    # the end of the table is mapped to one past the last converted index.
    begin, length = pos
    limit = begin + length
    newBegin = indexConvertTable[begin]
    newLimit = (indexConvertTable[limit]
                if limit < len(indexConvertTable)
                else indexConvertTable[-1] + 1)
    return newBegin, newLimit - newBegin
    
# A candidate record is the list [tailbt, precedbt, fileID, beg, end, seq];
# the two key functions below expose the leading fields for sorting/grouping.
def __get_tailbt(cand):
    return cand[0]

def __get_tailbt_and_precedbt(cand):
    return cand[:2]

def __tocls(cands):
    # Collapse candidates into classes: consecutive candidates sharing the
    # same (tailbt, precedbt) key form one class of (fileID, beg, end) tuples.
    classes = []
    for _key, members in itertools.groupby(cands, key=__get_tailbt_and_precedbt):
        classes.append([tuple(member[2:5]) for member in members])
    return classes

# sentinel set: "every candidate's tail token is exhausted"
__onlyNoneSet = set([None])
    
def __emc_i(cands):
    """Extend candidate clone fragments rightward one bigtoken at a time,
    yielding a class snapshot (via __tocls) whenever the candidates'
    next tokens diverge or all candidates reach the end of their sequences.

    Each cand is the mutable list [tailbt, precedbt, fileID, beg, end, seq]
    built by extract_maximal_clone_iter; this generator mutates the lists
    in place and recurses on subgroups that still agree on the next token.
    NOTE(review): singleton inputs yield nothing (len(cands) > 1 guard) --
    presumably a lone fragment cannot form a clone class; confirm intent.
    """
    assert cands
    if len(cands) > 1:
        while True:
            for cand in cands:
                cand[4] += 1 # update 'end' value
                end, seq = cand[-2:]
                cand[0] = seq[end] if end < len(seq) else None # update 'tailbt' value
            # the set of distinct "next token" values across all candidates
            tailbtSet = set(itertools.imap(__get_tailbt, cands))
            if tailbtSet == __onlyNoneSet:
                # every candidate ran off its sequence: maximal; emit and stop
                yield __tocls(cands)
                break # while True
            if len(tailbtSet) > 1:
                # extension diverges here: the current class is maximal; emit
                # it, then recurse on each subgroup sharing the same tailbt
                # (sort first so groupby sees equal keys adjacently)
                cands.sort(key=__get_tailbt)
                yield __tocls(cands)
                for tailbt, g in itertools.groupby(cands, key=__get_tailbt):
                    for cls in __emc_i(list(g)): yield cls
                break # while True

def extract_maximal_clone_iter(fidBtStarts, fileBigtokenSeqTable):
    # Seed one candidate record per (fileID, btStartIndex) pair and delegate
    # the actual maximal-extension to __emc_i.  Record layout (mutable list):
    #   [tailbt, precedbt, fileID, beg, end, seq]
    # tailbt starts as None (filled by __emc_i); precedbt is the bigtoken
    # immediately before the start, or None at the beginning of a sequence.
    cands = []
    for fileID, btStartIndex in fidBtStarts:
        seq = fileBigtokenSeqTable[fileID]
        precedbt = seq[btStartIndex - 1] if btStartIndex > 0 else None
        cands.append([None, precedbt, fileID, btStartIndex, btStartIndex, seq])

    for maximalClass in __emc_i(cands):
        yield maximalClass

def bigtoken_index_seq_byrange(bigtokenSeq, startPos, endPos):
    # Collect the indices in [startPos, endPos) whose bigtoken is an
    # expression token (as judged by bigtoken_is_expr from _token_util).
    exprIndices = []
    for index in range(startPos, endPos):
        if bigtoken_is_expr(bigtokenSeq[index]):
            exprIndices.append(index)
    return exprIndices

if __name__ == '__main__':
    maxint = sys.maxint
    
    dataFileName = None
    outputFileName = None
    
    if len(sys.argv) == 1:
        print __usage
        sys.exit(0)
    i = 1
    while i < len(sys.argv):
        arg = sys.argv[i]
        if arg.startswith("-") and arg != "-":
            if arg == "-h":
                print __usage
                sys.exit(0)
            elif arg == "-o":
                outputFileName = sys.argv[i + 1]
                i += 1
            else:
                raise SystemError("unknown option: %s" % arg)
        else:
            if not dataFileName:
                dataFileName = arg
            else:
                raise SystemError("too many command-line arguments")
        i += 1
    
    outputFile = open(outputFileName, "w") if outputFileName else sys.stdout
    output_write = outputFile.write
    
    def commentSink(comment):
        if comment.startswith("#gap by:"):
            raise SystemError("clonedata is not detected with basic detection method")
        if comment == "#cldt_tolinenum":
            raise SystemError("clonedata must be in token-index format, not line-number format")
        output_write("%s\n" % comment)
    
    fileDataTable = dict() # fileID -> (pathStr, length, checksumStr)
    def fileSink(fileID, pathStr, length, checksumStr):
        fileDataTable[fileID] = ( pathStr, length, checksumStr )
    
    ftbls = fileIndexConversionTable, fileIndexRecoverTable, fileBigtokenSeqTable = dict(), dict(), dict()
    # fileIndexConversionTable: fileID -> indexConversionTable
    # fileIndexRecoverTable: fileID -> indexRecoverTable
    # fileBigtokenSeqTable: fileID -> bigtokenSeq
    def cloneSink(cloneClass):
        if not fileIndexConversionTable:
            output_write("#cldt_tomaximal\n")
            output_write("files:\n")
            for fileID, data in sorted(fileDataTable.iteritems()):
                pathStr, length, checksumStr = data 
                output_write("%d\t%s\t%d\t%s\n" % ( fileID, pathStr, length, checksumStr ))
                prepPathStr = pathStr + ".prep"
                if calc_md5(prepPathStr) != checksumStr:
                    raise SystemError("checksum mismatch for a preprocessed file (perhaps clonedata is obsolute): %s" % prepPathStr)
            output_write("clone classes:\n")
        
            for ft, t in zip(ftbls, __build_file_tables(fileDataTable)): ft.update(__iteritem_destroy(t))
        
        candFragments = list(itertools.chain(*cloneClass))
        
        fidBtStarts = []
        for fileID, positions in candFragments:
            indexRecoverTable = fileIndexRecoverTable[fileID]
            btFirstSegment = convert_position(positions[0], indexRecoverTable)
            #btLastSegment = convert_position(positions[-1], indexRecoverTable)
            btStartIndex = btFirstSegment[0]
            #btEndIndex = btLastSegment[0] + btLastSegment[1]
            #btLength = btEndIndex - btStartIndex
            fidBtStarts.append(( fileID, btStartIndex ))
        
        for mxClass in extract_maximal_clone_iter(fidBtStarts, fileBigtokenSeqTable): # fidBtStarts: list of fileID, btStartIndex, btEndIndex
            output_write("{\n")
            for i, mxSubclass in enumerate(mxClass):
                if i > 0: output_write("--\n")
                for fileID, beg, end in mxSubclass:
                    tbl = fileIndexConversionTable[fileID]
                    bigtokenIndexRanges = indices_to_ranges(bigtoken_index_seq_byrange(fileBigtokenSeqTable[fileID], beg, end))
                    tokenIndexRanges = [(tbl[begin], tbl[end] - tbl[begin]) for begin, end in bigtokenIndexRanges]
                    output_write("%d\t%s\n" % ( fileID, ",".join("%d+%d" % (pos+1, length) for pos, length in tokenIndexRanges) ))
            output_write("}\n")
    
    dataFile = open(dataFileName) if dataFileName != '-' else sys.stdin
    dm.scan_clone_data_file(dataFile, commentSink, fileSink, cloneSink)
    if dataFile is not sys.stdin: dataFile.close()
    
    if outputFile is not sys.stdout: outputFile.close()
