import hashlib, string
from sys import maxint

def tokenseq_normalized(tokenSeq, localTruncate=True, functionTruncate=False, literalTruncate=True):
    """Return a copy of tokenSeq with identifier and literal tokens truncated.

    Identifier tokens are spelled "id|<name>"; literal tokens "l_<type>|<value>".
    Truncation drops the part after '|', anonymizing the token.

    localTruncate    -- truncate local (non-global, non-function) identifiers
    functionTruncate -- also/only truncate function-name identifiers
    literalTruncate  -- truncate literal values, keeping the "l_<type>|" prefix
    """
    tokenSeq = list(tokenSeq)
    len_tokenSeq = len(tokenSeq)

    # Select the truncation predicate once, outside the token loop.
    if localTruncate:
        if functionTruncate:
            def tobe_truncate(isFuncName, isGlobalThing): return not isGlobalThing
        else:
            def tobe_truncate(isFuncName, isGlobalThing): return not (isFuncName or isGlobalThing)
    else:
        if functionTruncate:
            def tobe_truncate(isFuncName, isGlobalThing): return isFuncName
        else:
            def tobe_truncate(isFuncName, isGlobalThing): return False

    for i, t in enumerate(tokenSeq):
        if t.startswith("id|"):
            # A name directly (or via "c_func") followed by '(paren' is a call.
            isFuncName = i + 1 < len_tokenSeq and tokenSeq[i + 1] == '(paren' or \
                i + 2 < len_tokenSeq and tokenSeq[i + 1] == 'c_func' and tokenSeq[i + 2] == '(paren'
            # BUG FIX: t[0] is always 'i' here (tokens begin with "id|"), so the
            # old check `t[0] in string.ascii_uppercase` was永 always False.
            # Test the first character of the identifier name itself.
            isGlobalThing = len(t) > 3 and t[3] in string.ascii_uppercase
            if tobe_truncate(isFuncName, isGlobalThing):
                tokenSeq[i] = 'id|'
        elif literalTruncate and t.startswith("l_"):
            p = t.find("|")
            if p >= 0:
                tokenSeq[i] = t[:p + 1]  # keep the type prefix up to and including '|'
    return tokenSeq

def tokenseq_prev_normalized(tokenSeq, pos):
    """Return the token immediately before pos, or '' at the sequence start."""
    return tokenSeq[pos - 1] if pos != 0 else ''

def bigtoken_pretty(bt):
    """Render a big token for display: drop '|' separators, turn the
    newline-per-token layout into a comma list, strip the trailing comma."""
    flattened = bt.replace("|", "").replace("\n", ",")
    return flattened[:-1]

# Pre-bound "\n".join: flattens a tuple of tokens into the newline-separated
# "big token" string form used throughout this module (see bigtoken_flatten
# and bigtokenseq_hash).
__njoin = "\n".join

def build_bigtoken_and_index_conversion_table(filePrepTokenSeq):
    """Group a flat preprocessed token sequence into "big tokens".

    Returns a pair (bigtokenSeq, tbl): bigtokenSeq is the list (Python 2
    ``map`` result) of newline-terminated big-token strings, one source
    token per line; tbl[i] is the offset in filePrepTokenSeq where big
    token i starts, with one extra trailing entry equal to
    len(filePrepTokenSeq).
    """
    def build_tuplebigtokenseq(tokenSeq):
        # Accumulate tokens into tuples.  Block delimiters and control-flow
        # reserved words become one-token big tokens of their own; a
        # statement's big token is closed at "suffix:semicolon".
        r = []; r_append = r.append
        curBigToken = []; curBigToken_append = curBigToken.append
        nextColonIsSeparator = False
        def makeItIndependentBigToken():
            # Flush any pending tokens, then emit the enclosing loop's
            # current token `t` (read via closure) as a big token by itself.
            if curBigToken: r_append(tuple(curBigToken))
            r_append(( t, ))
            curBigToken[:] = []
        for t in tokenSeq:
            assert t # len(t) > 0
            if t in ( '(def_block', '(brace', ')def_block', ')brace', 
                    "r_if", "r_else", "r_while", "r_do", "r_try", "r_catch", "r_switch", "r_case" ): # 20090819
                makeItIndependentBigToken()
                nextColonIsSeparator = False
            elif t == "colon":
                # NOTE(review): nextColonIsSeparator is never set True inside
                # this function, so the separator branch below appears
                # unreachable from here — confirm before removing.
                if nextColonIsSeparator:
                    makeItIndependentBigToken()
                    nextColonIsSeparator = False
                else:
                    curBigToken_append(t)
            elif t == "suffix:semicolon":
                # End of statement: the semicolon closes the current big token.
                curBigToken_append(t)
                r_append(tuple(curBigToken))
                curBigToken[:] = []
                nextColonIsSeparator = False
            else:
                # Paren-like tokens inside a statement are re-spelled with
                # brackets, distinguishing them from the block delimiters
                # handled above.
                if t[0] == '(': curBigToken_append('[' + t[1:])
                elif t[0] == ')': curBigToken_append(']' + t[1:])
                else: curBigToken_append(t)
        else:
            # The loop never breaks; flush whatever remains as the last big token.
            if curBigToken: r_append(tuple(curBigToken))
            curBigToken[:] = []
        return r
    
    def build_bigtokenseqindex_to_tokenseqindex_table(bigtokenSeq):
        # Cumulative token counts: entry i is the flat-sequence offset at
        # which big token i begins; the final entry is the total length.
        curPos = 0
        r = [ curPos ]; r_append = r.append
        for t in bigtokenSeq:
            curPos += len(t)
            r_append(curPos)
        return r
    
    def bigtoken_flatten(bigToken): return __njoin(bigToken) + "\n"

    tuplebigtokenSeq = build_tuplebigtokenseq(filePrepTokenSeq)
    tbl = build_bigtokenseqindex_to_tokenseqindex_table(tuplebigtokenSeq)
    assert len(tbl) == len(tuplebigtokenSeq) + 1
    assert tbl[-1] == len(filePrepTokenSeq)
    return map(bigtoken_flatten, tuplebigtokenSeq), tbl

def bigtoken_is_brace(bigToken):
    """If bigToken is a single-line token (exactly one '\\n', at the end)
    starting with '(' or ')', return that character; otherwise None."""
    if bigToken and bigToken.find('\n') == len(bigToken) - 1:
        first = bigToken[0]
        if first == '(' or first == ')':
            return first
    return None

def bigtoken_is_reserved_word(bigToken):
    """True/False if bigToken is a single-line token and is/is not a
    reserved word ("r_..."); None when empty or multi-line."""
    if not bigToken:
        return None
    if bigToken.find('\n') != len(bigToken) - 1:
        return None
    return bigToken[:2] == "r_"

def bigtokenseq_defblock_body_range_iter(bigtokenSeq):
    """Yield (begin, end) index pairs bracketing the body of each
    "(def_block" ... ")def_block" region (delimiters excluded).
    An unmatched close delimiter yields nothing and resets the state."""
    openPos = None
    for idx, bigToken in enumerate(bigtokenSeq):
        if bigToken == "(def_block\n":
            openPos = idx + 1
        elif bigToken == ")def_block\n":
            if openPos is not None:
                yield openPos, idx
            openPos = None

# Module-local alias of hashlib.md5 so hot callers skip the attribute lookup.
__hashlib_md5 = hashlib.md5

def bigtokenseq_hash(bigtokenSeq):
    """Return the MD5 digest, as an int, of the newline-joined and
    newline-terminated big-token sequence.

    The payload is encoded explicitly (token text is ASCII), which leaves
    the Python 2 digest unchanged and also works under Python 3, where
    hashlib.update() accepts only bytes.
    """
    m = hashlib.md5()
    m.update(("\n".join(bigtokenSeq) + "\n").encode('utf-8'))
    return int(m.hexdigest(), 16)

# Sentinel returned when there is no usable previous big token.
__noPrevToken = ''

def bigtokenseq_prev_normalized(bigtokenSeq, pos):
    """Return the big token before pos, normalized: at the sequence start,
    or when the predecessor is an opening brace token, return the
    no-previous-token sentinel instead."""
    if pos == 0:
        return __noPrevToken
    prev = bigtokenSeq[pos - 1]
    return __noPrevToken if bigtoken_is_brace(prev) == '(' else prev

def bigtoken_is_expr(bigToken):
    """A big token is an expression when it is non-empty and is neither a
    brace token nor a reserved word.  (An empty/falsy bigToken is returned
    as-is, matching the original short-circuit behavior.)"""
    if not bigToken:
        return bigToken
    return not bigtoken_is_brace(bigToken) and not bigtoken_is_reserved_word(bigToken)

class BWI(str):
    """A str subclass ("big token With Index") that carries the text it was
    built from and, when created through build(), its position in the
    originating big-token sequence."""
    def __init__(self, s):
        # Remember the construction text; to_original() exposes it.
        self.original = s
    def __repr__(self):
        # NOTE: `index` exists only on instances created via build().
        return "BWI(%s,%d)" % (self.original, self.index)
    def __str__(self):
        # BUG FIX: previously computed the repr but fell off the end,
        # returning None (a TypeError on str() under Python 3).
        return self.__repr__()
    def to_original(self):
        return self.original
    @staticmethod
    def build(index, s):
        """Alternate constructor that attaches the sequence index."""
        bwi = BWI(s)
        bwi.index = index
        return bwi

def buckets_setitem(buckets, btsubseq, preced, item):
    """File item into the bucket hashed from btsubseq, keyed by precedence.

    Bucket representation grows on demand: absent -> (preced, [items]) while
    only one precedence has been seen, then promoted to a dict
    {preced: [items], ...} when a second precedence value arrives."""
    key = bigtokenseq_hash(btsubseq)
    bucket = buckets.get(key)
    if not bucket:
        # First item under this hash.
        buckets[key] = ( preced, [ item ] )
        return
    if isinstance(bucket, dict):
        # Already promoted: append under its precedence.
        bucket.setdefault(preced, []).append(item)
        return
    # Still in single-precedence tuple form.
    knownPreced, knownItems = bucket
    if knownPreced == preced:
        knownItems.append(item)
    else:
        # Second distinct precedence: promote to the dict form.
        buckets[key] = { knownPreced : knownItems, preced : [ item ] }

def buckets_del_nocloneclass_entries(buckets):
    """Drop every bucket that never got promoted to the dict (multi-
    precedence) form — see buckets_setitem for the representation.

    Uses items() rather than the Python-2-only iteritems(): behavior is
    identical under Python 2 (items() returns a list) and it also runs
    under Python 3.  Keys are collected first so deletion is safe."""
    invalidKeys = [k for k, bucket in buckets.items() if not isinstance(bucket, dict)]
    for k in invalidKeys:
        del buckets[k]

def indices_to_ranges(indexSeq):
    """Collapse an ascending index sequence into half-open (begin, end) ranges.

    An index greater than the current range's end opens a new range; any
    other index extends the current range by one (this preserves the
    original behavior for repeated indices).  Rewritten with a guard clause
    instead of the Python-2-only -sys.maxint dummy sentinel; the returned
    list matches what Python 2's map() produced.
    """
    ranges = []
    for ti in indexSeq:
        if ranges and ti <= ranges[-1][1]:
            ranges[-1][1] += 1  # contiguous (or duplicate): grow the open range
        else:
            ranges.append([ ti, ti + 1 ])  # gap: start a new range
    return [tuple(r) for r in ranges]

