# coding: utf-8

# 正规式 --> DFA

import sys
import Queue
import copy
from sys import stdin
from finite_automata import FA, FANode

# Token classification tables shared by the lexer below.
# NOTE(review): "fuction" looks like a typo for "function", but it must match
# whatever the grammar/code files actually contain — confirm before changing.
key_words  = [ "int", "decimal", "string", "fuction", "return" ]
# Maps the rule's 1-based position in the grammar file to a token type name;
# 100 is reserved for keywords recognized out of the "var" rule.
token_type = { 1:'integer', 2:'decimal', 3:'var', 4:'operator', 5: "string", 6: "left_bound", 7 : "right_bound", 100 : "key_word"}

# Enumerate the characters between two endpoints of a class like [a-z].
def char_range(start, end):
    """Return every character from *start* through *end*, inclusive, as a list."""
    codes = range(ord(start), ord(end) + 1)
    return [chr(code) for code in codes]

# Iterate a grammar file, yielding each line split on the " := " delimiter.
def file_reader(file_name):
    """Yield, for each line of *file_name*, the ["name", "pattern"] pieces
    produced by splitting on " := " (trailing newline kept on the pattern)."""
    with open(file_name) as handle:
        for raw_line in handle:
            yield raw_line.split(" := ")

def code_reader(file_name):
    """Return all lines of *file_name* as a list, newlines preserved.

    Replaces the original manual append loop with the equivalent
    idiomatic readlines() call.
    """
    with open(file_name) as content:
        return content.readlines()


class Regexp(object):
    """Turns a grammar file of regular expressions into a single DFA.

    Each grammar line has the form "name := pattern".  Every pattern is
    expanded (_unflod), validated (_is_leagal_exp), compiled to an NFA by a
    Thompson-style construction (_build_nfa), merged with the other rules
    under a common start state (_combine_nfa) and finally determinized by
    FA.nfa_to_dfa().

    Fixes vs. the original: _combine_nfa no longer uses a mutable default
    argument, and its unused loop counter was removed.  All comments were
    translated to English; the construction logic itself is unchanged.
    """
    count = 0
    operation_word = ['+', '*', '(', ')', '|']          # regexp meta characters
    extern_word = ["\+", "\*", "\)", "\(", "\|", "\-"]  # escaped literal forms

    def __init__(self):
        self._cur_id = 0        # node label counter, kept so debug output is readable
        self._fa = FA()         # the combined automaton produced by regexp_to_dfa
        self._cur_cache = {}    # memo table for _is_leagal_exp, keyed on (l, r)
        self._cur_reg_exp = ""  # regexp _build_nfa is working on; saves passing the str around

    def _unflod(self, exp_partten):
        """Expand "[a-c]"-style character classes into "(a|b|c)" so the NFA
        builder only has to handle |, *, + and parentheses."""
        iter_ = 0
        cur = []                        # characters collected inside the current class
        ret = []                        # output fragments, joined on return
        reg_token_begin = False         # currently inside a [...] class?

        while iter_ < len(exp_partten):
            if exp_partten[ iter_ ] == '[':
                cur = []
                reg_token_begin = True
            elif reg_token_begin:
                if exp_partten[iter_] == ']':
                    reg_token_begin = False
                    ret.append('(' + '|'.join( cur ) + ')')
                elif exp_partten[iter_-1] != '\\' and exp_partten[iter_] == '-':
                    # unescaped '-': a range from the previous char to the next one
                    iter_ += 1
                    begin_char = cur.pop()
                    end_char = exp_partten[ iter_ ]
                    cur.extend( char_range(begin_char, end_char) )
                else:
                    cur.append( exp_partten[iter_] )
            else:
                ret.append(exp_partten[iter_])
            iter_ += 1
        return "".join(ret)

    def _is_leagal_exp(self, l = 0, r = 0):
        """Return True when self._cur_reg_exp[l:r] is a well-formed regexp.

        Memoized recursive check mirroring the grammar used by _build_nfa:
        single character / escape pair, r*, r+, (r), a|b, and concatenation.
        """
        key = l * 100000 + r            # encode the (l, r) pair as a single int key
        cur_str = self._cur_reg_exp[l:r]

        if r - l < 1: return False
        if key in self._cur_cache:
            return self._cur_cache[key]

        # a single non-operator character, or a two-char escape sequence
        if r - l == 1 and cur_str not in Regexp.operation_word: 
            self._cur_cache[key] = True
            return True
        elif cur_str in Regexp.extern_word:
            self._cur_cache[key] = True
            return True

        # r* or r+
        if self._cur_reg_exp[r-1] in [ '*', '+' ]:                             
            if self._is_leagal_exp(l, r-1):
                self._cur_cache[key] = True
                return True

        # (r)
        if self._cur_reg_exp[l] == '(' and self._cur_reg_exp[r-1] == ')':
            if self._is_leagal_exp(l+1, r-1):
                self._cur_cache[key] = True
                return True

        # a|b
        for it in range(l+1, r):          
            if self._cur_reg_exp[it] == '|':
                r_leagal = self._is_leagal_exp(it+1, r)
                if self._is_leagal_exp(l, it) and r_leagal:
                    self._cur_cache[key] = True
                    return True

        # ab (concatenation)
        for it in range(l+1, r):
            r_leagal = self._is_leagal_exp(it, r)
            if self._is_leagal_exp(l, it) and r_leagal:
                self._cur_cache[key] = True
                return True

        self._cur_cache[key] = False
        return False

    def _build_nfa(self, l = 0, r = 0, cur_type = 0):
        """Recursively build an NFA for self._cur_reg_exp[l:r].

        *cur_type* is stamped on the start/end nodes so the DFA can later
        report which grammar rule a lexeme matched.  Returns an FA whose
        nfa_start/nfa_end delimit the fragment, or None when the slice is
        not a well-formed expression.
        """
        cur_str = self._cur_reg_exp[l:r]

        # leaf: a single literal character
        if r - l == 1:
            nfa = FA()
            nfa.nfa_end.type = cur_type
            nfa.nfa_start.type = cur_type
            nfa.nfa_end.is_end_state = True
            nfa.nfa_start.next[cur_str] = [ nfa.nfa_end ]
            return nfa
        
        # leaf: an escaped operator such as "\+"
        if r - l == 2:
            if cur_str in Regexp.extern_word:
                cur_str = self._cur_reg_exp[r-1]
                nfa = FA()
                nfa.nfa_end.type = cur_type
                nfa.nfa_start.type = cur_type
                nfa.nfa_end.is_end_state = True
                nfa.nfa_start.next[cur_str] = [ nfa.nfa_end ]
                return nfa 

        # r* : zero-or-more — epsilon edges allow skipping and repeating
        if self._cur_reg_exp[r-1] == '*':                     
            if self._is_leagal_exp(l, r-1):
                nfa = FA()
                nfa.nfa_end.type = cur_type
                nfa.nfa_start.type = cur_type
                nfa_pre = self._build_nfa(l, r-1, cur_type)
                nfa.nfa_end.is_end_state = True
                nfa_pre.nfa_end.is_end_state = False               
                nfa.nfa_start.next["eps"] = [ nfa_pre.nfa_start, nfa.nfa_end ]
                if "eps" not in nfa_pre.nfa_end.next.keys():
                    nfa_pre.nfa_end.next["eps"] = []
                nfa_pre.nfa_end.next["eps"].extend( [nfa.nfa_end, nfa_pre.nfa_start] )
                return nfa

        # r+ : one-or-more — like r*, but no epsilon edge that skips the body
        if self._cur_reg_exp[r-1] == '+':    
            if self._is_leagal_exp(l, r-1):
                nfa = FA()
                nfa.nfa_end.type = cur_type
                nfa.nfa_start.type = cur_type
                nfa_pre = self._build_nfa(l, r - 1, cur_type)
                nfa.nfa_end.is_end_state = True
                nfa_pre.nfa_end.is_end_state = False              
                if "eps" not in nfa_pre.nfa_end.next.keys():
                    nfa_pre.nfa_end.next["eps"] = []    
                nfa_pre.nfa_end.next["eps"].extend( [nfa.nfa_end, nfa_pre.nfa_start] )
                nfa.nfa_start.next["eps"] = [ nfa_pre.nfa_start ]
                return nfa
        
        # (r) : parentheses just group — reuse the inner fragment
        if self._cur_reg_exp[l] == '(' and self._cur_reg_exp[r-1] == ')':
            if self._is_leagal_exp(l+1, r-1):
                nfa = self._build_nfa(l+1, r-1, cur_type)
                nfa.nfa_end.type = cur_type
                nfa.nfa_start.type = cur_type
                return nfa
        
        # a|b : alternation — branch into both fragments via epsilon edges
        for it in range(l+1, r):          
            if self._cur_reg_exp[it] == '|':
                l_leagal = self._is_leagal_exp(l, it)
                r_leagal = self._is_leagal_exp(it+1, r)             
                if l_leagal and r_leagal:    
                    nfa = FA()
                    nfa.nfa_end.type = cur_type
                    nfa.nfa_start.type = cur_type
                    nfa_l = self._build_nfa(l, it, cur_type)
                    nfa_r = self._build_nfa(it+1, r, cur_type)
                    nfa.nfa_end.is_end_state = True
                    nfa_l.nfa_end.is_end_state = False
                    nfa_r.nfa_end.is_end_state = False
                    nfa.nfa_start.next["eps"] = [ nfa_l.nfa_start ]
                    nfa.nfa_start.next["eps"].append( nfa_r.nfa_start )               
                    if "eps" not in nfa_l.nfa_end.next.keys():
                        nfa_l.nfa_end.next["eps"] = []
                    nfa_l.nfa_end.next["eps"].append( nfa.nfa_end )        
                    if "eps" not in nfa_r.nfa_end.next.keys():
                        nfa_r.nfa_end.next["eps"] = []
                    nfa_r.nfa_end.next["eps"].append( nfa.nfa_end ) 
                    return nfa
        
        # ab : concatenation — chain the two fragments with epsilon edges
        for it in range(l+1, r):
            l_leagal = self._is_leagal_exp(l, it)
            r_leagal = self._is_leagal_exp(it, r)             
            if l_leagal and r_leagal:    
                nfa = FA()
                nfa.nfa_end.type = cur_type
                nfa.nfa_start.type = cur_type
                nfa_l = self._build_nfa(l, it, cur_type)
                nfa_r = self._build_nfa(it, r, cur_type)
                nfa.nfa_end.is_end_state = True
                nfa_l.nfa_end.is_end_state = False
                nfa_r.nfa_end.is_end_state = False
                if "eps" not in nfa_l.nfa_end.next.keys():
                    nfa_l.nfa_end.next["eps"] = []
                nfa_l.nfa_end.next["eps"].append( nfa_r.nfa_start )
                nfa.nfa_start.next["eps"] = [ nfa_l.nfa_start ]
                if "eps" not in nfa_r.nfa_end.next.keys():
                    nfa_r.nfa_end.next["eps"] = []
                nfa_r.nfa_end.next["eps"].append( nfa.nfa_end )
                return nfa

        return None

    def _regexp_to_nfa(self, file_name):
        """Read the grammar file and return one NFA per rule.

        A rule's 1-based position in the file becomes its token type.
        """
        nfas = []
        cur_type = 0
        reg_iter = file_reader(file_name)
        for reg_line in reg_iter:
            cur_type += 1
            # NOTE(review): -2 drops TWO trailing characters, which only makes
            # sense for CRLF line endings — confirm against the grammar file.
            r = len(reg_line[1]) - 2
            self._cur_cache = {}
            self._cur_reg_exp = self._unflod(reg_line[1][0:r])
            nfas.append( self._build_nfa(0, len(self._cur_reg_exp), cur_type) )
        return nfas

    def _combine_nfa(self, nfas = None):
        """Join the per-rule NFAs under the shared start state via epsilon
        edges.  (Default changed from a mutable [] to None — same effect,
        without the shared-mutable-default pitfall.)"""
        self._fa.nfa_start.next["eps"] = []
        for nfa in (nfas or []):
            self._fa.nfa_start.next["eps"].append( nfa.nfa_start )

    def regexp_to_dfa(self, file_name):
        """Full pipeline: grammar file -> per-rule NFAs -> combined NFA -> DFA."""
        nfas = self._regexp_to_nfa(file_name)
        self._combine_nfa(nfas)
        self._fa.nfa_to_dfa()

tokens = []

def _append_token(cur, cur_token, cur_line, pos):
    """Classify the finished lexeme *cur_token* (accepted in DFA state *cur*)
    and append a [text, type_name, line, column] entry to the global list."""
    if cur.type == 1 or cur.type == 2:
        # numeric rule: the presence of '.' distinguishes decimal from integer
        if cur_token.find('.') != -1:
            tokens.append( [cur_token, token_type[2], cur_line, pos] )
        else:
            tokens.append( [cur_token, token_type[1], cur_line, pos] )
    elif cur_token in key_words:
        # identifiers that match a reserved word become keyword tokens
        tokens.append( [cur_token, token_type[100], cur_line, pos] )
    else:
        tokens.append( [cur_token, token_type[cur.type], cur_line, pos] )

def get_token(line, reg_dear, cur_line):
    """Scan one source *line* with the DFA held by *reg_dear* and append every
    recognized token to the global ``tokens`` list.

    On a lexical error prints "lex error" and terminates the process.

    Fixes vs. the original: both whitespace-skipping loops now test the bound
    BEFORE indexing (the original evaluated line[l] first and could raise
    IndexError on all-blank lines or lines without a trailing newline), and
    the duplicated token-append logic is factored into _append_token.
    """
    l = 0
    r = len(line)
    cur_token = ''
    cur = reg_dear._fa.dfa_start

    # NOTE(review): -2 drops the character before '\n' as well, which only
    # makes sense for CRLF input — confirm against the source file encoding.
    if line[-1] == '\n': r -= 2 

    while l < r:

        if l == 0:
            # skip leading indentation (bounds-checked; a whitespace-only
            # line is now a no-op instead of a crash/"lex error")
            while l < r and line[l] in ['\t', ' ']: l += 1
            if l >= r: return

        if line[l] in cur.next.keys():
            # the DFA can consume this character: extend the current lexeme
            cur_token += line[l]
            cur = cur.next[line[l]]
        else:
            # DFA stuck: the current lexeme must end here in an accepting state
            if cur.is_end_state == False :
                print "lex error"
                sys.exit()
                return
            elif (cur.type == 1 or cur.type == 2) and line[l] not in [' ', '\t']:
                # a number must be followed by whitespace
                print "lex error"
                sys.exit()
                return
            _append_token(cur, cur_token, cur_line, l)

            # skip the separating whitespace (bound tested before indexing)
            while l < r and line[l] in [' ', '\t']: l += 1
            if l >= r: return

            # restart the DFA on the next lexeme's first character
            cur_token = line[l]
            cur = reg_dear._fa.dfa_start
            cur = cur.next[line[l]]
        l += 1

    # end of line: the last lexeme must also end in an accepting state
    if cur.is_end_state == False :
        print "lex error"
        sys.exit()
        return
    _append_token(cur, cur_token, cur_line, l)

def lex(file_name, reg_dear):
    """Tokenize every line of *file_name* with the DFA in *reg_dear*,
    numbering lines from 1; results accumulate in the global ``tokens``."""
    for line_number, source_line in enumerate(code_reader(file_name), 1):
        get_token(source_line, reg_dear, line_number)




# Debug
def bfs(fa):
    vis = []
    node = fa
    queue = Queue.Queue()
    queue.put(node)
    vis.append(node.id)
    print "-- begin -- \n"
    print "nfa_start node = %d end = %s" % (node.id, str(node.is_end_state))
    while not queue.empty():
        cur = queue.get()
        for key in cur.next.keys():
           for next_node in cur.next[key]:
            print "u = %d, v = %d, key = %s end = %d" % (cur.id, next_node.id, key, next_node.is_end_state)
            if next_node.id not in vis:
                vis.append(next_node.id)          
                queue.put(next_node)
    print "\n-- end -- \n"

#Debug    

if __name__ == '__main__':
  
    # Build the DFA from the grammar file, then tokenize the source file and
    # dump every recognized token.
    reg_dealer = Regexp()
    reg_dealer.regexp_to_dfa("lex_test.txt")
    lex("code.txt", reg_dealer)
    for item in tokens:
        print item
# bfs(reg_dealer._fa.dfa_start)