import argparse
import json
import os.path
from posixpath import expanduser
import lexer_checker


# Marker line that delimits named sections inside a template file.
TEMPLATE_MARK           = '// =((^3^))= EMMCC '
TEMPLATE_MARK_LEN       = len(TEMPLATE_MARK)
# Parser states: outside a section / inside a section.
TEMPLATE_STATE_START    = 0
TEMPLATE_STATE_END      = 1
def cutTemplate(template_file) -> dict:
    '''Split a template file into named sections.

    Sections are delimited by pairs of TEMPLATE_MARK lines: the text that
    follows the mark on the opening line is the section name, and every line
    up to (but excluding) the closing mark line is the section content.

    Args:
        template_file: path of the template file
    Return:
        {name : content}'''
    ret = {}
    with open(template_file) as f:
        state = TEMPLATE_STATE_START
        name = ''
        content = ''
        for line in f:
            if line.startswith(TEMPLATE_MARK):
                if state == TEMPLATE_STATE_START:
                    # Strip only the trailing newline; the original [:-1]
                    # would truncate the name on a final line without '\n'.
                    name = line[TEMPLATE_MARK_LEN:].rstrip('\n')
                    content = ''
                    state = TEMPLATE_STATE_END
                else:
                    ret[name] = content
                    state = TEMPLATE_STATE_START
            elif state == TEMPLATE_STATE_END:
                content += line
    return ret


# Directory holding the C++ template files; set by the __main__ block before
# lexer_gen() runs.
template_dir = ''
# For each supported <cctype> predicate, the set of characters for which it
# returns true; used to seed the per-character matcher dispatch map for dfa
# tokens.  Backslashes are written as '\\' — the original '[\]' was an
# invalid escape sequence (SyntaxWarning on modern Python); the string
# values themselves are unchanged.
dfa_CONDITION_FUNCTION_INIT_CHARS = {
    'isalnum':'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
    'isalpha':'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
    'islower':'abcdefghijklmnopqrstuvwxyz',
    'isupper':'ABCDEFGHIJKLMNOPQRSTUVWXYZ',
    'isdigit':'0123456789',
    'isxdigit':'0123456789abcdefABCDEF',
    'isgraph':'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~',
    'isspace':' \f\r\t\n\v',
    'isblank':' \t',
    'isprint':'0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~ ',
    'ispunct':'!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
}

def token_name_to_cpp_type(token_name):
    """Return the C++ enumerator expression for a token name."""
    return f'TokenType::{token_name}'

def gen_dfa_class_name(name):
    """Return the generated C++ matcher class name for the dfa *name*."""
    return f'CustomDfa{name}Matcher'

def get_dfa_init_chars(dfa_info) -> list:
    '''Collect every character that can start a match of this dfa.

    Walks the transitions out of the 'Start' state and expands each
    condition into the concrete characters it accepts.

    Args:
        dfa_info: {state_name: [ {cond, action, next}, ... ]}
    Return:
        list of single-character strings (duplicates preserved)'''
    init_chars = []
    for trans_info in dfa_info['Start']:
        condition = trans_info['cond']
        if not condition:
            # Unconditional transition: contributes no start characters.
            continue
        cond_type = condition[0]
        cond_value = condition[1]
        if cond_type == 'chars':
            # cond_value is an explicit sequence of characters.
            init_chars.extend(cond_value)
        elif cond_type == 'range':
            # Inclusive character range [cond_value[0], cond_value[1]].
            init_chars.extend(chr(c) for c in range(ord(cond_value[0]), ord(cond_value[1]) + 1))
        elif cond_type == 'function':
            # cond_value names a <cctype> predicate; use its precomputed set.
            init_chars.extend(dfa_CONDITION_FUNCTION_INIT_CHARS[cond_value])
    return init_chars

def dfa_gen(name, dfa_info) -> str:
    '''Generate the C++ source of a custom dfa Matcher class.

    Args:
        name: dfa name from the lexer description
        dfa_info: {state_name: state_list}
            state_list: [state_info ...]
            state_info: {cond, action, next}
    Return:
        the C++ class definition text (ends with a blank line)'''
    class_name = gen_dfa_class_name(name)
    # Built-in states first, then user states in description order.  The
    # original used a set, which made the generated enum order vary between
    # runs (hash randomization); a list keeps the output deterministic.
    state_names = ['Start', 'End', 'Endwith', 'Error']
    for state_name in dfa_info:
        if state_name not in state_names:
            state_names.append(state_name)
    ret = 'class %s : public Matcher {\npublic:\n'%(class_name)
    ret += '    enum class State {\n        '
    # Distinct loop variable: the original `for name in ...` shadowed the
    # `name` parameter.
    for state_name in state_names:
        ret += state_name + ', '
    ret = ret[:-2] + '\n'
    ret += '    };\n'
    ret += '    %s(TokenType type, size_t priority) {\n'%(class_name)
    ret += '        m_type = type;\n'
    ret += '        m_priority = priority;\n'
    ret += '    }\n'
    ret += '    bool match(Reader* reader, char& cur_ch, Token& token) {\n'
    ret += '        State s = State::Start;\n'
    ret += '        token.value.clear();\n'
    ret += '        token.row = reader->row();\n'
    ret += '        token.col = reader->col();\n'
    ret += '        while(true) {\n'
    ret += '            switch(s) {\n'
    for state_name in dfa_info:
        ret += '            case State::%s:\n'%(state_name)
        has_if_flag = False
        for state_info in dfa_info[state_name]:
            condition = state_info['cond']
            next_state = state_info['next']
            actions = state_info['action']
            # Translate abstract actions into C++ statements.
            action_codes = []
            for act in actions:
                if act == 'read':
                    action_codes.append('reader->getChar(cur_ch);')
                elif act == 'continue':
                    action_codes.append('continue;')
                elif act == 'push':
                    action_codes.append('token.value.push_back(cur_ch);')
            if len(condition) > 0:
                has_if_flag = True
                cond_type = condition[0]
                cond_value = condition[1]
                if cond_type == 'chars':
                    ret += '                if('
                    for ch in cond_value:
                        if ch == '\\':
                            ch = '\\\\'
                        ret += 'cur_ch == \'%s\' ||'%(ch)
                    ret = ret[:-3] + ') {\n'
                    for code in action_codes:
                        ret += '                    %s\n'%(code)
                    ret += '                    s = State::%s;\n'%(next_state)
                    ret += '                    break;\n'
                    ret += '                }\n'
                elif cond_type == 'range':
                    ret += '                if(cur_ch >= \'%s\' && cur_ch <= \'%s\') {\n'%(cond_value[0], cond_value[1])
                    for code in action_codes:
                        ret += '                    %s\n'%(code)
                    ret += '                    s = State::%s;\n'%(next_state)
                    ret += '                    break;\n'
                    ret += '                }\n'
                elif cond_type == 'function':
                    ret += '                if(std::%s(cur_ch)) {\n'%(cond_value)
                    for code in action_codes:
                        ret += '                    %s\n'%(code)
                    ret += '                    s = State::%s;\n'%(next_state)
                    # Bug fix: the 'chars' and 'range' branches emit a
                    # `break;` here but this branch did not, so the generated
                    # C++ fell through and a later unconditional transition
                    # could overwrite `s`.
                    ret += '                    break;\n'
                    ret += '                }\n'
                # validate_dfa_info guarantees there is no illegal cond_type
            else: # len(cond) == 0: unconditional transition
                codes = []
                codes.append('                s = State::%s;\n'%(next_state))
                for code in action_codes:
                    codes.append('                %s\n'%(code))
                # If an `if` was generated earlier in this case, wrap the
                # unconditional part in an `else` block and re-indent it.
                if has_if_flag == True:
                    ret += '                else {\n'
                    for idx in range(0, len(codes)):
                        codes[idx] = '    ' + codes[idx]
                for line in codes:
                    ret += line
                if has_if_flag == True:
                    ret += '                }\n'
                ret += '                break;\n'
    ret += '            case State::End:\n'
    ret += '                token.type = m_type;\n'
    ret += '                token.priority = m_priority;\n'
    ret += '                return true;\n'
    ret += '            case State::Endwith:\n'
    ret += '                token.type = m_type;\n'
    ret += '                token.priority = m_priority;\n'
    ret += '                return true;\n'
    ret += '            case State::Error:\n'
    ret += '                return false;\n'
    ret += '            } // switch\n'
    ret += '            reader->getChar(cur_ch);\n'
    ret += '        } //while\n'
    ret += '        return false;\n'
    ret += '    }\n'
    ret += '    TokenType m_type;\n'
    ret += '    size_t m_priority;\n'
    ret += '};\n\n'
    return ret

def lexer_gen(lexer_file_name) -> list:
    '''Generate the C++ source code of the lexer.

    Args:
        lexer_file_name: lexer description file name (JSON)
    Return:
        [h_content, cpp_content]'''
    # Validate the lexer description file; exit on any check failure.
    obj = None
    with open(lexer_file_name) as f:
        content = f.read()
        obj = json.loads(content)
    checker = lexer_checker.LexerChecker(list(dfa_CONDITION_FUNCTION_INIT_CHARS.keys()))
    try:
        checker.check(obj)
    except Exception as e:
        print(e)
        exit(1)

    # Build the content of lexer.h.
    enum_per_line = 4
    num = 0
    new_line_flag = False
    file_name = os.path.join(template_dir, 'lexer.h')
    h_cut = cutTemplate(file_name)
    h_content = h_cut['h1'] + '    '
    token_names = list(obj['tokens'].keys())
    token_names.append('EMMCC_INVALID')
    for token_name in token_names:
        h_content += token_name + ', '
        # Limit how many enumerators appear on one line.
        if num == enum_per_line - 1:
            h_content += '\n    '
            new_line_flag = True
        else:
            new_line_flag = False
        num = (num + 1) % enum_per_line
    # Trim the trailing separator left by the loop.
    if new_line_flag == False:
        h_content = h_content[:-2] # length of ", "
    else:
        h_content = h_content[:-7] # length of ", \n    "
    h_content += '\n' + h_cut['h2']

    # Build the content of lexer.cpp.
    file_name = os.path.join(template_dir, 'lexer.cpp')
    cpp_cut = cutTemplate(file_name)
    cpp_content = cpp_cut['cpp1']
    cpp_content += cpp_cut['cpp2']
    # Fill in the body of TokenTypeTostring.
    for token_name in obj['tokens']:
        cpp_content += '    case TokenType::' + token_name + ': return "' + token_name + '";\n'
    cpp_content += cpp_cut['cpp3']

    # Generate one matcher class per dfa.
    for dfa_name in obj['dfa']:
        cpp_content += dfa_gen(dfa_name, obj['dfa'][dfa_name])

    cpp_content += cpp_cut['cpp3_1']
    # Create the matcher object for every token and register it in the
    # per-first-character dispatch map.
    cpp_content += '    Matcher* matcher;\n'
    for token_name in obj['tokens']:
        token_type = obj['tokens'][token_name][0]
        token_value = obj['tokens'][token_name][1]
        token_priority = obj['tokens'][token_name][2]
        if token_type == 'char':
            cpp_content += '    matcher = new CharMatcher(%s, \'%s\', %d);\n'%(token_name_to_cpp_type(token_name), token_value, token_priority)
            cpp_content += '    m_matcher_map[\'%s\'].push_back(matcher);\n'%(token_value)
        elif token_type == 'string':
            cpp_content += '    matcher = new StringMatcher(%s, "%s", %d);\n'%(token_name_to_cpp_type(token_name), token_value, token_priority)
            cpp_content += '    m_matcher_map[\'%s\'].push_back(matcher);\n'%(token_value[0])
        elif token_type == 'dfa':
            dfa_class_name = gen_dfa_class_name(token_value)
            cpp_content += '    matcher = new %s(%s, %d);\n'%(dfa_class_name, token_name_to_cpp_type(token_name), token_priority)
            init_chars = get_dfa_init_chars(obj['dfa'][token_value])
            for ch in init_chars:
                cpp_content += '    m_matcher_map[\'%s\'].push_back(matcher);\n'%(ch)
        # NOTE: a second, unreachable `elif token_type == 'dfa': continue`
        # branch was removed here (the branch above already handles 'dfa').
        else:
            print('Error: unknown token type: ' + token_type)
            exit(1)
        cpp_content += '    m_matchers.push_back(matcher);\n'
    # Fill in the blank_chars initializer list.
    cpp_content += '    m_blank_chars = {'
    for char_name in obj['blank_chars']:
        cpp_content += '\'%s\', '%(char_name)
    cpp_content = cpp_content[:-2] + '};\n'
    cpp_content += cpp_cut['cpp4']

    return [h_content, cpp_content]

if __name__ == '__main__':
    ap = argparse.ArgumentParser('lexer_gen')
    ap.add_argument('-f', action='store', metavar='FILE', required=True, help='lexer file')
    ap.add_argument('-o', '--out_dir', action='store', metavar='DIR', required=False, default='.', help='output dir')
    ap.add_argument('--prefix', action='store', required=False, default='lexer', metavar='NAME', help='output file name prefix')
    ap.add_argument('--hpp', action='store_true', required=False, default=False, help='generate one .hpp file or .h and .cpp file')
    args = ap.parse_args()
    # Templates live next to this script; lexer_gen() reads the module-level
    # template_dir global.
    template_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'template')
    files = lexer_gen(args.f)
    if args.hpp == False:
        # Separate header and implementation files.
        file_name = os.path.join(args.out_dir, args.prefix + '.h')
        with open(file_name, 'w') as f:
            f.write(files[0])
        file_name = os.path.join(args.out_dir, args.prefix + '.cpp')
        with open(file_name, 'w') as f:
            f.write(files[1])
    else:
        # Single header-only file: header content followed by implementation.
        file_name = os.path.join(args.out_dir, args.prefix + '.hpp')
        with open(file_name, 'w') as f:
            # Bug fix: both calls were `f.wrte(...)`, which raised
            # AttributeError and made --hpp mode always crash.
            f.write(files[0])
            f.write(files[1])