#!/usr/bin/env python3
# -*- coding:utf-8 -*-

import os
import sys

from ply.lex import TOKEN

try:
    # Package-relative import when loaded as part of the ctools package.
    from .lexer_common import *
except ImportError:
    # Fall back to a plain import when run as a standalone module/script.
    # (Was a bare `except:`, which would also hide SystemExit,
    # KeyboardInterrupt and unrelated errors raised inside lexer_common.)
    from lexer_common import *

import logging
from ctools.logext import *

# Module loggers from ctools.logext; exact roles of the four loggers are
# not visible here (presumably root/print/module/trace) — TODO confirm.
_rlog, _plog, _mlog, _tlog = get_loggers(__file__, __name__, propagate=False)

# Absolute directory containing this source file; used as the default
# output directory for the generated PLY lex table (see GVCLexer.__init__).
_fpath = os.path.dirname(os.path.realpath(__file__))

class GVCLexer(GVCLexerBase):
    """PLY lexer for the GVC language (a C-like syntax with extensions).

    Token rules largely mirror pycparser's C lexer: keywords, integer /
    floating / character constants, string literals, and preprocessor
    ``#line`` / ``#pragma`` handling.  GVC-specific additions are:

    * ``CBLOCK``               -- a ``{ ... }`` code block captured verbatim
    * ``KEEP`` / ``KEEPBLOCK`` -- ``KEEP( ... )`` capture blocks
    * ``CTREE`` / ``CTREEBLOCK`` -- ``CTREE( ... )`` capture blocks
    * ``LINKOP``               -- link operators built from ``> < + -`` or ``~``

    NOTE: PLY takes token regexes from the docstrings of ``t_*`` methods
    (or from the ``@TOKEN`` decorator) and orders function rules by their
    position in the source, so the rules below must not be reordered and
    their regex docstrings must not be edited as documentation.
    """
    def __init__(self, optimize=True, debug=False, lextab='ctools.ly_parser.gvc_lextab', taboutputdir=_fpath, **kwargs) -> None:
        """Build the lexer; the generated lex table defaults to living in
        this file's directory (``_fpath``)."""
        super().__init__(optimize, debug, lextab, taboutputdir, **kwargs)
    ##
    ## Reserved keywords
    ##
    keywords = (
        'AUTO', 'CHAR', 'CONST',
        'DOUBLE', 'ENUM',
        'FLOAT', 'INLINE', 'INT', 'LONG',
        'REGISTER', 'OFFSETOF',
        'SHORT', 'SIGNED', 'SIZEOF', 'STATIC', 'STRUCT', 'INVIS_STRUCT',
        'TYPEDEF', 'UNION', 'UNSIGNED', 'VOID',
        'VOLATILE', '__INT128',
    )

    # C11-style keywords; all currently disabled.
    keywords_new = (
        # '_BOOL', '_COMPLEX',
        # '_NORETURN', '_THREAD_LOCAL', '_STATIC_ASSERT',
        # '_ATOMIC', '_ALIGNOF', '_ALIGNAS',
        )

    # Maps source spelling -> token name, e.g. 'int' -> 'INT'.
    keyword_map = {}

    for keyword in keywords:
        keyword_map[keyword.lower()] = keyword

    # New-style keywords keep the second letter capitalized: '_BOOL' -> '_Bool'.
    for keyword in keywords_new:
        keyword_map[keyword[:2].upper() + keyword[2:].lower()] = keyword

    # GVC-specific tokens.
    mytokens = (
        # 'comment',
        'CBLOCK',       # code block
        'LINKOP',
        'KEEP', 'KEEPBLOCK',
        'CTREE', 'CTREEBLOCK',
    )

    ##
    ## All the tokens recognized by the lexer
    ##
    tokens = keywords + keywords_new + mytokens + (
        # Identifiers
        'ID',

        # Type identifiers (identifiers previously defined as
        # types with typedef)
        'TYPEID',

        # constants
        'INT_CONST_DEC', 'INT_CONST_OCT', 'INT_CONST_HEX', 'INT_CONST_BIN', 'INT_CONST_CHAR',
        'FLOAT_CONST', 'HEX_FLOAT_CONST',
        'CHAR_CONST',
        'WCHAR_CONST',
        'U8CHAR_CONST',
        'U16CHAR_CONST',
        'U32CHAR_CONST',

        # String literals
        'STRING_LITERAL',
        'WSTRING_LITERAL',
        'U8STRING_LITERAL',
        'U16STRING_LITERAL',
        'U32STRING_LITERAL',

        'TIMES',

        # Assignment
        'EQUALS',

        # Structure dereference (->)
        'ARROW',

        # Delimiters
        # NOTE(review): LBRACE/RBRACE are declared but have no rules below;
        # every '{' in the INITIAL state enters the cblock capture state
        # instead -- confirm the grammar never expects LBRACE/RBRACE.
        'LPAREN', 'RPAREN',         # ( )
        'LBRACKET', 'RBRACKET',     # [ ]
        'LBRACE', 'RBRACE',         # { }
        'COMMA', 'PERIOD',          # , .
        'SEMI', 'COLON',            # ; :


        # pre-processor
        'PPHASH',       # '#'
        'PPPRAGMA',     # 'pragma'
        'PPPRAGMASTR',
    )

    ##
    ## Lexer states: used for preprocessor \n-terminated directives
    ## and for the GVC capture blocks.
    ##
    states = (
        # ppline: preprocessor line directives
        #
        ('ppline', 'exclusive'),

        # pppragma: pragma
        #
        ('pppragma', 'exclusive'),

        # GVC capture blocks: { ... }, KEEP( ... ), CTREE( ... )
        ('cblock', 'exclusive'),
        ('keepblock', 'exclusive'),
        ('ctreeblock', 'exclusive'),
    )

    # Dispatch on '#': a #line-style directive enters the ppline state,
    # '#pragma' enters pppragma; any other '#' is emitted as a PPHASH token.
    # (line_pattern / pragma_pattern are provided by GVCLexerBase.)
    def t_PPHASH(self, t):
        r'[ \t]*\#'
        if self.line_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
            t.lexer.begin('ppline')
            self.pp_line = self.pp_filename = None
        elif self.pragma_pattern.match(t.lexer.lexdata, pos=t.lexer.lexpos):
            t.lexer.begin('pppragma')
        else:
            t.type = 'PPHASH'
            return t

    ##
    ## Rules for the ppline state
    ##
    # File name in a #line directive; must come after the line number.
    @TOKEN(string_literal)
    def t_ppline_FILENAME(self, t):
        if self.pp_line is None:
            self._error('filename before line number in #line', t)
        else:
            self.pp_filename = t.value.lstrip('"').rstrip('"')

    # Line number in a #line directive; only the first number is kept.
    @TOKEN(decimal_constant)
    def t_ppline_LINE_NUMBER(self, t):
        if self.pp_line is None:
            self.pp_line = t.value
        else:
            # Ignore: GCC's cpp sometimes inserts a numeric flag
            # after the file name
            pass
        _mlog.debug(f"t: {t}")
        _mlog.debug(f"pp_line: {self.pp_line}")

    # End of the directive: apply the collected line number / file name
    # and resume normal lexing.
    def t_ppline_NEWLINE(self, t):
        r'\n'
        if self.pp_line is None:
            self._error('line number missing in #line', t)
        else:
            self.lexer.lineno = int(self.pp_line)
            _mlog.debug("{} {}".format(t.lexer.lineno, self.lexer.lineno))

            if self.pp_filename is not None:
                self.filename = self.pp_filename

        t.lexer.begin('INITIAL')

    # The literal 'line' keyword inside the directive; consumed (logged only).
    def t_ppline_PPLINE(self, t):
        r'line'
        _mlog.info("{} {}: {}".format(
            t.lexer.lineno, self.lexer.lineno, t.value.strip()))

    t_ppline_ignore = ' \t'

    def t_ppline_error(self, t):
        self._error('invalid #line directive', t)

    ##
    ## Rules for the pppragma state
    ##
    def t_pppragma_NEWLINE(self, t):
        r'\n'
        t.lexer.lineno += 1
        t.lexer.begin('INITIAL')

    def t_pppragma_PPPRAGMA(self, t):
        r'pragma'
        return t

    t_pppragma_ignore = ' \t'

    # Everything after 'pragma' up to the newline is one PPPRAGMASTR token.
    def t_pppragma_STR(self, t):
        '.+'
        t.type = 'PPPRAGMASTR'
        return t

    def t_pppragma_error(self, t):
        self._error('invalid #pragma directive', t)


    # Match the first {. Enter cblock state.
    def t_cblock(self, t):
        r'\{'
        t.lexer.code_start = t.lexer.lexpos     # Record the starting position
        t.lexer.level = 1                       # Initial brace level
        t.lexer.begin('cblock')                 # Enter 'cblock' state

    # Rules for the cblock state
    def t_cblock_lbrace(self, t):
        r'\{'
        t.lexer.level += 1

    def t_cblock_rbrace(self, t):
        r'\}'
        t.lexer.level -= 1

        # If closing brace, return the code fragment
        if t.lexer.level == 0:
            # Everything between the outermost braces, braces excluded.
            t.value = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-1]
            t.type = "CBLOCK"
            t.lexer.lineno += t.value.count('\n')
            t.lexer.begin('INITIAL')
            # Fixed log label: this is the CBLOCK rule (was "KEEPBLOCK").
            _tlog.debug("CBLOCK: {}, {}".format(t.lexer.lineno, t.value))
            return t

    # Ignored characters (whitespace)
    t_cblock_ignore = " \t\n"

    # For bad characters, we just skip over it
    def t_cblock_error(self, t):
        t.lexer.skip(1)

    # Match the KEEP keyword: emit a KEEP token and enter the keepblock
    # state to capture the parenthesized body that follows.
    def t_keepblock(self, t):
        r'KEEP'
        t.lexer.level = 0
        t.lexer.begin('keepblock')
        t.value = 'KEEP'
        t.type = "KEEP"
        t.lexer.code_start = t.lexer.lexpos    # Record the starting position
        return t

    # Rules for the keepblock state
    def t_keepblock_lparen(self, t):
        r'\('
        t.lexer.level += 1

    def t_keepblock_rparen(self, t):
        r'\)'
        t.lexer.level -= 1

        # If closing paren, return the code fragment
        if t.lexer.level == 0:
            val = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-1]
            # Strip everything up to and including the opening '('.
            pos = val.find('(')
            t.value = val[pos+1:]
            t.type = "KEEPBLOCK"
            t.lexer.lineno += t.value.count('\n')
            t.lexer.begin('INITIAL')
            return t

    # Ignored characters (whitespace)
    t_keepblock_ignore = " \t\n"

    # For bad characters, we just skip over it
    def t_keepblock_error(self, t):
        t.lexer.skip(1)

    # Match the CTREE keyword: emit a CTREE token and enter the ctreeblock
    # state to capture the parenthesized body that follows.
    def t_ctreeblock(self, t):
        r'CTREE'
        t.lexer.level = 0
        t.lexer.begin('ctreeblock')
        t.value = 'CTREE'
        t.type = "CTREE"
        t.lexer.code_start = t.lexer.lexpos    # Record the starting position
        return t

    # Rules for the ctreeblock state
    def t_ctreeblock_lparen(self, t):
        r'\('
        t.lexer.level += 1

    def t_ctreeblock_rparen(self, t):
        r'\)'
        t.lexer.level -= 1

        # If closing paren, return the code fragment
        if t.lexer.level == 0:
            val = t.lexer.lexdata[t.lexer.code_start:t.lexer.lexpos-1]
            # Strip everything up to and including the opening '('.
            pos = val.find('(')
            t.value = val[pos+1:]
            t.type = "CTREEBLOCK"
            t.lexer.lineno += t.value.count('\n')
            t.lexer.begin('INITIAL')
            _mlog.debug(f'ctree: {t.value}')
            return t

    # Ignored characters (whitespace)
    t_ctreeblock_ignore = " \t\n"

    # For bad characters, we just skip over it
    def t_ctreeblock_error(self, t):
        t.lexer.skip(1)
    ##
    ## Rules for the normal state
    ##
    t_ignore = ' \t'

    # Newlines
    def t_NEWLINE(self, t):
        r'\n+'
        n_cnt = t.value.count('\n')
        t.lexer.lineno += n_cnt
        _mlog.debug(f"t.lexer.lineno: {t.lexer.lineno}, {n_cnt}")

    # NOTE(review): 'typedef' is already recognized via t_ID + keyword_map
    # (PLY tries function rules before string rules), so this rule is
    # likely unreachable -- confirm before removing.
    t_TYPEDEF           = r'typedef'

    # Link operators: up to five of '> < + -' in a row, or '~'.
    # NOTE(review): PLY sorts string rules by regex length, so this rule
    # also matches '->' and shadows t_ARROW -- confirm this is intended.
    t_LINKOP            = r'[><\+-]{1,5}|~'

    t_TIMES             = r'\*'
    t_ARROW             = r'\->'

    # Assignment operators
    t_EQUALS            = r'='

    # Delimiters
    t_LPAREN            = r'\('
    t_RPAREN            = r'\)'
    t_LBRACKET          = r'\['
    t_RBRACKET          = r'\]'
    t_COMMA             = r','
    t_PERIOD            = r'\.'
    t_SEMI              = r';'
    t_COLON             = r':'

    t_STRING_LITERAL = string_literal

    # The following floating and integer constants are defined as
    # functions to impose a strict order (otherwise, decimal
    # is placed before the others because its regex is longer,
    # and this is bad)
    #
    @TOKEN(floating_constant)
    def t_FLOAT_CONST(self, t):
        return t

    @TOKEN(hex_floating_constant)
    def t_HEX_FLOAT_CONST(self, t):
        return t

    @TOKEN(hex_constant)
    def t_INT_CONST_HEX(self, t):
        return t

    @TOKEN(bin_constant)
    def t_INT_CONST_BIN(self, t):
        return t

    @TOKEN(bad_octal_constant)
    def t_BAD_CONST_OCT(self, t):
        msg = "Invalid octal constant"
        self._error(msg, t)

    @TOKEN(octal_constant)
    def t_INT_CONST_OCT(self, t):
        return t

    @TOKEN(decimal_constant)
    def t_INT_CONST_DEC(self, t):
        return t

    # Must come before bad_char_const, to prevent it from
    # catching valid char constants as invalid
    #
    @TOKEN(multicharacter_constant)
    def t_INT_CONST_CHAR(self, t):
        return t

    @TOKEN(char_const)
    def t_CHAR_CONST(self, t):
        return t

    @TOKEN(wchar_const)
    def t_WCHAR_CONST(self, t):
        return t

    @TOKEN(u8char_const)
    def t_U8CHAR_CONST(self, t):
        return t

    @TOKEN(u16char_const)
    def t_U16CHAR_CONST(self, t):
        return t

    @TOKEN(u32char_const)
    def t_U32CHAR_CONST(self, t):
        return t

    @TOKEN(unmatched_quote)
    def t_UNMATCHED_QUOTE(self, t):
        msg = "Unmatched '"
        self._error(msg, t)

    @TOKEN(bad_char_const)
    def t_BAD_CHAR_CONST(self, t):
        msg = "Invalid char constant %s" % t.value
        self._error(msg, t)

    @TOKEN(wstring_literal)
    def t_WSTRING_LITERAL(self, t):
        return t

    @TOKEN(u8string_literal)
    def t_U8STRING_LITERAL(self, t):
        return t

    @TOKEN(u16string_literal)
    def t_U16STRING_LITERAL(self, t):
        return t

    @TOKEN(u32string_literal)
    def t_U32STRING_LITERAL(self, t):
        return t

    # unmatched string literals are caught by the preprocessor

    @TOKEN(bad_string_literal)
    def t_BAD_STRING_LITERAL(self, t):
        msg = "String contains invalid escape code"
        self._error(msg, t)

    # Identifiers; keywords are resolved through keyword_map.
    @TOKEN(identifier)
    def t_ID(self, t):
        t.type = self.keyword_map.get(t.value, "ID")
        _mlog.debug(f"{t.lexer.lineno} {t.type}, {t.value}")
        # TODO: type_lookup_func
        # if t.type == 'ID' and self.type_lookup_func(t.value):
        #     t.type = "TYPEID"
        return t

    # C block comments and // line comments are discarded; keep the line
    # count accurate across multi-line block comments.
    def t_comment(self, t):
        r'(/\*(.|\n)*?\*/)|(//.*)'
        t.lexer.lineno += t.value.count('\n')

    def t_error(self, t):
        msg = 'Illegal character %s' % repr(t.value[0])
        self._error(msg, t)

# Generate the PLY lex table module on first import if it is missing.
# chdir into this file's directory so the table lands next to this file
# regardless of the caller's working directory.
if not os.path.exists(os.path.join(_fpath, 'gvc_lextab.py')):
    # Fixed: `_fpath + 'gvc_lextab.py'` was missing a path separator, so
    # the existence check never matched and the table was always rebuilt.
    curr = os.getcwd()
    os.chdir(_fpath)
    try:
        prepare_lextab(GVCLexer())
    finally:
        # Always restore the caller's working directory, even on failure.
        os.chdir(curr)

