import sys

from lexer import Lexer, Token

class Parser(object):
    """Recursive-descent parser that converts a small TeX subset
    (sectioning commands, ``enumerate`` and code-list environments,
    plain text) into Markdown emitted through :meth:`log`.

    The lexer is used in peek/consume style: ``lexer.next()`` peeks at
    the current token without consuming it, and ``lexer.advance()``
    consumes it (see :meth:`match`, which pairs the two).
    """

    # Initial (and only) parser state.
    INIT = 0

    def __init__(self, f):
        """f: an open file-like object containing the TeX source."""
        self.f = f
        self.lexer = Lexer(f)
        # When True, Markdown goes to stdout; otherwise log() writes to
        # self.output.  NOTE(review): self.output is never initialized
        # here -- a caller must set it before flipping this flag to
        # False, or log() will raise AttributeError.
        self.print_to_console = True
        self.status = Parser.INIT

    def parse(self):
        """Main loop: dispatch on the peeked token until input runs out."""
        while self.lexer.has_next():
            t = self.lexer.next()  # peek only; consumed by the handlers
            if self.status == Parser.INIT:
                if t.token_type == Token.SLASH:
                    self.parse_slash()
                elif t.token_type == Token.TEXT:
                    self.log(t.value)
                    self.match(Token.TEXT)
                else:
                    # Unexpected token: report it and skip it.  Without
                    # the advance() the same peeked token would be seen
                    # again on every iteration -- an infinite loop.
                    print(t)
                    self.lexer.advance()

    def parse_text(self):
        """Consume and log a run of consecutive TEXT tokens."""
        while True:
            t = self.lexer.next()  # peek
            if t.token_type != Token.TEXT:
                break
            self.log(t.value)
            self.lexer.advance()

    def process_paragraph(self, tt):
        """Translate a sectioning command (``\\chapter{...}`` etc., already
        peeked as token type *tt*) into a Markdown heading of matching depth."""
        self.match(tt)

        # TeX sectioning level -> Markdown heading prefix.
        if tt == Token.CHAPTER:
            self.log("# ")
        elif tt == Token.SECTION:
            self.log("## ")
        elif tt == Token.SUB_SECTION:
            self.log("### ")
        elif tt == Token.SUB_SUB_SECTION:
            self.log("#### ")

        self.match(Token.LEFT_BRACE)
        self.parse_text()
        self.match(Token.RIGHT_BRACE)
        self.log("\n")

    def process_enum(self):
        """Emit an ``enumerate`` environment as a numbered Markdown list.

        Consumes item slashes as it goes; returns as soon as the END
        token is peeked, leaving END for process_begin() to match.
        """
        i = 1
        while self.match(Token.SLASH):
            t = self.lexer.next()  # peek: either ITEM or END
            if t.token_type == Token.END:
                return

            self.match(Token.ITEM)
            self.log("\n%d. " % (i, ))
            i += 1
            self.parse_text()

    def process_code_list(self):
        """Emit a code-list environment as a fenced Markdown code block.

        Raw mode makes the lexer hand the body back as plain TEXT tokens.
        NOTE(review): raw mode is never switched off here -- presumably
        the lexer resets it itself; confirm against the Lexer implementation.
        """
        self.lexer.set_raw_mode()
        self.log("```\n")
        self.parse_text()
        self.log("```\n")
        self.match(Token.SLASH)

    def process_begin(self):
        """Handle ``\\begin{...} ... \\end{...}`` for the supported
        environments (ENUM and CODE_LIST)."""
        self.match(Token.BEGIN)
        self.match(Token.LEFT_BRACE)

        t = self.lexer.next()  # peek at the environment name
        if t.token_type == Token.ENUM:
            self.match(Token.ENUM)
            self.match(Token.RIGHT_BRACE)
            self.process_enum()

        elif t.token_type == Token.CODE_LIST:
            self.match(Token.CODE_LIST)
            self.match(Token.RIGHT_BRACE)
            self.process_code_list()

        # The trailing slash was already consumed inside process_enum /
        # process_code_list, so by the time we get here we can match the
        # `end` token directly.
        self.match(Token.END)
        self.match(Token.LEFT_BRACE)

        # Close out the same environment name that was opened above.
        if t.token_type == Token.ENUM:
            self.match(Token.ENUM)
        elif t.token_type == Token.CODE_LIST:
            self.match(Token.CODE_LIST)

        self.match(Token.RIGHT_BRACE)
        self.log("\n")

    def parse_slash(self):
        """Handle a TeX command: the slash has been peeked by the caller."""
        self.match(Token.SLASH)
        t = self.lexer.next()  # peek at the command name

        # Sectioning commands.
        if (t.token_type == Token.CHAPTER or t.token_type == Token.SECTION
            or t.token_type == Token.SUB_SECTION or t.token_type == Token.SUB_SUB_SECTION):
            self.process_paragraph(t.token_type)

        # begin/end environments.
        if t.token_type == Token.BEGIN:
            self.process_begin()

    def match(self, token_type):
        """Consume the current token, asserting it has *token_type*.

        Raises OSError on a mismatch; returns True otherwise.
        """
        t = self.lexer.next()
        if t.token_type != token_type:
            raise OSError("Expect %d, but got %d" % (token_type, t.token_type))

        self.lexer.advance()
        return True

    def log(self, value):
        """Write a fragment of Markdown output (no newline appended)."""
        if self.print_to_console:
            print(value, end = "")
        else:
            self.output.write(value)


def convert_to_md(file_name):
    """Parse the TeX file at *file_name* and print Markdown to stdout."""
    # Context manager ensures the file is closed even if parsing raises
    # (the original leaked the handle on any parse error).
    with open(file_name, "r") as f:
        Parser(f).parse()

# CLI entry point: the first command-line argument is the TeX file to convert.
if __name__ == "__main__":
    convert_to_md(sys.argv[1])
