import os
from tree_sitter_languages import get_language, get_parser
from importlib import resources
from pygments.lexers import guess_lexer_for_filename
from pygments.token import Token
from ast_util import get_ast_tree_from_code, query_ast_tree, parse_tree_captures_to_tags
from test_code.test_code_generator import test_code_1, test_code_2
def test_tree_sitter():
    """Explore tree-sitter parsing of an embedded Python snippet.

    Prints the full s-expression syntax tree, the positions of all bare
    string expression statements, the subset of those that are NOT
    docstrings, and finally the captures of an aider-style tags query
    (class/function definitions plus call references).
    """
    language = get_language('python')
    parser = get_parser('python')
    test_code = """
            import os
            #!shebang
            # License blah blah (Apache 2.0)
            "This is a module docstring."

            a = 1
            
            def adder(a,b):
                return a+b

            '''This
            is
            not
            a
            multiline
            comment.'''

            b = 2

            class Test:
                "This is a class docstring."

                'This is bogus.'

                def test(self):
                    "This is a function docstring."

                    "Please, no."

                    return 1

            c = 3
            adder(b,c)
        """
    tree = parser.parse(test_code.encode())
    node = tree.root_node
    # NOTE: the Chinese print strings below are runtime output, kept as-is.
    print("展示整个语法树的结构")
    print(node.sexp())

    # Look for statements that are a single string expression.
    # First, define the query pattern.
    stmt_str_pattern = '(expression_statement (string)) @stmt_str'
    stmt_str_query = language.query(stmt_str_pattern)
    # Run the query against the root node; captures are (node, name) pairs.
    stmt_strs = stmt_str_query.captures(node)
    stmt_str_points = {
        (n.start_point, n.end_point) for n, _ in stmt_strs
    }
    print("显示字符串声明的位置")
    print(stmt_str_points)

    # Docstrings are the FIRST string statement of a module / class body /
    # function body; the leading `.` anchors in the query enforce "first".
    doc_str_pattern = """
            (module . (comment)* . (expression_statement (string)) @module_doc_str)

            (class_definition
                body: (block . (expression_statement (string)) @class_doc_str))

            (function_definition
                body: (block . (expression_statement (string)) @function_doc_str))
        """
    doc_str_query = language.query(doc_str_pattern)
    doc_strs = doc_str_query.captures(node)
    doc_str_points = {
        (n.start_point, n.end_point) for n, _ in doc_strs
    }

    # String statements that are NOT docstrings, i.e. "string comments".
    comment_strs = stmt_str_points - doc_str_points
    print("显示注释字符串的位置")
    print(sorted(comment_strs))
    # Expected: [((7, 8), (12, 19)), ((19, 12), (19, 28)), ((24, 16), (24, 29))]
    # Each entry is ((start_row, start_col), (end_row, end_col)), 0-based.

    # Recap of what was printed above:
    # node.sexp(): the whole syntax tree as an s-expression.
    # stmt_str_points: positions of every string expression statement.
    # comment_strs: string statements that are not docstrings.
    # doc_str_points: module/class/function docstring positions.
    aider_str_pattern = """
        (class_definition
        name: (identifier) @name.definition.class) @definition.class

        (function_definition
        name: (identifier) @name.definition.function) @definition.function

        (call
            function: [
            (identifier) @name.reference.call
            (attribute
            attribute: (identifier) @name.reference.call)
        ]) @reference.call
    """
    aider_str_query = language.query(aider_str_pattern)
    # Loop variable renamed so it no longer shadows the root `node`.
    for cap_node, tag in aider_str_query.captures(node):
        print(f"node:{cap_node}")
        print(f"tag:{tag}")

def another_test_code():
    """Load a tags query from queries/tree-sitter-python-tags.scm, run it
    against test_code/test_code_generator.py, and print the raw captures.

    Both paths resolve against the current working directory, so run this
    from the project root.
    """
    try:
        # Packaged-resource alternative, kept for reference:
        # query_scm = resources.files(__package__).joinpath("queries", "tree-sitter-python-tags.scm")
        # query_scm = query_scm.read_text()
        scm_file_path = os.path.join(os.getcwd(), "queries", "tree-sitter-python-tags.scm")
        with open(scm_file_path, "r") as source_file:
            # Bug fix: original assigned the bound method `source_file.read`
            # (missing parentheses), which later crashed in language.query().
            query_scm = source_file.read()
        test_file_path = os.path.join(os.getcwd(), "test_code", "test_code_generator.py")
        with open(test_file_path, "r") as source_file:
            # Same missing-call bug fixed: source_code.encode() would raise
            # AttributeError on a bound method object.
            source_code = source_file.read()
        language = get_language('python')
        parser = get_parser('python')
        tree = parser.parse(source_code.encode())
        query = language.query(query_scm)
        captures = list(query.captures(tree.root_node))
        print(captures)
    except (OSError, KeyError):
        # OSError covers a missing/unreadable .scm or test file; KeyError is
        # what the packaged-resource lookup path could raise. The original
        # caught only KeyError and would crash on a missing file.
        print("error when loading scm file")

def test_lexer():
    """Guess a Pygments lexer from the filename and print the text of every
    Name token found in the embedded sample code."""
    fname = "test_code/test_code_generator.py"
    code = """
        import os
        #!shebang
        # License blah blah (Apache 2.0)
        "This is a module docstring."
    
        a = 1
    
        '''This
        is
        not
        a
        multiline
        comment.'''
        
        def adder(a,b):
            return a+b
    
        b = 2
    
        class Test:
            "This is a class docstring."

            'This is bogus.'

            def test(self):
                "This is a function docstring."

                "Please, no."

                return 1

        c = 3
        adder(b,c)
        
    """
    guessed_lexer = guess_lexer_for_filename(fname, code)
    # Keep only the text of tokens whose type falls under Token.Name.
    name_values = [
        value
        for token_type, value in guessed_lexer.get_tokens(code)
        if token_type in Token.Name
    ]
    for value in name_values:
        print(value)
if __name__ == '__main__':
    # Aider-style tags query shared by both sample files: class/function
    # definitions plus call references.
    query_scm = """
        (class_definition
        name: (identifier) @name.definition.class) @definition.class

        (function_definition
        name: (identifier) @name.definition.function) @definition.function

        (call
            function: [
            (identifier) @name.reference.call
            (attribute
            attribute: (identifier) @name.reference.call)
        ]) @reference.call
    """
    # Run the identical parse -> query -> tag pipeline over both samples
    # and print each file's tag list in order.
    for rel_fname, fname, code in (test_code_1(), test_code_2()):
        ast_tree = get_ast_tree_from_code(code)
        captures = query_ast_tree(ast_tree, query_scm)
        file_tags = list(
            parse_tree_captures_to_tags(rel_fname=rel_fname, fname=fname, captures=captures)
        )
        print(file_tags)