# _*_ coding: utf-8 _*_
# .@FileName:create_node_exp
# .@Date....:2025-01-29 : 16 : 36
# .@Author..:LiuJingYu
"""
launch:
   import maya.cmds as cmds
   FileName.main() 
"""
from __future__ import unicode_literals, print_function
import re
from typing import *
# class TokenBase(object):
#     def __init__(self, s):
#         self.s = s
#     def __repr__(self):
#         return "{}<{}>".format(self.__class__.__name__, self.s)
#
# class GenNameExpExc(Exception):
#     pass
#
# class NameToken(TokenBase):
#     pass
#
# class ValueNameToken(TokenBase):
#     pass
#
# skip_space = re.compile(r"[ \t]+")
# name_match = re.compile(r"[a-zA-Z0-9_]+")
# value_name_match = re.compile(r"\{[a-zA-Z0-9_]+\}")
#
# def _lex(exp):
#     exp = exp
#     while True:
#         m = skip_space.match(exp)
#
#         if m:
#             exp = exp[m.end():]
#
#         if len(exp) == 0:
#             return
#
#         m = name_match.match(exp)
#
#         if m:
#             token = NameToken(exp[m.start(): m.end()])
#             exp = exp[m.end():]
#             yield token
#             continue
#
#         m = value_name_match.match(exp)
#
#         if m:
#             token = ValueNameToken(exp[m.start()+1: m.end()-1])
#             exp = exp[m.end():]
#             yield token
#             continue
#
#         raise GenNameExpExc("lex error")
#
#
# def compile(exp, values):
#
#     tokens = list(_lex(exp))
#     for kv in values:
#         name = ""
#         for t in tokens:
#             if isinstance(t, NameToken):
#                 name += t.s
#             else:
#                 v = kv.get(t.s)
#                 if v is None:
#                     raise GenNameExpExc("ket not found")
#                 name += str(v)
#         yield name
#
# if __name__ == "__main__":
#     test_exp = "bix_{name_id}_joint"
#
#     print(list(_lex(test_exp)))
#
#     print(list(compile(test_exp,
#                        [
#                            {"name_id": 0},
#                            {"name_id": 1},
#                            {"name_id": 2}
#                        ])))






# -*- coding: utf-8 -*-

class TokenBase(object):
    """Base class for lexical tokens (Python 3.7 compatible).

    Attributes:
        s (str): The raw source text this token was matched from.
    """

    def __init__(self, s: str):
        self.s = s

    def __repr__(self) -> str:
        # Parser error messages interpolate token lists; without this,
        # tokens print as opaque "<... object at 0x...>" and the errors
        # are undebuggable. (Same format the earlier implementation used.)
        return "{}<{}>".format(type(self).__name__, self.s)


class NameToken(TokenBase):
    """Token for an identifier: one or more of [a-zA-Z0-9_]."""


class CoToken(TokenBase):
    """Token for the colon separator ':'."""


class LFToken(TokenBase):
    """Token for a line break ('\\n' or '\\r\\n')."""



class ExpExc(Exception):
    """Raised for any lexing or parsing failure in the expression parser."""



class Exp:
    """Parser for simple "name:name" pair expressions (Python 3.7 compatible).

    Grammar:
        - identifier: one or more of [a-zA-Z0-9_]
        - separator:  a single colon ':'
        - pair:       <identifier>:<identifier>
        - pairs may be separated by spaces, tabs and line breaks

    Attributes:
        exp (str): The expression to parse, stripped of outer whitespace.
    """

    # Pre-compiled lexer patterns (built once, shared by all instances).
    _NAME_PATTERN: Pattern = re.compile(r"[a-zA-Z0-9_]+")   # identifier
    _COLON_PATTERN: Pattern = re.compile(r":")              # separator
    _LINEBREAK_PATTERN: Pattern = re.compile(r"\n|\r\n")    # line break
    _WHITESPACE_PATTERN: Pattern = re.compile(r"[ \t]+")    # skipped blanks

    def __init__(self, exp: str):
        """Store the expression, trimming leading/trailing whitespace.

        Args:
            exp: The raw expression text.
        """
        self.exp = exp.strip()

    def __iter__(self) -> Generator[Tuple[str, str], None, None]:
        """Parse the expression, yielding one (name, value) tuple per pair.

        Yields:
            Tuple[str, str]: The two identifiers of each "name:name" pair,
                in source order.

        Raises:
            ExpExc: On an incomplete trailing pair or an invalid token run.
        """
        tokens: List[TokenBase] = list(self._lex())
        count = len(tokens)
        # Walk with an index instead of list.pop(0)/re-slicing, which made
        # the original loop accidentally O(n^2) on long inputs.
        idx = 0

        while idx < count:
            # Line breaks between pairs carry no meaning — skip them.
            while idx < count and isinstance(tokens[idx], LFToken):
                idx += 1

            rest = tokens[idx:]
            # Fewer than three tokens cannot form a "name:name" pair.
            if len(rest) < 3:
                if rest:
                    raise ExpExc(f"不完整的表达式，剩余token: {rest}")
                return

            # A valid pair is exactly NameToken, CoToken, NameToken.
            if (isinstance(rest[0], NameToken) and
                    isinstance(rest[1], CoToken) and
                    isinstance(rest[2], NameToken)):
                yield rest[0].s, rest[2].s
                idx += 3
            else:
                # NOTE(review): this "position" is the combined text length
                # of the offending tokens, not a true source offset — kept
                # as-is to preserve the original error text.
                error_pos = sum(len(t.s) for t in rest[:3])
                raise ExpExc(f"无效的语法结构，位置{error_pos}: {rest[:3]}")

    def _lex(self) -> Generator[TokenBase, None, None]:
        """Tokenise the expression into Name/Colon/LineBreak tokens.

        Whitespace (spaces/tabs) separates tokens but produces none.

        Yields:
            TokenBase: The next token in source order.

        Raises:
            ExpExc: When no pattern matches the current character.
        """
        src = self.exp
        end = len(src)
        # Scan in place with Pattern.match(src, pos) rather than repeatedly
        # slicing the string, which was O(n^2) in the original.
        pos = 0

        while pos < end:
            ws_match = self._WHITESPACE_PATTERN.match(src, pos)
            if ws_match:
                pos = ws_match.end()
                continue

            name_match = self._NAME_PATTERN.match(src, pos)
            if name_match:
                pos = name_match.end()
                yield NameToken(name_match.group())
                continue

            colon_match = self._COLON_PATTERN.match(src, pos)
            if colon_match:
                pos = colon_match.end()
                yield CoToken(colon_match.group())
                continue

            lf_match = self._LINEBREAK_PATTERN.match(src, pos)
            if lf_match:
                pos = lf_match.end()
                yield LFToken(lf_match.group())
                continue

            raise ExpExc(f"无法识别的字符 '{src[pos]}' 在位置 {pos}")


if __name__ == "__main__":
    # Smoke test: two "name:type" pairs padded with blank/whitespace lines.
    test_exp = """\t
\r\n
Loc0001:transform
Loc0002:mesh
    """

    try:
        pairs = list(Exp(test_exp))
        print("解析结果:")
        for name, value in pairs:
            print(f"  {name} => {value}")
        print("=" * 20)
    except ExpExc as err:
        print(f"解析错误: {str(err)}")

