import jieba.posseg as pseg
import re

# POS tag -> Chinese description mapping (jieba / ICTCLAS-style tag set).
# Unknown tags fall back to the raw tag via get_pos_chinese().
POS_MAPPING = {
    'n': '名词', 'nr': '人名', 'ns': '地名', 'nt': '机构名', 'nz': '其他专名',
    'v': '动词', 'vd': '动副词', 'vn': '名动词', 'vx': '形式动词',
    'a': '形容词', 'ad': '副形词', 'an': '名形词', 'd': '副词', 'r': '代词',
    's': '处所词', 'f': '方位词', 'b': '区别词', 'u': '助词', 'uzhe': '着',
    'ule': '了', 'uguo': '过', 'ude': '的/地', 'usuo': '所', 'udeng': '等',
    'uyy': '一样', 'udh': '的话', 'c': '连词', 'cc': '并列连词', 'p': '介词',
    'pba': '把', 'pbei': '被', 'xc': '其他虚词', 'e': '叹词', 'y': '语气词',
    'o': '拟声词', 'h': '前缀', 'k': '后缀', 'x': '非语素字', 'w': '标点符号',
    'm': '数词', 'mq': '数量词', 'q': '量词', 'qt': '时量词', 'qv': '动量词',
    'qvm': '名量词'
}

def get_pos_chinese(pos):
    """Return the Chinese description of a POS tag, or the tag itself if unmapped."""
    try:
        return POS_MAPPING[pos]
    except KeyError:
        return pos

def analyze_pos_composition(text):
    """Segment *text* with jieba and return a list of (word, POS-flag) tuples."""
    segmented = pseg.cut(text.strip())
    return [tuple(pair) for pair in segmented]

def _pos_details(word_list):
    """Format (word, POS) pairs as "word(中文词性)" strings for messages."""
    return [f"{word}({get_pos_chinese(pos)})" for word, pos in word_list]

def is_semantically_valid(
    text,
    valid_single_pos=None,
    invalid_single_pos=None,
    invalid_combo_patterns=None
):
    """
    Check whether *text* forms a semantically valid word or phrase.

    Args:
        text: Input text; stripped before analysis.
        valid_single_pos: POS tags acceptable as a standalone word
            (nouns, verbs, pronouns, locatives, ...). None selects the
            built-in default set.
        invalid_single_pos: POS tags invalid when standing alone
            (particles, adverbs, prepositions, numerals, ...). None
            selects the built-in default set.
        invalid_combo_patterns: Regex patterns over the concatenated POS
            sequence marking invalid combinations. None selects the
            built-in defaults.

    Returns:
        (is_valid, reason, word_list): validity flag, human-readable
        reason (Chinese), and the (word, POS) segmentation list.
    """
    # Defaults use the None-sentinel idiom instead of mutable default
    # arguments: the original shared one set/list object across all calls,
    # so any caller mutating them would silently change later calls.
    if valid_single_pos is None:
        valid_single_pos = {'n', 'nr', 'ns', 'nt', 'nz', 'v', 'vd', 'vn', 'vx', 'r', 's', 'f', 'b'}
    if invalid_single_pos is None:
        invalid_single_pos = {'a', 'ad', 'an', 'd', 'u', 'uzhe', 'ule', 'uguo', 'ude', 'c', 'cc', 'p', 'pba', 'pbei', 'xc', 'm', 'mq', 'q'}
    if invalid_combo_patterns is None:
        invalid_combo_patterns = [r'^u+$', r'^[ucp]+$', r'u{2,}', r'^d+$', r'^[da]+$']

    # Empty input has no semantics.
    text = text.strip()
    if not text:
        return False, "空输入，无语义", []

    # Segment and build the POS sequence. NOTE(review): flags are joined
    # with no separator, so multi-char tags (e.g. 'ns') are ambiguous with
    # single-char runs ('n'+'s') in the regex checks below — confirm this
    # is intended.
    word_list = analyze_pos_composition(text)
    pos_sequence = ''.join(pos for _, pos in word_list)

    # Special case: the bare words “跑”/“吃” are accepted outright.
    if len(word_list) == 1 and word_list[0][0] in {'跑', '吃'}:
        word, pos = word_list[0]
        return True, f"有效单词：'{word}'（{get_pos_chinese(pos)}，单独‘跑’或‘吃’有效）", word_list

    # Reject any POS sequence matching an invalid-combination pattern.
    for pattern in invalid_combo_patterns:
        if re.search(pattern, pos_sequence):
            return False, f"无效词性组合：{pattern}（{' + '.join(_pos_details(word_list))}）", word_list

    # Single-word input: decide purely on its POS tag.
    if len(word_list) == 1:
        word, pos = word_list[0]
        pos_chinese = get_pos_chinese(pos)
        if pos in invalid_single_pos:
            return False, f"单独无效：'{word}'（{pos_chinese}需搭配其他词）", word_list
        if pos in valid_single_pos:
            return True, f"有效单词：'{word}'（{pos_chinese}，单独动词或名词等有效）", word_list
        return False, f"单独无效：'{word}'（{pos_chinese}不在有效范围）", word_list

    # Split into content words (nouns/verbs/adjectives/...) and function words.
    content_pos = {'n', 'nr', 'ns', 'nt', 'nz', 'v', 'vd', 'vn', 'vx', 'a', 'ad', 'an', 'r', 's', 'f', 'b'}
    content_words = [(word, pos) for word, pos in word_list if pos in content_pos]
    function_words = [(word, pos) for word, pos in word_list if pos not in content_pos]

    # Reject function-word-only input or a function-word majority.
    if not content_words:
        return False, f"仅含虚词：{' + '.join(_pos_details(function_words))}", word_list
    if len(function_words) >= len(content_words):
        return False, f"虚词比例过高：{' + '.join(_pos_details(word_list))}", word_list

    # Regex patterns over the POS sequence describing accepted phrase shapes.
    valid_patterns = [
        r'v.*u.*[va]', r'v.*d.*[va]',  # verb-complement
        r'v.*n', r'n.*v',  # verb-object, subject-predicate
        r'[ad].*[nv]',  # modifier-head
        r'(v.{1,3}){2,}', r'(n.{1,3}){2,}', r'(a.{1,3}){2,}',  # coordination
        r'n.*v.*n', r'r.*[vna]'  # subject-verb-object, pronoun phrase
    ]

    # Coarse structural features derived from POS prefixes.
    has_valid_pattern = any(re.search(pattern, pos_sequence) for pattern in valid_patterns)
    has_verb = any(pos.startswith('v') for _, pos in word_list)
    has_noun = any(pos.startswith('n') for _, pos in word_list)
    has_adj = any(pos.startswith('a') for _, pos in word_list)
    has_u = any(pos.startswith('u') for _, pos in word_list)

    # Classify the phrase structure; first matching branch wins.
    structure_type = ""
    if has_verb and has_adj and has_u:
        structure_type = "动补结构"
    elif has_noun and has_verb:
        structure_type = "主谓结构"
    elif has_verb and has_noun and not has_u:
        structure_type = "动宾结构"
    elif len(text) <= 4 and content_words:
        structure_type = "短实词短语"

    # Final verdict: valid if a pattern matched or a structure was recognized.
    detail_msg = f"词性组成：{' + '.join(_pos_details(word_list))}"
    if has_valid_pattern or structure_type:
        if structure_type:
            detail_msg += f"，结构：{structure_type}"
        return True, f"有效短语（{detail_msg}）", word_list
    return False, f"无效结构（{detail_msg}）", word_list

def test_semantic_validity(text):
    """Run the semantic-validity check on *text* and print a short report."""
    valid, reason, details = is_semantically_valid(text)
    formatted = [f'{word}({get_pos_chinese(pos)})' for word, pos in details]
    print(f"输入: '{text}'")
    print(f"分词结果: {formatted}")
    print(f"是否有效: {valid}")
    print(f"原因: {reason}\n")

if __name__ == "__main__":
    test_cases = [
        "跑得快", "的了是", "非常", "美丽的", "我吃饭", "看书", "跑步", "书", "和",
        "吃得很饱", "北京欢迎你", "对换模式", "了的是", "的着过", "的地得", "把被和",
        "你好么bro", "美丽的","1" 
    ]
    print("语义有效性测试结果：")
    print("=" * 50)
    for test in test_cases:
        test_semantic_validity(test)