import nltk
import re

# Ensure NLTK resources are available at import time.
# 'averaged_perceptron_tagger' backs nltk.pos_tag; 'punkt' backs
# nltk.word_tokenize. Downloads are cached locally, so repeat runs are
# cheap; quiet=True suppresses progress output.
nltk.download('averaged_perceptron_tagger', quiet=True)
nltk.download('punkt', quiet=True)

# Penn Treebank POS tags mapped to Chinese descriptions, used when building
# user-facing reason strings. Tags absent from this mapping fall back to the
# raw tag string at lookup time.
POS_MAPPING = {
    'NN': '名词', 'NNS': '复数名词', 'NNP': '专有名词', 'NNPS': '复数专有名词',
    'VB': '动词原形', 'VBD': '过去式动词', 'VBG': '动名词', 'VBN': '过去分词',
    'VBP': '现在时动词', 'VBZ': '第三人称单数动词',
    'JJ': '形容词', 'JJR': '比较级形容词', 'JJS': '最高级形容词',
    'RB': '副词', 'RBR': '比较级副词', 'RBS': '最高级副词',
    'PRP': '代词', 'PRP$': '物主代词', 'IN': '介词', 'DT': '限定词',
    'CC': '并列连词', 'TO': 'to作为介词或不定式标记', 'MD': '情态动词',
    'WDT': 'wh-限定词', 'WP': 'wh-代词', 'WRB': 'wh-副词', 'RP': '小品词',
    'UH': '感叹词', ',': '逗号', '.': '句号', ':': '冒号', 'SYM': '符号'
}

def get_pos_chinese(pos):
    """Map a Penn Treebank POS tag to its Chinese description.

    Unknown tags are returned unchanged as a fallback.
    """
    try:
        return POS_MAPPING[pos]
    except KeyError:
        return pos

def analyze_pos_composition(text):
    """Normalize, tokenize, and POS-tag *text*.

    The input is stripped and lowercased before tokenization; the result is
    a list of (word, tag) tuples as produced by nltk.pos_tag.
    """
    normalized = text.strip().lower()
    return nltk.pos_tag(nltk.word_tokenize(normalized))

# Default POS sets and patterns live at module level so the function signature
# carries no mutable default arguments (the shared-state pitfall in Python).
_DEFAULT_VALID_SINGLE_POS = frozenset({
    'NN', 'NNS', 'NNP', 'NNPS',
    'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ',
    'PRP',
})
_DEFAULT_INVALID_SINGLE_POS = frozenset({
    'JJ', 'JJR', 'JJS', 'RB', 'RBR', 'RBS',
    'IN', 'DT', 'CC', 'TO', 'MD', 'WDT', 'WP', 'WRB', 'RP',
})
# BUG FIX: the original patterns used character classes such as [IN|DT|CC],
# which match the individual characters I, N, |, D, T, C — not whole tags.
# Grouped alternation (IN|DT|CC) expresses the intended tag-level match.
# Likewise IN{2,} matched an 'I' followed by 2+ 'N's; (IN){2,} matches two
# or more consecutive IN tags as intended.
_DEFAULT_INVALID_COMBO_PATTERNS = (
    r'^(IN)+$',         # prepositions only
    r'^(IN|DT|CC)+$',   # only prepositions / determiners / conjunctions
    r'(IN){2,}',        # two or more consecutive prepositions
    r'^(RB)+$',         # adverbs only
    r'^(RB|JJ)+$',      # only adverbs / adjectives
)

def is_semantically_valid(
    text,
    valid_single_pos=None,
    invalid_single_pos=None,
    invalid_combo_patterns=None
):
    """
    Check if English text is semantically valid based on POS patterns.

    Args:
        text: Input text.
        valid_single_pos: Set of POS tags valid for single-word inputs;
            defaults to noun/verb/pronoun tags.
        invalid_single_pos: Set of POS tags invalid for single-word inputs;
            defaults to modifier and function-word tags.
        invalid_combo_patterns: Iterable of regex patterns, matched against
            the concatenated POS-tag sequence, describing invalid tag
            combinations.

    Returns:
        Tuple (is_valid, reason, word_list): Boolean validity, reason string
        (Chinese), and the POS-tagged (word, tag) list.
    """
    if valid_single_pos is None:
        valid_single_pos = _DEFAULT_VALID_SINGLE_POS
    if invalid_single_pos is None:
        invalid_single_pos = _DEFAULT_INVALID_SINGLE_POS
    if invalid_combo_patterns is None:
        invalid_combo_patterns = _DEFAULT_INVALID_COMBO_PATTERNS

    # Handle empty input.
    text = text.strip()
    if not text:
        return False, "空输入，无语义", []

    # Special case: a bare "run" or "eat" is always accepted.
    if text.lower() in {'run', 'eat'}:
        word_list = analyze_pos_composition(text)
        word, pos = word_list[0]
        return True, f"有效单词：'{word}'（{get_pos_chinese(pos)}，单独‘run’或‘eat’有效）", word_list

    # Get POS composition. NOTE(review): tags are concatenated with no
    # separator, so patterns can in principle match across tag boundaries
    # (e.g. VBN+NNS yields "VBNNNS"); preserved as-is to keep the existing
    # pattern contract — confirm whether a delimited sequence is preferable.
    word_list = analyze_pos_composition(text)
    pos_sequence = ''.join(pos for _, pos in word_list)

    # Reject sequences that match a known-invalid POS combination.
    for pattern in invalid_combo_patterns:
        if re.search(pattern, pos_sequence):
            pos_details = [f"{word}({get_pos_chinese(pos)})" for word, pos in word_list]
            return False, f"无效词性组合：{pattern}（{' + '.join(pos_details)}）", word_list

    # Single-word case: judged purely by tag membership.
    if len(word_list) == 1:
        word, pos = word_list[0]
        pos_chinese = get_pos_chinese(pos)
        if pos in invalid_single_pos:
            return False, f"单独无效：'{word}'（{pos_chinese}需搭配其他词）", word_list
        if pos in valid_single_pos:
            return True, f"有效单词：'{word}'（{pos_chinese}，单独名词或动词等有效）", word_list
        return False, f"单独无效：'{word}'（{pos_chinese}不在有效范围）", word_list

    # Classify content vs. function words.
    content_pos = {'NN', 'NNS', 'NNP', 'NNPS', 'VB', 'VBD', 'VBG', 'VBN', 'VBP', 'VBZ', 'JJ', 'JJR', 'JJS', 'PRP'}
    content_words = [(word, pos) for word, pos in word_list if pos in content_pos]
    function_words = [(word, pos) for word, pos in word_list if pos not in content_pos]

    # Reject phrases with no content words, or more function than content words.
    if not content_words:
        pos_details = [f"{word}({get_pos_chinese(pos)})" for word, pos in function_words]
        return False, f"仅含虚词：{' + '.join(pos_details)}", word_list
    if len(function_words) >= len(content_words):
        pos_details = [f"{word}({get_pos_chinese(pos)})" for word, pos in word_list]
        return False, f"虚词比例过高：{' + '.join(pos_details)}", word_list

    # Valid phrase patterns over the concatenated tag sequence.
    # BUG FIX: same character-class → grouped-alternation fix as above.
    valid_patterns = [
        r'VB.*(NN|NNS|NNP|NNPS)',      # verb ... noun: verb-object (e.g. "eat apple")
        r'(NN|NNS|NNP|NNPS|PRP).*VB',  # noun/pronoun ... verb: subject-verb (e.g. "she runs")
        r'(JJ|RB).*(NN|VB)',           # adjective/adverb modifying a noun/verb
        r'(VB.{0,3}){2,}',             # parallel verb structures
        r'(NN.{0,3}){2,}',             # parallel noun structures
        r'(NN|PRP).*VB.*NN',           # subject-verb-object (e.g. "she eats apple")
        r'PRP.*(VB|NN|JJ)',            # pronoun phrases
    ]

    # Structure check.
    has_valid_pattern = any(re.search(pattern, pos_sequence) for pattern in valid_patterns)
    has_verb = any(pos.startswith('VB') for _, pos in word_list)
    has_noun = any(pos.startswith('NN') for _, pos in word_list)
    has_adj = any(pos.startswith('JJ') for _, pos in word_list)
    has_prep = any(pos == 'IN' for _, pos in word_list)

    # Positions of the first verb/noun tag, used to distinguish verb-object
    # ("read book") from subject-verb ("London welcomes you").
    first_verb = next((i for i, (_, p) in enumerate(word_list) if p.startswith('VB')), -1)
    first_noun = next((i for i, (_, p) in enumerate(word_list) if p.startswith('NN')), -1)

    structure_type = ""
    if has_verb and has_adj:
        structure_type = "动词+形容词结构"
    # BUG FIX: in the original, the verb-object branch was unreachable dead
    # code because the broader "has_noun and has_verb" branch preceded it.
    # Verb-object now additionally requires the verb before the noun with no
    # preposition, and is checked first.
    elif has_verb and has_noun and not has_prep and 0 <= first_verb < first_noun:
        structure_type = "动宾结构"
    elif has_noun and has_verb:
        structure_type = "主谓结构"
    elif len(text.split()) <= 4 and content_words:
        structure_type = "短实词短语"

    # Final validation: accept on any valid pattern or recognized structure.
    pos_details = [f"{word}({get_pos_chinese(pos)})" for word, pos in word_list]
    detail_msg = f"词性组成：{' + '.join(pos_details)}"
    if has_valid_pattern or structure_type:
        if structure_type:
            detail_msg += f"，结构：{structure_type}"
        return True, f"有效短语（{detail_msg}）", word_list
    return False, f"无效结构（{detail_msg}）", word_list

def test_semantic_validity(text):
    """Run is_semantically_valid on *text* and print a formatted report."""
    valid, reason, tagged = is_semantically_valid(text)
    tag_strings = [f'{word}({get_pos_chinese(pos)})' for word, pos in tagged]
    print(f"Input: '{text}'")
    print(f"POS Tags: {tag_strings}")
    print(f"Valid: {valid}")
    print(f"Reason: {reason}\n")

if __name__ == "__main__":
    # Demo inputs covering valid single words, phrases, and pure
    # function-word sequences; the expected verdict is noted inline.
    test_cases = [
        "run fast",             # valid: verb + adverb
        "of the is",            # invalid: pure function words
        "very",                 # invalid: adverb alone
        "beautiful",            # invalid: adjective alone
        "I eat",                # valid: subject-verb
        "read book",            # valid: verb-object
        "running",              # valid: single verb
        "book",                 # valid: single noun
        "and",                  # invalid: conjunction alone
        "eat well",             # valid: verb + adverb
        "London welcomes you",  # valid: subject-verb-object
        "switch mode",          # valid: verb-object
        "the of in",            # invalid: pure function words
        "run",                  # valid: single "run"
        "eat",                  # valid: single "eat"
    ]
    print("English Semantic Validity Test Results:")
    print("=" * 50)
    for case in test_cases:
        test_semantic_validity(case)