import spacy
from spacy import displacy

# Load the small English pipeline once at import time; shared by every
# function in this module. Requires `en_core_web_sm` to be installed.
nlp = spacy.load("en_core_web_sm")

# 1. Sentence-pattern analysis
def analyze_syntax(essay_text):
    """
    Classify every sentence in *essay_text* as simple / compound / complex /
    compound-complex based on its dependency labels, and collect sentences
    that contain a subordinate clause as "highlights".

    Returns a dict shaped for front-end charting:
        {'syntaxData': {'labels': [...], 'data': [...]},
         'highlightSentences': [...]}
    """
    doc = nlp(essay_text)

    counts = {"simple": 0, "compound": 0, "complex": 0, "compound_complex": 0}
    highlights = []
    # Dependency labels that signal a subordinate clause.
    subordinate_deps = {"advcl", "relcl", "ccomp"}

    for sent in doc.sents:
        deps = {token.dep_ for token in sent}

        # Coordination ("conj") marks a compound structure; any subordinate
        # label marks a complex one.
        coordinated = "conj" in deps
        subordinated = bool(subordinate_deps & deps)

        if coordinated and subordinated:
            counts["compound_complex"] += 1
        elif coordinated:
            counts["compound"] += 1
        elif subordinated:
            counts["complex"] += 1
        else:
            counts["simple"] += 1

        # Sentences with subordinate clauses are treated as highlight-worthy.
        if subordinated:
            highlights.append(sent.text)

    return {
        'syntaxData': {
            'labels': ["Simple", "Compound", "Complex", "Compound-Complex"],
            'data': list(counts.values())
        },
        'highlightSentences': highlights
    }

# 2. Sentence dependency tree
def analyze_syntax_tree(sentence):
    """
    Parse a sentence's dependency structure into a nested tree for front-end
    visualization and classify the sentence type.

    Parameters
    ----------
    sentence : str
        The sentence to analyze.

    Returns
    -------
    dict
        "treeData": nested {"name", "children"} dicts rooted at the ROOT
                    token, with punctuation excluded.
        "sentenceType": one of "简单句" / "复合句" / "复杂句" / "复合-复杂句".

    Raises
    ------
    ValueError
        If the input yields no parseable tokens (empty or whitespace-only).
    """
    doc = nlp(sentence)

    def build_tree(token):
        # Recursively build the tree structure, excluding punctuation.
        return {
            "name": f"{token.text} ({token.dep_})",
            "children": [build_tree(child) for child in token.children if not child.is_punct]
        }

    # Fix: the original `[...][0]` raised an opaque IndexError when the input
    # produced no tokens; fail fast with a clear, catchable error instead.
    root = next((token for token in doc if token.dep_ == "ROOT"), None)
    if root is None:
        raise ValueError("Sentence contains no parseable tokens")
    tree_data = build_tree(root)

    # Sentence-type classification: coordination ("conj") vs. subordination.
    dep_labels = [token.dep_ for token in doc]
    has_conj = any(dep == "conj" for dep in dep_labels)
    # NOTE(review): "conj" also fires on plain noun coordination ("apples and
    # oranges"), so "compound" may over-count — confirm this is intended.
    has_sub_clause = any(dep in ["advcl", "relcl", "ccomp"] for dep in dep_labels)

    if not has_conj and not has_sub_clause:
        sentence_type = "简单句"
    elif has_conj and not has_sub_clause:
        sentence_type = "复合句"
    elif not has_conj and has_sub_clause:
        sentence_type = "复杂句"
    else:
        sentence_type = "复合-复杂句"

    return {
        "treeData": tree_data,
        "sentenceType": sentence_type
    }

# 3. Grammar-structure visualization data (displacy format)
def analyze_syntax_structure(sentence):
    """
    Render the dependency parse of *sentence* with spaCy's displacy and
    return the markup wrapped with custom CSS so it fits the front-end
    dialog layout.

    Returns {"html": <css + rendered markup>}.
    """
    doc = nlp(sentence)

    # Rendering options tuned for a compact dialog view.
    render_options = {
        "compact": True,  # compact mode avoids large empty areas
        "color": "#4A90E2",  # arc/line color
        "bg": "#fafafa",    # background color
        "font": "Source Sans Pro"  # font family
    }
    markup = displacy.render(doc, style="dep", page=False, options=render_options)

    # Custom CSS so the rendered parse adapts to the dialog layout.
    custom_css = """
    <style>
        .displacy {
            overflow-x: auto;
            overflow-y: hidden;
            padding: 10px;
            border-radius: 8px;
            background-color: #f9f9f9;
            max-width: 100%;
            box-shadow: 0px 2px 4px rgba(0, 0, 0, 0.1);
        }
        .displacy svg {
            width: 100% !important;
            height: auto !important;
        }
        .displacy .word {
            font-size: 14px;
            font-weight: 500;
        }
        .displacy .dep {
            font-size: 12px;
            color: #888;
        }
    </style>
    """

    # Prepend the stylesheet so the markup is self-contained.
    return {"html": custom_css + markup}