from typing import Dict, List
from app.utils.pymupdf_tools import pdf_get_content_region
from tests.base_test import base_test_case

logger = base_test_case.get_logger(__name__)
TEST_DATA_DIR = base_test_case.test_data_dir
OUTPUT_DATA_DIR = base_test_case.output_data_dir

import json
import fitz  # PyMuPDF
import numpy as np
from collections import Counter
import re

# DBSCAN 聚类
from sklearn.cluster import DBSCAN
from sklearn.preprocessing import StandardScaler

"""
设计原则：
1. 不依赖特定关键词（如“例题”“考点”）
2. 仅使用布局特征 + 基础文本模式（A. B. 【】）
3. 通过聚类发现结构，再用上下文逻辑组装 Q&A
"""


def extract_line_features(text_dict: Dict, page_rect) -> List[Dict]:
    """Extract per-line layout and text-pattern features from a PyMuPDF text dict.

    Args:
        text_dict: Output of ``page.get_text('dict')`` — a dict with a "blocks" list.
        page_rect: Page rectangle exposing a ``height`` attribute (e.g. ``page.rect``).

    Returns:
        A list of feature dicts sorted top-to-bottom by ``y_top``. Each dict holds
        the line text, geometry, font statistics, generic text-pattern flags
        (option/answer/analysis markers) and a numeric ``feature_vector``
        (avg_size, indent, relative_y, is_bold) used for layout clustering.
    """
    lines: List[Dict] = []
    page_height = page_rect.height

    for block in text_dict.get("blocks", []):
        # type 0 == text block; skip image blocks and anything else.
        if block.get("type") != 0:
            continue
        for line in block.get("lines", []):
            x0, y0, x1, y1 = line["bbox"]
            height = y1 - y0
            spans = line.get("spans", [])
            if not spans:
                continue
            full_text = "".join(s.get("text", "") for s in spans).strip()
            # After strip(), an empty string is the only whitespace-only case left.
            if not full_text:
                continue

            sizes = [s.get("size", 0) for s in spans]
            fonts = [s.get("font", "") for s in spans]
            flags = [s.get("flags", 0) for s in spans]

            avg_size = float(np.mean(sizes)) if sizes else 0
            max_size = float(max(sizes)) if sizes else 0
            # PyMuPDF span flags: bit 2**1 (2) = italic, bit 2**4 (16) = bold.
            # The original `f & 2` tested italic instead of bold — fixed to
            # `f & 16`. The "Bold"-in-font-name fallback still catches fonts
            # that encode weight only in the name.
            is_bold = any((f & 16) or "Bold" in ft for f, ft in zip(flags, fonts))
            indent = float(x0)
            relative_y = y0 / page_height

            # Generic text patterns (layout-agnostic; no exam-specific keywords
            # beyond the answer/analysis markers).
            starts_with_letter_opt = bool(re.match(r'^\s*[A-Ea-e][\.\)]', full_text))
            has_chinese_bracket = "【" in full_text and "】" in full_text
            is_answer_marker = any(kw in full_text for kw in ["【答案】", "答案：", "答："])
            is_analysis_marker = any(kw in full_text for kw in ["【解析】", "解析："])

            lines.append({
                "text": full_text,
                "bbox": [x0, y0, x1, y1],
                "y_top": y0,
                "y_bottom": y1,
                "avg_size": avg_size,
                "max_size": max_size,
                "height": float(height),
                "indent": indent,
                "is_bold": int(is_bold),
                "relative_y": relative_y,
                "starts_with_letter_opt": int(starts_with_letter_opt),
                "has_chinese_bracket": int(has_chinese_bracket),
                "is_answer_marker": int(is_answer_marker),
                "is_analysis_marker": int(is_analysis_marker),
                # Clustering features: layout-dominated on purpose.
                "feature_vector": [avg_size, indent, relative_y, int(is_bold)]
            })
    # Reading order: sort by vertical position.
    lines.sort(key=lambda x: x["y_top"])
    return lines


def cluster_lines(lines: List[Dict]) -> List[int]:
    """Assign a cluster id to each line via DBSCAN on its layout features.

    Feature vectors are standardized first so size/indent/position contribute
    on comparable scales. With ``min_samples=1`` every line joins a cluster,
    so no -1 (noise) labels are produced.

    Args:
        lines: Feature dicts, each carrying a numeric ``feature_vector``.

    Returns:
        One integer cluster label per input line, in the same order.
    """
    # Fewer than two lines: nothing to cluster, everything is cluster 0.
    if len(lines) < 2:
        return [0] * len(lines)
    features = np.array([entry["feature_vector"] for entry in lines])
    normalized = StandardScaler().fit_transform(features)
    model = DBSCAN(eps=0.6, min_samples=1, metric='euclidean')
    return model.fit_predict(normalized).tolist()


def analyze_pdf_structure(text_dict: Dict, page_rect, verbose: bool = True) -> Dict:
    """Infer document structure (titles, questions, options, answers) from layout.

    Pipeline: extract per-line features -> compute inter-line gaps -> cluster
    lines by layout -> label each cluster heuristically -> walk the lines in
    reading order assembling chapters and Q&A records.

    Args:
        text_dict: Output of ``page.get_text('dict')``.
        page_rect: Page rectangle (e.g. ``page.rect``) used for normalization.
        verbose: When True, prints a short summary and includes ``raw_lines``
            in the result.

    Returns:
        Dict with "metadata", "chapters", "questions" and (if verbose)
        "raw_lines"; or ``{"error": "no text lines"}`` when the page has no
        usable text.
    """
    lines = extract_line_features(text_dict, page_rect)
    if not lines:
        return {"error": "no text lines"}

    # Vertical gap to the previous line (used to detect paragraph isolation).
    for i in range(len(lines)):
        if i == 0:
            lines[i]["prev_gap"] = 0
        else:
            lines[i]["prev_gap"] = lines[i]["y_top"] - lines[i - 1]["y_bottom"]
    gaps = [line["prev_gap"] for line in lines[1:]]
    # Fallback of 10 (points) when there is only a single line and no gaps.
    avg_gap = np.mean(gaps) if gaps else 10

    # A line is "isolated" when the gap above it is well beyond the average.
    for line in lines:
        line["is_isolated"] = int(line.get("prev_gap", 0) > avg_gap * 1.8)

    cluster_labels = cluster_lines(lines)
    for line, label in zip(lines, cluster_labels):
        line["cluster_id"] = label

    # === Infer a semantic type for each cluster from its aggregate stats ===
    cluster_info = {}
    for cid in set(cluster_labels):
        cluster_lines_subset = [l for l in lines if l["cluster_id"] == cid]
        avg_size = np.mean([l["avg_size"] for l in cluster_lines_subset])
        bold_ratio = np.mean([l["is_bold"] for l in cluster_lines_subset])
        avg_indent = np.mean([l["indent"] for l in cluster_lines_subset])
        isolated_ratio = np.mean([l["is_isolated"] for l in cluster_lines_subset])

        # Heuristic ordering matters: size/boldness first (titles), then
        # text-pattern markers (options/answers/analysis), body as fallback.
        # NOTE(review): the size thresholds (14.5 / 12.5 / 11.5 pt) look
        # tuned for a specific document set — confirm before reuse.
        if avg_size > 14.5 or (avg_size > 12.5 and bold_ratio > 0.5):
            label_type = "title_1"
        elif avg_size > 11.5 and (bold_ratio > 0.3 or isolated_ratio > 0.5):
            label_type = "title_2"
        elif any(l["starts_with_letter_opt"] for l in cluster_lines_subset):
            label_type = "option"
        elif any(l["is_answer_marker"] for l in cluster_lines_subset):
            label_type = "answer"
        elif any(l["is_analysis_marker"] for l in cluster_lines_subset):
            label_type = "analysis"
        else:
            label_type = "body"

        cluster_info[cid] = label_type

    for line in lines:
        line["predicted_label"] = cluster_info[line["cluster_id"]]

    # === Post-processing: assemble the Q&A structure in reading order ===
    questions = []
    chapters = []

    i = 0
    while i < len(lines):
        line = lines[i]
        if line["predicted_label"] in ["title_1", "title_2"]:
            chapters.append({
                "level": "1" if line["predicted_label"] == "title_1" else "2",
                "text": line["text"]
            })
            i += 1
            continue

        # Candidate question stem: an isolated body line of reasonable size
        # that is immediately followed by an option line.
        if (line["predicted_label"] == "body" and
                line["avg_size"] >= 9.0 and
                line["is_isolated"] and
                i + 1 < len(lines) and
                lines[i + 1]["predicted_label"] == "option"):

            # Merge continuation lines of the stem (until an option/answer/
            # analysis/title line or a size jump).
            # NOTE(review): the guard above requires lines[i + 1] to be an
            # "option", so this loop breaks on its first iteration and
            # multi-line stems are never actually merged — confirm whether
            # that is intended.
            stem_lines = [line["text"]]
            j = i + 1
            while j < len(lines):
                next_line = lines[j]
                if next_line["predicted_label"] in ["option", "answer", "analysis", "title_1", "title_2"]:
                    break
                if abs(next_line["avg_size"] - line["avg_size"]) <= 1.5:
                    stem_lines.append(next_line["text"])
                else:
                    break
                j += 1

            q = {
                "question_id": len(questions) + 1,
                "stem": " ".join(stem_lines),
                "options": [],
                "answer": None,
                "analysis": None
            }

            # Consume the consecutive run of option lines.
            while j < len(lines) and lines[j]["predicted_label"] == "option":
                q["options"].append(lines[j]["text"])
                j += 1

            # Optionally consume one answer line, then one analysis line.
            if j < len(lines) and lines[j]["predicted_label"] == "answer":
                q["answer"] = lines[j]["text"]
                j += 1
            if j < len(lines) and lines[j]["predicted_label"] == "analysis":
                q["analysis"] = lines[j]["text"]
                j += 1

            questions.append(q)
            i = j
        else:
            i += 1

    result = {
        "metadata": {
            "total_lines": len(lines),
            "clusters": len(set(cluster_labels)),
            "questions_count": len(questions),
            "chapters_count": len(chapters)
        },
        "chapters": chapters,
        "questions": questions,
        "raw_lines": lines if verbose else None
    }

    if verbose:
        print(f"✅ 发现 {len(questions)} 道题目，{len(chapters)} 个标题")
        for q in questions[:2]:
            print(f"\n❓ 题干: {q['stem'][:80]}...")
            if q['options']:
                print(" 📌 选项:", "; ".join(q['options'][:2]))
            if q['answer']:
                print(" ✅ 答案:", q['answer'])
            if q['analysis']:
                print(" 🔍 解析:", q['analysis'][:60] + "...")

    return result


if __name__ == '__main__':
    # PDF content analysis: identify titles, chapters, question numbers,
    # stems, options, analyses and answers on the first page of a sample PDF.
    pdf_path = str(TEST_DATA_DIR / "1711605374231.pdf")
    # Context manager guarantees the document handle is released even if
    # region detection, text extraction, or analysis raises (the original
    # bare doc.close() was skipped on any exception).
    with fitz.open(pdf_path) as doc:
        page = doc[0]
        roi = pdf_get_content_region(page, zoom_factor=1.0)
        text_dict = page.get_text('dict', clip=roi, sort=True)

        result = analyze_pdf_structure(
            text_dict,
            page_rect=page.rect,
            verbose=True
        )

    with open(OUTPUT_DATA_DIR / "structured_output.json", "w", encoding="utf-8") as f:
        json.dump(result, f, ensure_ascii=False, indent=2)