import random
from flask import Flask, render_template, request, jsonify, json
from elasticsearch import Elasticsearch
import re
import os
import math
from collections import Counter
import jieba
from nltk.stem import PorterStemmer
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from collections import defaultdict
import threading
import umap
import numpy as np


app = Flask(__name__)

# Elasticsearch client (local single node). Retries + retry_on_timeout make
# the app more resilient to transient connection hiccups at startup.
es = Elasticsearch(
    ["http://localhost:9200"],
    max_retries=3,
    retry_on_timeout=True,
    timeout=30
)

# Index holding the preprocessed (tokenized) news documents.
INDEX_NAME = "news_documents"


@app.route("/")
def index():
    """Render the search landing page."""
    return render_template("index.html")


@app.route("/search")
def search_page():
    """Render the search results page."""
    return render_template("search.html")


@app.route("/details")
def details():
    """Render the document details page."""
    return render_template("details.html")


@app.route("/similarity")
def similarity_page():
    """Render the document-similarity comparison page."""
    return render_template("similarity.html")


@app.route("/duplicate-check")
def duplicate_check_page():
    """Render the plagiarism/duplicate-check page."""
    return render_template("duplicate_check.html")


@app.route("/cluster")
def cluster_page():
    """Render the clustering visualization page."""
    return render_template("cluster.html")


# Module-level caches for IDF and TF-IDF data, built once at startup by
# build_idf_and_tfidf_vectors(). Chinese and English corpora are kept separate.
global_idf_dict_cn = None  # Chinese IDF dictionary {term: idf}
global_idf_dict_en = None  # English IDF dictionary {term: idf}

global_tfidf_cache_cn = None  # Chinese TF-IDF cache {doc_id: {term: weight}}
global_tfidf_cache_en = None  # English TF-IDF cache {doc_id: {term: weight}}

global_doc_metadata_cn = None  # Chinese doc metadata {"doc_ids": [...], "titles": [...]}
global_doc_metadata_en = None  # English doc metadata {"doc_ids": [...], "titles": [...]}

global_vocabulary_cn = None  # fixed term order for densifying Chinese vectors
global_vocabulary_en = None  # fixed term order for densifying English vectors


def vectorize_tfidf(tfidf_vec, vocabulary):
    """Project a sparse TF-IDF dict onto the fixed vocabulary as a dense array.

    Terms absent from `tfidf_vec` contribute 0.0.
    NOTE(review): an identical function is re-defined later in this module and
    shadows this one; the two should be consolidated.
    """
    dense = [tfidf_vec.get(word, 0.0) for word in vocabulary]
    return np.asarray(dense)


def get_term_frequencies(doc_id, field="content"):
    """Fetch per-term frequencies for one document via the ES term_vectors API.

    Returns {term: term_freq} for `field`, or {} on any failure (logged).
    """
    request_body = {
        "fields": [field],
        "term_statistics": True,
        "positions": False,
        "offsets": False
    }
    try:
        response = es.termvectors(index=INDEX_NAME, id=doc_id, body=request_body)
        terms = response.get("term_vectors", {}).get(field, {}).get("terms", {})
        return {term: stats["term_freq"] for term, stats in terms.items()}
    except Exception as e:
        app.logger.error(f"获取 term vectors 失败: {e}")
        return {}


def get_all_document_ids(prefix=None):
    """Collect every document _id in the index using the ES scroll API.

    If `prefix` is given, only documents whose `doc_id` field starts with it
    are returned (used to separate the "cn_" / "en_" corpora).
    """
    batch_size = 500
    body = {"size": batch_size}
    if prefix:
        body["query"] = {"prefix": {"doc_id": prefix}}

    page = es.search(index=INDEX_NAME, body=body, scroll='2m')
    sid = page["_scroll_id"]
    batch = page["hits"]["hits"]

    all_ids = []
    while batch:
        for hit in batch:
            all_ids.append(hit["_id"])
        # Pull the next scroll page; the loop ends on an empty batch.
        page = es.scroll(scroll_id=sid, scroll='2m')
        sid = page["_scroll_id"]
        batch = page["hits"]["hits"]

    es.clear_scroll(scroll_id=sid)
    return all_ids


def compute_idf_from_es(doc_ids, field="content"):
    """Build an IDF dictionary from ES term_vectors over the given documents.

    Uses the smoothed formula log(N / (df + 1)), where N is the corpus size
    and df the number of documents containing the term.
    """
    total_docs = len(doc_ids)
    doc_freq = defaultdict(int)
    vocabulary = set()

    tv_body = {
        "fields": [field],
        "term_statistics": True,
        "positions": False,
        "offsets": False
    }
    for i, doc_id in enumerate(doc_ids):
        response = es.termvectors(index=INDEX_NAME, id=doc_id, body=tv_body)
        terms = response.get("term_vectors", {}).get(field, {}).get("terms", {})

        # Each term counts once per document, regardless of frequency.
        for term in terms:
            vocabulary.add(term)
            doc_freq[term] += 1

        if (i + 1) % 100 == 0:
            print(f"已处理 {i + 1} 篇文档")

    # Smoothed IDF: the +1 in the denominator avoids division by zero.
    return {
        term: math.log(total_docs / (doc_freq[term] + 1))
        for term in vocabulary
    }


def build_tfidf_cache(doc_ids, idf_dict, field="content"):
    """Build and cache an L2-normalized TF-IDF vector for every document.

    Args:
        doc_ids: document _ids to vectorize.
        idf_dict: {term: idf} dictionary (terms missing here get weight 0).
        field: the ES field whose term vectors are used.

    Returns:
        {doc_id: {term: normalized tf-idf weight}}.
    """
    tfidf_cache = {}

    for i, doc_id in enumerate(doc_ids):
        tv_response = es.termvectors(
            index=INDEX_NAME,
            id=doc_id,
            body={
                "fields": [field],
                "term_statistics": True,
                "positions": False,
                "offsets": False
            }
        )

        terms = tv_response.get("term_vectors", {}).get(field, {}).get("terms", {})

        # Total token count of the document (sum of per-term frequencies).
        total_terms = sum(info["term_freq"] for info in terms.values())

        # Fix: a document with no indexed terms in this field previously
        # caused a ZeroDivisionError; cache an empty vector instead.
        if total_terms == 0:
            tfidf_cache[doc_id] = {}
        else:
            # TF-IDF = (term_freq / doc_length) * idf
            tfidf = {
                term: float(info["term_freq"]) / total_terms * idf_dict.get(term, 0.0)
                for term, info in terms.items()
            }
            tfidf_cache[doc_id] = normalize_dict(tfidf)

        if (i + 1) % 100 == 0:
            print(f"已生成 {i + 1} 个 TF-IDF 向量")

    return tfidf_cache


def user_query_to_tfidf_vector(text, idf_dict, lang="cn"):
    """Convert raw user text into an L2-normalized TF-IDF vector.

    Args:
        text: raw user input.
        idf_dict: corpus IDF dictionary for the chosen language.
        lang: "cn" → jieba pipeline, anything else → English pipeline.

    Returns:
        {term: weight} dict; {} when the text tokenizes to nothing.
    """
    if lang == "cn":
        tokens = preprocess_cn(text).split()
    else:
        tokens = preprocess_en(text).split()

    # Fix: empty input (or text consisting entirely of stopwords) previously
    # triggered a ZeroDivisionError below.
    if not tokens:
        return {}

    word_count = Counter(tokens)
    total_words = len(tokens)

    tf = {w: c / total_words for w, c in word_count.items()}
    tfidf = {w: tf[w] * idf_dict.get(w, 0) for w in word_count}
    return normalize_dict(tfidf)


def _load_titles(doc_ids):
    """Fetch each document's title from ES (placeholder when missing)."""
    titles = []
    for doc_id in doc_ids:
        source = es.get(index=INDEX_NAME, id=doc_id)["_source"]
        titles.append(source.get("title", "[无标题]"))
    return titles


def _build_language_cache(prefix, field):
    """Build (idf_dict, tfidf_cache, metadata, vocabulary) for one corpus.

    `prefix` selects the corpus by doc_id prefix ("cn_" / "en_"); `field`
    is the ES field whose term vectors are used.
    """
    doc_ids = get_all_document_ids(prefix)
    idf_dict = compute_idf_from_es(doc_ids, field=field)
    tfidf_cache = build_tfidf_cache(doc_ids, idf_dict, field=field)
    metadata = {
        "doc_ids": doc_ids,
        "titles": _load_titles(doc_ids)
    }
    return idf_dict, tfidf_cache, metadata, list(idf_dict.keys())


def build_idf_and_tfidf_vectors():
    """Populate the module-level IDF/TF-IDF caches for both corpora.

    Refactor: the Chinese and English branches were copy-pasted duplicates;
    the shared pipeline now lives in _build_language_cache().
    """
    global global_idf_dict_cn, global_idf_dict_en
    global global_tfidf_cache_cn, global_tfidf_cache_en
    global global_doc_metadata_cn, global_doc_metadata_en
    global global_vocabulary_cn, global_vocabulary_en

    print("初始化 TF-IDF 缓存...")

    # Chinese corpus: raw "content" field.
    (global_idf_dict_cn, global_tfidf_cache_cn,
     global_doc_metadata_cn, global_vocabulary_cn) = _build_language_cache("cn_", "content")

    # English corpus: the "content.english" sub-field.
    (global_idf_dict_en, global_tfidf_cache_en,
     global_doc_metadata_en, global_vocabulary_en) = _build_language_cache("en_", "content.english")

    print("初始化完成")


# Path to the Chinese stopword list (one word per line, UTF-8).
CN_STOPWORDS_PATH = "cn_stopwords.txt"
# Load Chinese stopwords at import time.
with open(CN_STOPWORDS_PATH, "r", encoding="utf-8") as f:
    cn_stopwords = set(line.strip() for line in f)

# English stopwords come from the NLTK corpus.
en_stopwords = set(stopwords.words('english'))


# 中文预处理
def preprocess_cn(text):
    text = re.sub(r"[^\u4e00-\u9fa5\s]", "", text)
    words = jieba.lcut(text)
    filtered_words = [w for w in words if w.strip() and w not in cn_stopwords]
    return " ".join(filtered_words)


# 英文预处理
def preprocess_en(text):
    text = re.sub(r"[^a-zA-Z0-9\s-]", "", text)
    words = word_tokenize(text.lower())
    words = [w for w in words if w.isalpha() and w not in en_stopwords]
    stemmer = PorterStemmer()
    stemmed = [stemmer.stem(w) for w in words]
    return " ".join(stemmed)


def get_origin_document(title):
    """Look up the original (unprocessed) document by exact title phrase.

    Searches the "news_documents_ori" index; returns {"title", "content"}
    or None when no document matches.
    """
    search_body = {
        "query": {"match_phrase": {"title": title}},
        "size": 1
    }
    res = es.search(index="news_documents_ori", body=search_body)
    hits = res['hits']['hits']
    if not hits:
        return None
    source = hits[0]['_source']
    return {
        "title": source.get("title", "[无标题]"),
        "content": source.get("content", "[无内容]")
    }


# 自定义 jsonify，禁用 ensure_ascii
def jsonify_utf8(data):
    return app.response_class(
        response=json.dumps(data, ensure_ascii=False).encode('utf-8'),
        mimetype='application/json'
    )


def get_analyzer(query):
    """Pick the ES analyzer for a query.

    Strips everything but ASCII alphanumerics; if anything remains the query
    is treated as English, otherwise as Chinese.

    Fix: the original additionally ran re.fullmatch(r'[a-zA-Z0-9]+', ...)
    on the stripped string, which is always true for a non-empty result of
    that re.sub — the redundant check is removed.
    """
    clean_query = re.sub(r'[^a-zA-Z0-9]', '', query)
    return "english_analyzer" if clean_query else "chinese_analyzer"


def get_content_type(query):
    """Pick the ES field to search for a query.

    Strips everything but ASCII alphanumerics; if anything remains the query
    targets the English sub-field, otherwise the base Chinese field.

    Fix: same as get_analyzer — the re.fullmatch on the stripped string was
    always true when non-empty, so the redundant check is removed.
    """
    clean_query = re.sub(r'[^a-zA-Z0-9]', '', query)
    return "content.english" if clean_query else "content"


def compute_tf(word_list):
    """Compute the normalized term frequency of each word.

    Args:
        word_list: list of tokens (may contain repeats).

    Returns:
        {word: count / len(word_list)}; {} for empty input (the original
        raised ZeroDivisionError on an empty list).
    """
    total_words = len(word_list)
    if total_words == 0:
        return {}
    return {
        word: count / float(total_words)
        for word, count in Counter(word_list).items()
    }


def compute_idf(documents):
    """Compute smoothed IDF (log(N / (df + 1))) for every token.

    Args:
        documents: sequence of token collections (one per document).

    Returns:
        {token: idf}. Empty input yields {}.

    Perf fix: the original rescanned every document per token
    (O(N * vocabulary) membership tests); document frequencies are now
    accumulated in a single pass with a Counter.
    """
    N = len(documents)
    doc_count = Counter()
    for doc in documents:
        # set() so each document contributes at most 1 per token.
        doc_count.update(set(doc))
    return {token: math.log(N / (df + 1)) for token, df in doc_count.items()}


def compute_tfidf(tf, idf):
    """Combine TF and IDF into a TF-IDF vector.

    Words missing from `idf` get weight 0.
    """
    return {word: weight * idf.get(word, 0) for word, weight in tf.items()}


def cosine_similarity_manual(vec1, vec2):
    """Cosine similarity of two sparse dict vectors.

    Returns 0.0 when either vector has zero norm.
    """
    shared_keys = vec1.keys() & vec2.keys()
    dot = sum(vec1[k] * vec2[k] for k in shared_keys)

    norm1 = math.sqrt(sum(v * v for v in vec1.values()))
    norm2 = math.sqrt(sum(v * v for v in vec2.values()))

    if norm1 == 0 or norm2 == 0:
        return 0.0
    return float(dot) / (norm1 * norm2)


def jaccard_similarity(set1, set2):
    """Jaccard index |A ∩ B| / |A ∪ B|; 0.0 when both sets are empty."""
    union = set1 | set2
    if not union:
        return 0.0
    return len(set1 & set2) / len(union)


def vectorize_tfidf(tfidf_vec, vocabulary):
    """Project a sparse TF-IDF dict onto the fixed vocabulary as a dense array.

    NOTE(review): this re-definition shadows an identical function defined
    earlier in this module; the two should be consolidated into one.
    """
    return np.array([tfidf_vec.get(term, 0.0) for term in vocabulary])


def normalize_vector(vec):
    """L2-normalize a numeric vector; returns a numpy array.

    Fix: the docstring promised list support, but `list / float` raises
    TypeError — the input is now coerced with np.asarray so genuine lists
    work too. A zero vector is returned unchanged instead of dividing by 0.
    """
    arr = np.asarray(vec, dtype=float)
    norm = np.linalg.norm(arr)
    return arr / norm if norm != 0 else arr


def normalize_dict(vec):
    """L2-normalize a sparse dict vector.

    Fix: a zero-norm vector (e.g. all weights 0) previously raised
    ZeroDivisionError; it is now returned as an unchanged copy. An empty
    dict yields an empty dict.
    """
    norm = math.sqrt(sum(x ** 2 for x in vec.values()))
    if norm == 0:
        return dict(vec)
    return {k: v / norm for k, v in vec.items()}


def cosine_similarity(vec1, vec2):
    """Cosine similarity of two sparse dict vectors (0.0 if either is zero).

    Perf fix: only keys present in BOTH vectors can contribute to the dot
    product, so we iterate the key intersection instead of the union the
    original scanned (a pure saving when vocabularies barely overlap).
    """
    shared = vec1.keys() & vec2.keys()
    dot_product = sum(vec1[term] * vec2[term] for term in shared)
    norm1 = math.sqrt(sum(v ** 2 for v in vec1.values()))
    norm2 = math.sqrt(sum(v ** 2 for v in vec2.values()))
    return dot_product / (norm1 * norm2) if norm1 and norm2 else 0.0


def cosine_similarity_vector(vec1, vec2):
    """Cosine similarity of two dense NumPy vectors.

    Returns 0.0 when either vector has zero norm.
    """
    norm_product = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    if not norm_product:
        return 0.0
    return np.dot(vec1, vec2) / norm_product


def cosine_distance(vec1, vec2):
    """Cosine distance (1 − cosine similarity) between two dense vectors."""
    return 1 - cosine_similarity_vector(vec1, vec2)


def kmeans_plusplus(data, vocabulary, k):
    """K-means++ seeding over TF-IDF dict vectors using cosine distance.

    Picks k initial centroids: the first uniformly at random, each
    subsequent one with probability proportional to the squared cosine
    distance to its nearest already-chosen centroid.

    Fix: floating-point rounding could leave the roulette-wheel selection
    with `selected_idx` still None after the loop (r marginally > 0),
    silently producing fewer than k centroids; the fallback now selects the
    last candidate so exactly k centroids are always returned. Also removed
    the unused `doc_ids` local.
    """
    vectors = [normalize_vector(vectorize_tfidf(vec, vocabulary)) for vec in data.values()]

    # First centroid: a uniformly random document vector.
    centroids = [vectors[random.randint(0, len(vectors) - 1)]]

    for _ in range(k - 1):
        # Squared distance from each point to its nearest existing centroid.
        dists = [
            min(cosine_distance(vec, c) ** 2 for c in centroids)
            for vec in vectors
        ]
        total = sum(dists)
        r = random.uniform(0, total)

        selected_idx = len(dists) - 1  # fallback guards against fp rounding
        for i, d in enumerate(dists):
            r -= d
            if r <= 0:
                selected_idx = i
                break

        centroids.append(vectors[selected_idx])

    return centroids


def check_convergence(old_clusters, new_clusters, tol=0.01):
    """Heuristic assignment-stability check between two clusterings.

    Computes the mean Jaccard overlap of corresponding clusters (paired
    positionally by zip over dict values) and reports convergence when the
    mean exceeds 1 - tol.

    NOTE(review): zip pairs clusters by dict iteration order, not by cluster
    id — if the two dicts list clusters in different orders, pairs may not
    correspond. An empty `old_clusters` would also raise ZeroDivisionError
    on the final division — confirm callers always pass non-empty
    clusterings with matching key order.
    """
    old_sets = [set(doc_id for doc_id, _ in cluster) for cluster in old_clusters.values()]
    new_sets = [set(doc_id for doc_id, _ in cluster) for cluster in new_clusters.values()]
    same_count = sum(len(old & new) / len(old | new) for old, new in zip(old_sets, new_sets))
    return same_count / len(old_sets) > (1 - tol)


def kmeans_manual(data, vocabulary, k=20, max_iter=100):
    """Hand-rolled k-means over sparse TF-IDF vectors using cosine distance.

    Args:
        data: {doc_id: sparse tf-idf dict}.
        vocabulary: fixed term order used to densify vectors.
        k: number of clusters.
        max_iter: iteration cap.

    Returns:
        (clusters, centroid_vectors) — clusters maps cluster index to a list
        of (doc_id, dense_vector) pairs; centroid_vectors is the list of k
        final centroid arrays.
    """
    # Densify and L2-normalize every document vector once up front.
    data_vectors = {
        doc_id: normalize_vector(vectorize_tfidf(vec, vocabulary))
        for doc_id, vec in data.items()
    }

    # Seed centroids with K-means++; the None slot mirrors the (doc_id, vec)
    # tuple shape used for cluster members but is never read.
    centroid_vectors = kmeans_plusplus(data, vocabulary, k=k)
    centroids = [(None, vec) for vec in centroid_vectors]

    previous_clusters = None

    for _ in range(max_iter):
        clusters = defaultdict(list)

        # Assignment step: attach each document to its nearest centroid.
        for doc_id, vec in data_vectors.items():
            distances = [cosine_distance(vec, c[1]) for c in centroids]
            closest = distances.index(min(distances))
            clusters[closest].append((doc_id, vec))

        # Update step: recompute each centroid as the normalized mean.
        new_centroids = []
        for i in range(k):
            # NOTE(review): indexing the defaultdict inserts an empty list for
            # clusters that received no documents, so `clusters` may contain
            # empty entries — confirm downstream consumers tolerate this.
            docs_in_cluster = clusters[i]
            if not docs_in_cluster:
                # Empty cluster: re-seed with the point farthest from all
                # current centroids.
                farthest_doc = max(
                    data_vectors.items(),
                    key=lambda item: min(cosine_distance(item[1], c[1]) for c in centroids)
                )
                new_centroids.append((None, normalize_vector(farthest_doc[1])))
            else:
                avg_vector = np.mean([vec for _, vec in docs_in_cluster], axis=0)
                new_centroids.append((None, normalize_vector(avg_vector)))

        # Convergence requires BOTH stable centroids and stable assignments.
        if previous_clusters is not None:
            centroid_stable = all(
                cosine_distance(c_old[1], c_new[1]) < 1e-4 for c_old, c_new in zip(centroids, new_centroids))
            assignment_stable = check_convergence(previous_clusters, clusters)
            if centroid_stable and assignment_stable:
                print("收敛，聚类结束")
                break

        centroids = new_centroids
        # Shallow copy is sufficient: a fresh defaultdict (and fresh member
        # lists) is built on every iteration.
        previous_clusters = clusters.copy()

    return clusters, [c[1] for c in centroids]


def find_closest_docs(cluster, center, top_n=5):
    """Return the top_n (doc_id, distance) pairs nearest to the centroid.

    `cluster` is a list of (doc_id, dense_vector) pairs; results are ordered
    by ascending cosine distance from `center`.
    """
    ranked = sorted(
        ((doc_id, cosine_distance(vec, center)) for doc_id, vec in cluster),
        key=lambda pair: pair[1],
    )
    return ranked[:top_n]


@app.route("/api/search")
def search():
    """Full-text search endpoint.

    Query params:
        q: the search text (required).
        lang: "cn" / "en" / "all" (default "all" — auto-detects the field).
        page: 1-based page number; 10 results per page.

    Returns JSON with the total hit count and highlighted result snippets.
    NOTE(review): int(request.args.get("page", 1)) raises ValueError on a
    non-numeric page param, surfacing as a 500 — confirm this is acceptable.
    """
    query = request.args.get("q", "").strip()
    lang = request.args.get("lang", "all")  # cn/en/all
    page = int(request.args.get("page", 1))
    size = 10

    if not query:
        return jsonify({"error": "搜索词不能为空"}), 400

    from_ = (page - 1) * size

    base_query = {}

    # Build the ES query dynamically based on the requested language.
    if lang == "cn":
        content_key = "content"
        base_query = {
            "bool": {
                "must": [
                    {
                        "match": {
                            "content": {
                                "query": query,
                                "analyzer": "chinese_analyzer"
                            }
                        }
                    }
                ],
                "filter": [
                    {"prefix": {"doc_id": "cn_"}}
                ]
            }
        }

    elif lang == "en":
        content_key = "content.english"
        base_query = {
            "bool": {
                "must": [
                    {
                        "match": {
                            "content.english": {
                                "query": query,
                                "analyzer": "english_analyzer"
                            }
                        }
                    }
                ],
                "filter": [
                    {"prefix": {"doc_id": "en_"}}
                ]
            }
        }

    else:  # lang == "all"
        # Auto-detect: ASCII-alphanumeric queries go to the English field.
        content_key = get_content_type(query)
        base_query = {
            "match": {
                get_content_type(query): {
                    "query": query,
                    "analyzer": get_analyzer(query)
                }
            }
        }

    es_query = {
        "query": base_query,
        "highlight": {
            "pre_tags": ["<strong>"],
            "post_tags": ["</strong>"],
            "fields": {
                "title": {"fragment_size": 150, "number_of_fragments": 3},
                "content": {"fragment_size": 200, "number_of_fragments": 3},
                "content.english": {"fragment_size": 200, "number_of_fragments": 3}
            }
        },
        "from": from_,
        "size": size
    }

    try:
        response = es.search(index=INDEX_NAME, body=es_query)
    except Exception as e:
        return jsonify({"error": f"搜索失败: {str(e)}"}), 500

    results = []
    for hit in response["hits"]["hits"]:
        highlight = hit.get("highlight", {})

        title_key = "title"

        # Prefer highlighted fragments; fall back to the raw stored fields.
        title_snippet = highlight.get(title_key, [hit["_source"].get("title", "[无标题]")])[0]
        # NOTE(review): when content_key is "content.english" the fallback
        # still reads _source["content"] — confirm that is the intended
        # behavior for English documents without highlights.
        content_snippet = highlight.get(content_key, [hit["_source"].get("content", "[无内容]")])[0]

        results.append({
            "title": title_snippet,
            "content": content_snippet[:300] + "...",
            "lang": hit["_source"].get("lang", "未知"),
            "id": hit["_id"],
            "score": hit["_score"]
        })

    return jsonify_utf8({
        "total": response["hits"]["total"]["value"],
        "results": results
    })


@app.route("/api/document")
def get_document():
    """Return one processed document (title/content/lang) by its ES _id."""
    doc_id = request.args.get("id")
    if not doc_id:
        return jsonify({"error": "缺少文档ID"}), 400

    try:
        # Existence check first so a missing id yields a clean 404.
        if not es.exists(index=INDEX_NAME, id=doc_id):
            return jsonify({"error": f"文档 {doc_id} 不存在"}), 404

        result = es.get(index=INDEX_NAME, id=doc_id)
        source = result["_source"]

        # Uniform field names with safe fallbacks for missing values.
        payload = {
            "title": source.get("title", "[无标题]"),
            "content": source.get("content", "[无内容]"),
            "id": result["_id"],
            "lang": source.get("lang", "未知"),
        }
        return jsonify(payload)

    except Exception as e:
        app.logger.error(f"获取文档失败: {str(e)}", exc_info=True)
        return jsonify({"error": f"服务器内部错误: {str(e)}"}), 500


@app.route("/api/similarity")
def api_similarity():
    """Compare two documents (located by exact title) with cosine similarity.

    Query params: title1, title2. Returns the similarity score, the shared
    tokens, the original documents, and the preprocessed contents.
    """
    title1 = request.args.get("title1", "").strip()
    title2 = request.args.get("title2", "").strip()

    if not title1 or not title2:
        return jsonify({"error": "标题不能为空"}), 400

    try:
        # Look up the preprocessed documents by exact title phrase.
        # NOTE(review): "content.english" is normally an ES multi-field and
        # does not appear as a literal key in _source — confirm documents
        # actually store it, otherwise the `or` fallback below is dead code.
        res1 = es.search(index=INDEX_NAME, body={"query": {"match_phrase": {"title": title1}}, "size": 1})
        if not res1['hits']['hits']:
            return jsonify({"error": f"未找到标题为 '{title1}' 的文档"}), 404
        doc1_processed = res1['hits']['hits'][0]['_source']
        content1_processed = doc1_processed.get('content', '') or doc1_processed.get('content.english', '')

        res2 = es.search(index=INDEX_NAME, body={"query": {"match_phrase": {"title": title2}}, "size": 1})
        if not res2['hits']['hits']:
            return jsonify({"error": f"未找到标题为 '{title2}' 的文档"}), 404
        doc2_processed = res2['hits']['hits'][0]['_source']
        content2_processed = doc2_processed.get('content', '') or doc2_processed.get('content.english', '')

        def tokenize(text):
            # Content was pre-tokenized at index time; split on whitespace.
            return [w for w in text.split() if w.strip()]

        tokens1 = tokenize(content1_processed)
        tokens2 = tokenize(content2_processed)

        tf1 = compute_tf(tokens1)
        tf2 = compute_tf(tokens2)

        # Similarity is computed on plain TF vectors (no IDF weighting here).
        vec1 = normalize_dict(tf1)
        vec2 = normalize_dict(tf2)

        # Cosine similarity between the normalized TF vectors.
        sim_cos = cosine_similarity_manual(vec1, vec2)

        set1 = set(tokens1)
        set2 = set(tokens2)

        # Tokens appearing in both documents.
        common_words = list(set1 & set2)

        return jsonify({
            "similarity": sim_cos,
            "common_words": common_words,
            "doc1": get_origin_document(title1),
            "doc2": get_origin_document(title2),
            "processed_doc1": content1_processed,
            "processed_doc2": content2_processed
        })

    except Exception as e:
        app.logger.error(f"文档相似度查询失败: {str(e)}")
        return jsonify({"error": "服务器内部错误"}), 500


@app.route("/api/duplicate-check", methods=["POST"])
def api_duplicate_check():
    """Check user-submitted text against the cached corpus TF-IDF vectors.

    Body: JSON {"text": ...}; query param `lang` ("cn" default / "en")
    selects the corpus. Returns the 10 most similar documents and a
    "plagiarism rate" defined as the mean similarity of the top 5.

    Perf fix: titles were previously resolved with metadata["doc_ids"].index()
    inside the loop — O(n) per document, O(n^2) overall. An id→title map is
    now built once before the loop.
    """
    data = request.get_json()
    user_text = data.get("text", "").strip()
    lang = request.args.get("lang", "cn")  # cn / en

    if not user_text:
        return jsonify({"error": "请输入文本内容"}), 400

    try:
        # Pick the per-language IDF dictionary and TF-IDF cache.
        if lang == "cn":
            idf_dict = global_idf_dict_cn
            tfidf_cache = global_tfidf_cache_cn
            metadata = global_doc_metadata_cn
        else:
            idf_dict = global_idf_dict_en
            tfidf_cache = global_tfidf_cache_en
            metadata = global_doc_metadata_en

        # Vectorize the user text with the corpus IDF.
        user_vec = user_query_to_tfidf_vector(user_text, idf_dict, lang)

        # O(1) title lookup per document.
        title_by_id = dict(zip(metadata["doc_ids"], metadata["titles"]))

        results = []
        for doc_id, tfidf_vec in tfidf_cache.items():
            sim = cosine_similarity(user_vec, tfidf_vec)
            results.append({
                "title": title_by_id[doc_id],
                "similarity": sim,
                "doc_id": doc_id
            })

        results.sort(key=lambda x: x["similarity"], reverse=True)

        # Plagiarism rate: mean similarity of the top 5 matches.
        top_n = results[:5]
        plagiarism_rate = sum(item['similarity'] for item in top_n) / len(top_n) if top_n else 0

        return jsonify({
            "plagiarism_rate": plagiarism_rate,
            "user_tokens": list(user_vec.keys()),  # tokens extracted from the user input
            "results": results[:10]
        })

    except Exception as e:
        app.logger.error(f"文档查重失败: {str(e)}")
        return jsonify({"error": "服务器内部错误"}), 500


@app.route("/api/original-document")
def get_original_document():
    """Return one ORIGINAL (unprocessed) document by its ES _id.

    Reads from the "news_documents_ori" index rather than the processed one.
    """
    doc_id = request.args.get("id")
    if not doc_id:
        return jsonify({"error": "缺少文档ID"}), 400

    ORIGINAL_INDEX_NAME = "news_documents_ori"

    try:
        # Existence check first so a missing id yields a clean 404.
        if not es.exists(index=ORIGINAL_INDEX_NAME, id=doc_id):
            return jsonify({"error": f"原始文档 {doc_id} 不存在"}), 404

        result = es.get(index=ORIGINAL_INDEX_NAME, id=doc_id)
        source = result["_source"]

        payload = {
            "title": source.get("title", "[无标题]"),
            "content": source.get("content", "[无内容]"),
            "id": result["_id"],
            "lang": source.get("lang", "未知"),
        }
        return jsonify(payload)

    except Exception as e:
        app.logger.error(f"获取原始文档失败: {str(e)}")
        return jsonify({"error": "服务器内部错误"}), 500


def get_top_keywords_from_vector(vec, vocabulary, top_n=20):
    """Extract the highest-weight terms from a dense vector.

    Returns up to top_n entries of {"word", "weight"}, heaviest first.
    """
    order = np.argsort(vec)
    picked = order[-top_n:][::-1]  # heaviest weights, descending
    return [{"word": vocabulary[i], "weight": float(vec[i])} for i in picked]


@app.route("/api/cluster")
def api_cluster_all():
    """Cluster the full corpus and return clusters plus a 2-D embedding.

    Query params: lang ("cn" default / "en"), k (cluster count, default 20).
    Runs hand-rolled k-means on the cached TF-IDF vectors, then UMAP for a
    2-D scatter-plot projection of every document.
    """
    lang = request.args.get("lang", "cn")
    k = int(request.args.get("k", "20"))

    try:
        if lang == "cn":
            tfidf_cache = global_tfidf_cache_cn
            vocab = global_vocabulary_cn
        else:
            tfidf_cache = global_tfidf_cache_en
            vocab = global_vocabulary_en

        # Cluster the cached vectors.
        clusters, centers = kmeans_manual(tfidf_cache, vocab, k=k)

        # Build the per-cluster display payload (top docs + keywords).
        # NOTE(review): es.get is called once per displayed document inside
        # the loop; an mget would reduce round-trips — confirm acceptable.
        result_clusters = []
        for cluster_idx, docs_in_cluster in clusters.items():
            centroid = centers[cluster_idx]
            scored_docs = [(doc_id, cosine_distance(vec, centroid)) for doc_id, vec in docs_in_cluster]
            scored_docs.sort(key=lambda x: x[1])
            top_docs = scored_docs[:5]

            # Centroid keywords feed the frontend word cloud.
            top_keywords = get_top_keywords_from_vector(centroid, vocab)

            result_clusters.append({
                "cluster_id": int(cluster_idx),
                "size": len(docs_in_cluster),
                "docs": [{
                    "title": es.get(index=INDEX_NAME, id=doc_id)["_source"].get("title", "[无标题]"),
                    "doc_id": doc_id,
                    "distance": round(dist, 4)
                } for doc_id, dist in top_docs],
                "centroid_keywords": top_keywords  # keywords for the word cloud
            })

        # Flatten documents for dimensionality reduction.
        doc_ids = []
        vectors = []
        labels = []

        for cluster_idx, docs in clusters.items():
            for doc_id, vec in docs:
                doc_ids.append(doc_id)
                vectors.append(vec)
                labels.append(cluster_idx)

        X = np.array(vectors)

        # UMAP projection to 2-D for the scatter plot.
        reducer = umap.UMAP(n_components=2, n_neighbors=15, min_dist=0.1)
        reduced_vectors = reducer.fit_transform(X)

        # Single combined payload: clusters + embedding coordinates.
        return jsonify({
            "clusters": result_clusters,
            "embeddings": {
                "doc_ids": doc_ids,
                "coordinates": reduced_vectors.tolist(),
                "labels": labels
            }
        })

    except Exception as e:
        app.logger.error(f"统一聚类接口失败: {str(e)}")
        return jsonify({"error": "服务器内部错误"}), 500



if __name__ == "__main__":
    # Build the TF-IDF caches synchronously before serving any requests.
    build_idf_and_tfidf_vectors()
    app.run(debug=False, host='0.0.0.0', port=5000)
