#!/usr/bin/env python3
# -*- coding: utf-8 -*-

import json
import sys
import re
from datetime import datetime

# Guarded import of the ML dependencies: the caller of this script parses
# stdout as JSON, so a missing package must produce a structured error
# object (not a raw ImportError traceback) before exiting non-zero.
try:
    import numpy as np
    from sklearn.cluster import KMeans
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity
except Exception as e:
    print(json.dumps({
        "status": "error",
        "message": f"缺少 TF-IDF/KMeans 依赖，请执行 pip install scikit-learn numpy：{e}"
    }, ensure_ascii=False))
    sys.exit(1)


def normalize_params(params, doc_count):
    """Clamp user-supplied clustering parameters to safe ranges.

    Args:
        params: dict of optional overrides (``n_clusters``, ``top_keywords``,
            ``top_recommendations``, ``max_features``).
        doc_count: number of documents to be clustered (assumed >= 1).

    Returns:
        Tuple ``(n_clusters, top_keywords, top_recommendations, max_features)``.
    """
    n_clusters = params.get("n_clusters", 8)
    # Prefer at least 2 clusters, but never more clusters than documents:
    # KMeans raises ValueError when n_clusters > n_samples. (The previous
    # clamp returned 2 even for a single document, crashing downstream.)
    n_clusters = min(max(2, min(n_clusters, doc_count)), doc_count)
    top_keywords = max(1, params.get("top_keywords", 5))
    top_recommendations = max(1, params.get("top_recommendations", 5))
    max_features = max(100, params.get("max_features", 3000))
    return n_clusters, top_keywords, top_recommendations, max_features


def build_vectorizer(max_features):
    """Create the TF-IDF vectorizer used for mixed Chinese/Latin fund text.

    Tokens are word-level unigrams and bigrams whose tokens contain at
    least two word or CJK characters each.
    """
    options = {
        "max_features": max_features,
        "ngram_range": (1, 2),
        "token_pattern": r"(?u)\b[\w\u4e00-\u9fff]{2,}\b",
        "analyzer": "word",
    }
    return TfidfVectorizer(**options)


# Low-signal terms that should never surface as cluster keywords.
STOP_WORDS = {"在市", "开放式基金", "开放式", "基金", "of"}
# Case-folded copy used for case-insensitive membership tests.
STOP_WORDS_LOWER = {word.lower() for word in STOP_WORDS}
# Matches any single CJK unified ideograph (detects Chinese terms).
CHINESE_PATTERN = re.compile(r"[\u4e00-\u9fff]")


def is_meaningful_keyword(term: str) -> bool:
    """Return True when ``term`` is worth showing as a cluster keyword.

    Rejects empty strings, configured stop words (case-insensitive), and
    single-character terms containing no Chinese ideograph.
    """
    if not term:
        return False
    if term.lower() in STOP_WORDS_LOWER:
        return False
    # A lone Latin character carries no signal; a single CJK char may.
    return len(term) > 1 or CHINESE_PATTERN.search(term) is not None


def normalize_keyword(term: str) -> str:
    """Remove internal spaces and middle dots, then trim edge whitespace."""
    if not term:
        return ""
    for junk in (" ", "·"):
        term = term.replace(junk, "")
    return term.strip()


def extract_keywords(kmeans, vectorizer, top_keywords):
    """Pick the top TF-IDF terms closest to each cluster centroid.

    Returns a dict mapping cluster id -> list of at most ``top_keywords``
    cleaned, de-duplicated keyword strings. When filtering removes every
    candidate, falls back to the raw highest-weighted terms.
    """
    feature_names = vectorizer.get_feature_names_out()
    # Feature indices per cluster, ordered by descending centroid weight.
    ranked_indices = kmeans.cluster_centers_.argsort()[:, ::-1]

    keywords_by_cluster = {}
    for cluster_id in range(kmeans.n_clusters):
        selected = []
        for feature_index in ranked_indices[cluster_id]:
            candidate = normalize_keyword(feature_names[feature_index])
            if not is_meaningful_keyword(candidate) or candidate in selected:
                continue
            selected.append(candidate)
            if len(selected) == top_keywords:
                break
        if not selected:
            # Everything was filtered out; use the raw top terms instead.
            selected = [
                normalize_keyword(feature_names[index])
                for index in ranked_indices[cluster_id, :top_keywords]
            ]
        keywords_by_cluster[cluster_id] = selected[:top_keywords]
    return keywords_by_cluster


def build_recommendations(labels, cosine_sim, fund_codes, top_n):
    """For each fund, recommend its ``top_n`` most similar same-cluster funds.

    Args:
        labels: cluster label per document (parallel to ``fund_codes``).
        cosine_sim: square pairwise cosine-similarity matrix.
        fund_codes: fund identifier per document.
        top_n: maximum number of recommendations per source fund.

    Returns:
        Flat list of recommendation dicts (source, target, cluster, score).
    """
    recommendations = []
    for i, source_code in enumerate(fund_codes):
        # Candidate targets: every *other* fund in the same cluster.
        peers = [
            (other_code, cosine_sim[i][j])
            for j, other_code in enumerate(fund_codes)
            if j != i and labels[j] == labels[i]
        ]
        peers.sort(key=lambda pair: pair[1], reverse=True)
        cluster_id = int(labels[i])
        for target_code, score in peers[:top_n]:
            recommendations.append({
                "sourceFundCode": source_code,
                "targetFundCode": target_code,
                "clusterId": cluster_id,
                "score": float(score),
                "reason": "同簇相似基金"
            })
    return recommendations


def _fail(message):
    """Print a {"status": "error"} JSON object to stdout and exit 1."""
    print(json.dumps({"status": "error", "message": message}, ensure_ascii=False))
    sys.exit(1)


def main():
    """Read a JSON payload from stdin, cluster the documents, print JSON.

    Expected payload shape:
        {"documents": [{"fundCode": str, "text": str}, ...], "params": {...}}

    Prints a success object containing per-fund cluster assignments,
    per-cluster keywords and same-cluster recommendations. All failures
    are reported as a structured error object with exit code 1, since the
    caller parses stdout as JSON.
    """
    raw = sys.stdin.read()
    if not raw:
        _fail("缺少输入参数")

    # Malformed JSON previously escaped as an uncaught traceback, breaking
    # the JSON-on-stdout contract; report it as a structured error instead.
    try:
        payload = json.loads(raw)
    except json.JSONDecodeError as e:
        _fail(f"输入不是合法 JSON：{e}")

    documents = payload.get("documents", [])
    params = payload.get("params", {})

    if not documents:
        _fail("documents 不能为空")

    # `or ""` guards against an explicit null "text" value, which
    # `doc.get("text", "")` would pass through and crash .strip().
    texts = [(doc.get("text") or "").strip() for doc in documents]
    fund_codes = [doc.get("fundCode") for doc in documents]

    # Drop documents with empty text, keeping texts and codes aligned.
    valid_indices = [idx for idx, text in enumerate(texts) if text]
    if not valid_indices:
        _fail("所有文本内容均为空，无法聚类")

    texts = [texts[idx] for idx in valid_indices]
    fund_codes = [fund_codes[idx] for idx in valid_indices]

    n_clusters, top_keywords, top_recommendations, max_features = normalize_params(params, len(texts))

    vectorizer = build_vectorizer(max_features)
    # TfidfVectorizer raises ValueError ("empty vocabulary") when no token
    # matches its pattern; surface that as a structured error as well.
    try:
        tfidf_matrix = vectorizer.fit_transform(texts)
    except ValueError as e:
        _fail(f"文本特征提取失败：{e}")

    # KMeans requires n_clusters <= n_samples. The previous clamp forced a
    # floor of 2 and therefore crashed when only one document survived
    # filtering; clamp to the actual sample count instead.
    if n_clusters > len(texts):
        n_clusters = len(texts)

    kmeans = KMeans(n_clusters=n_clusters, n_init=10, random_state=42)
    labels = kmeans.fit_predict(tfidf_matrix)

    cluster_keywords = extract_keywords(kmeans, vectorizer, top_keywords)

    # Similarity of each document to its cluster centre (assignment score)
    # and to every other document (recommendation score).
    center_sim = cosine_similarity(tfidf_matrix, kmeans.cluster_centers_)
    cosine_sim = cosine_similarity(tfidf_matrix)

    assignments = [
        {
            "fundCode": fund_code,
            "clusterId": int(labels[idx]),
            "score": float(center_sim[idx, labels[idx]]),
        }
        for idx, fund_code in enumerate(fund_codes)
    ]

    recommendations = build_recommendations(labels, cosine_sim, fund_codes, top_recommendations)

    result = {
        "status": "success",
        "assignments": assignments,
        "clusters": [
            {"clusterId": cluster_id, "keywords": words}
            for cluster_id, words in cluster_keywords.items()
        ],
        "recommendations": recommendations,
        "timestamp": datetime.now().isoformat(),
    }
    print(json.dumps(result, ensure_ascii=False))


if __name__ == "__main__":
    main()



