from flask import Flask, request, jsonify
from sentence_transformers import SentenceTransformer
import numpy as np
from pymilvus import Collection, connections
from sklearn.metrics.pairwise import cosine_similarity

# Initialize the Flask application.
app = Flask(__name__)

# Connect to the Milvus vector database.
# NOTE(review): host/port are hard-coded; consider moving them to
# configuration or environment variables.
connections.connect(alias='default', host='47.106.154.179', port='19530')
print("连接milvus成功")

# Load the SentenceTransformer embedding model (Chinese BERT).
model = SentenceTransformer('bert-base-chinese')
print("加载sentence模型成功")

# Name of the category collection (table) in Milvus.
collection_name = "category"

# Handle to the Milvus collection used for classification lookups.
collection = Collection(name=collection_name)

# Status strings used in the JSON responses of the REST API.
SUCCESS = "success"
FAILED = "failed"


# 计算embedding
def get_embedding(text):
    """Encode *text* into an embedding vector with the module-level model.

    Any exception raised by the encoder is logged and re-raised so the
    caller can decide how to handle it.
    """
    try:
        embedding = model.encode(text)
    except Exception as e:
        print(f"Error in getting embedding: {e}")
        raise
    return embedding


# 从Milvus数据库中检索所有的embedding
def get_all_embeddings():
    """Fetch every stored category embedding from Milvus.

    Returns a list of ``(id, number, name, embedding)`` tuples, one per
    record in the collection.
    """
    try:
        # Make sure the collection is resident in memory before querying.
        collection.load()
        # Pull every record (expr matches all ids) with the fields we need.
        rows = collection.query(
            expr="id >= 0",
            output_fields=["id", "number", "name", "embedding"],
        )
        return [
            (row['id'], row['number'], row['name'], row['embedding'])
            for row in rows
        ]
    except Exception as e:
        print(f"Error in querying embeddings: {e}")
        raise


# 计算相似度
def get_most_similar_category(query_embedding):
    """Find the stored category whose embedding is closest to *query_embedding*.

    Args:
        query_embedding: 1-D embedding vector for the document to classify.

    Returns:
        Tuple ``(category_id, category_number, category_name, similarity)``
        where ``category_id`` is a plain int and ``similarity`` a plain
        float (native types so the result is JSON-serializable).

    Raises:
        ValueError: if the Milvus collection contains no embeddings.
    """
    try:
        # Fetch all category embeddings stored in Milvus.
        embeddings = get_all_embeddings()

        # Guard against an empty collection: without this, np.argmax /
        # cosine_similarity would fail with a confusing low-level error.
        if not embeddings:
            raise ValueError("No category embeddings found in Milvus")

        all_embeddings = np.array([item[3] for item in embeddings])
        all_ids = np.array([item[0] for item in embeddings])  # may be int64
        all_numbers = [item[1] for item in embeddings]
        all_names = [item[2] for item in embeddings]

        # Cosine similarity between the query and every stored category.
        similarities = cosine_similarity([query_embedding], all_embeddings)[0]

        # Index of the best-matching category.
        most_similar_idx = np.argmax(similarities)

        # Convert numpy scalar types to native Python for jsonify.
        category_id = int(all_ids[most_similar_idx])
        similarity = float(similarities[most_similar_idx])

        return category_id, all_numbers[most_similar_idx], all_names[most_similar_idx], similarity
    except Exception as e:
        print(f"Error in calculating similarity: {e}")
        raise


# 定义RESTful接口
@app.route('/classify', methods=['POST'])
def classify_document():
    """Classify a document from its abstract and keywords.

    Expects a JSON body ``{"abstract": str, "keywords": str}`` where the
    keywords are separated by the full-width semicolon "；".  Responds with
    the most similar category (id, number, name, similarity) on success,
    or a ``{"status": "failed", "message": ...}`` payload on error.
    """
    try:
        # silent=True makes a missing/malformed JSON body yield None
        # (instead of raising) so we can answer with a clean 400 rather
        # than an opaque 500.
        data = request.get_json(silent=True)
        if not isinstance(data, dict):
            return jsonify({
                "status": FAILED,
                "message": "Request body must be a JSON object"
            }), 400

        abstract = data.get('abstract', '')
        keywords = data.get('keywords', '')

        # Non-string values would crash on .strip(); reject them explicitly.
        if not isinstance(abstract, str) or not isinstance(keywords, str):
            return jsonify({
                "status": FAILED,
                "message": "Abstract and keywords must be strings"
            }), 400

        abstract = abstract.strip()
        keywords = keywords.strip()

        if not abstract or not keywords:
            return jsonify({
                "status": FAILED,
                "message": "Abstract or keywords missing or empty"
            }), 400

        # Split keywords on the full-width semicolon "；", dropping blanks.
        keyword_list = [kw.strip() for kw in keywords.split('；') if kw.strip()]

        if len(keyword_list) == 0:
            return jsonify({
                "status": FAILED,
                "message": "No valid keywords found"
            }), 400

        # Embedding of the abstract text.
        abstract_embedding = get_embedding(abstract)

        # Mean embedding over all individual keywords.
        keyword_embeddings = np.mean([get_embedding(kw) for kw in keyword_list], axis=0)

        # Weighted combination of abstract and keyword embeddings
        # (weights are tunable; keywords currently dominate slightly).
        final_embedding = 0.4 * abstract_embedding + 0.6 * keyword_embeddings

        # Look up the closest category in Milvus.
        category_id, category_number, category_name, similarity = get_most_similar_category(final_embedding)

        # Assemble the success response.
        result = {
            'status': SUCCESS,
            'data': {
                "category_id": category_id,
                "category_number": category_number,
                "category_name": category_name,
                "similarity": similarity
            }
        }

        return jsonify(result)

    except Exception as e:
        # Top-level boundary: log and translate into a JSON 500.
        print(f"Error in /classify endpoint: {e}")
        return jsonify({
            "status": FAILED,
            "message": str(e)
        }), 500


if __name__ == '__main__':
    # NOTE(review): debug=True enables the Werkzeug interactive debugger,
    # which allows arbitrary code execution if reachable; combined with
    # host='0.0.0.0' this is unsafe outside local development — confirm
    # before deploying.
    app.run(host='0.0.0.0', port=5000, debug=True)
