# -*- coding: utf-8 -*-
import os
from langchain_text_splitters import RecursiveCharacterTextSplitter
import pandas as pd
from pymilvus import FieldSchema, CollectionSchema, DataType, MilvusClient
from pymilvus.milvus_client import IndexParams
from langchain_openai import ChatOpenAI
from sentence_transformers import SentenceTransformer, CrossEncoder  
from langchain_community.retrievers import BM25Retriever  
from functools import lru_cache   
from langchain.prompts import PromptTemplate  
from langchain.chains import LLMChain  
import torch
import numpy as np


# Milvus client; expects a local Milvus instance (e.g. started via Docker) on the default port.
client = MilvusClient(uri="http://localhost:19530")
root=os.path.dirname(__file__)

# Presumably a dummy value so langchain-openai does not reject a missing key;
# the real key is passed directly to ChatOpenAI below — TODO confirm.
os.environ["OPENAI_API_KEY"] = "none"

# Network URL of the embedding service (currently unused)
# API_URL = "https://px-sg1.matpool.com:26998/embed"


# LLM for query rewrite (placeholder — fill in your API)
# SECURITY NOTE(review): the API key is hard-coded below; move it to an
# environment variable or secret store before committing/deploying.
chat_model = ChatOpenAI(
        api_key="sk-b07d91eefa82450d90448d549e87d4b7",  # API key
        base_url="https://api.deepseek.com/v1",
        model="deepseek-chat",
        timeout=30,
        max_retries=3
    )


# Reranker (loaded globally; MiniLM is lightweight)
reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2')


# Query rewrite prompt (template text is runtime data — kept verbatim)
rewrite_prompt = PromptTemplate(
    input_variables=["query"],
    template="""将以下查询重写为中英双语的更全面的搜索查询，添加同义词、相关术语，保持原意。用于网络安全知识库检索。只返回重写内容，除此之外的文本都不要返回。
原查询: {query}
重写查询: """
)

# Rewrite chain (NOTE(review): LLMChain is deprecated in newer LangChain
# releases in favor of prompt | llm Runnable composition — verify version)
rewrite_chain = LLMChain(llm=chat_model, prompt=rewrite_prompt)


# Path to the Parquet data file with precomputed embeddings
parquet_path = os.path.join(root, "../data/security_embeddings.parquet")

# Path to the text-segmentation model (currently unused)
# segmentation_model = os.path.join(root,'../document-segmentation')

# def split_text(path):
#     df=pd.read_parquet(path)
#     # p=pipeline(
#     #     task=Tasks.document_segmentation,
#     #     model=segmentation_model,
#     #     device='cpu'
#     # )
#     # Use LangChain's built-in text splitter
#     splitter = RecursiveCharacterTextSplitter(
#         chunk_size=2000,      # ~2000 chars per chunk; tunable (too large loses detail, too small adds noise)
#         chunk_overlap=300,    # 300-char overlap keeps context continuous across chunks
#         length_function=len,  # measure length in characters
#         separators=["\n\n", "\n", ". ", "。", "? ", "！", " ", ""],  # split by paragraph/sentence first; handles both CJK and Latin punctuation
#     )
#     all_chunks=[]
#     for idx in range(len(df)):
#         text=df.iloc[idx]['text']
#         category=df.iloc[idx]['category']
#         # res=p(text)[OutputKeys.TEXT]
#         # chunks = [chunk.strip() for chunk in res.split('\n') if chunk.strip()]  # Clean up empty chunks
#         chunks = splitter.split_text(text)  # smart splitting; returns list[str]
#         for sub_idx, chunk in enumerate(chunks):
#             all_chunks.append({
#                 'id': f"{idx}_{sub_idx}",
#                 'text': chunk.strip(),
#                 'category': category
#             })
#     return all_chunks




# Load the data
# Knowledge_path=os.path.join(root,"../data/train.parquet")
# Word-embedding model
# Use an online model to avoid local-path issues
# embedder = SentenceTransformer("sentence-transformers/all-MiniLM-L6-v2")
# embedder = SentenceTransformer(os.path.join(root, "../Qwen3-Embedding-0.6B"))

# Global texts for BM25 (preloaded)
# Memory optimization: lazy-load only when actually needed
texts_global = None


def get_texts_global():
    """Return the cached list of corpus texts, loading the Parquet file on first use."""
    global texts_global
    if texts_global is None:
        # Load lazily so importing this module does not pull the whole corpus into memory.
        texts_global = pd.read_parquet(parquet_path)['text'].tolist()
    return texts_global
# Create the vector database and import the data
def create_embedding_database():
    """
    Load data from Parquet and create the HNSW index.

    NOTE: the Milvus path is currently disabled — this function only prints a
    notice and returns None. The commented-out code below is the reference
    implementation kept for when Milvus is re-enabled.
    """
    print("Milvus数据库功能已禁用")
    return
    # DIMENSION = 1024
    # # Load the Parquet file
    # df = pd.read_parquet(parquet_path)
    # texts = df['text'].tolist()
    # ids = df['orange_id'].tolist()  # use orange_id
    # categories = df['category'].tolist()
    # embeddings = np.stack(df['embedding'].values)  # the Parquet file has an 'embedding' column
    # total_entities = len(df)
    # print(f"Loaded {total_entities} entities from Parquet.")

    # # Batched insert (BATCH_SIZE=5000 to avoid OOM)
    # BATCH_SIZE = 5000
    # inserted_count = 0
    # fields = [
    #     FieldSchema(name="id", dtype=DataType.VARCHAR, is_primary=True, auto_id=True, max_length=100),
    #     FieldSchema(name="orange_id", dtype=DataType.VARCHAR, max_length=100),
    #     FieldSchema(name="text", dtype=DataType.VARCHAR, max_length=65535),
    #     FieldSchema(name="category", dtype=DataType.VARCHAR, max_length=100),
    #     FieldSchema(name="embedding", dtype=DataType.FLOAT_VECTOR, dim=DIMENSION)
    # ]
    # schema = CollectionSchema(fields=fields, description="网络安全向量集合")

    # # Create the collection, dropping it first if it already exists
    # if client.has_collection("security_papers"):
    #     client.drop_collection("security_papers")
    # client.create_collection(
    #     collection_name="security_papers",
    #     schema=schema
    # )

    # # NOTE(review): despite BATCH_SIZE above, this loop inserts ONE row per
    # # client.insert() call — the batching was never actually applied.
    # for i in range(0, total_entities):
    #     # batch_end = min(i + BATCH_SIZE, total_entities)
    #     batch_entities = {
    #         "orange_id": ids[i],
    #         "text": texts[i],
    #         "category": categories[i],
    #         "embedding": embeddings[i].tolist()
    #     }
    #     # Insert the data
    #     res = client.insert(
    #         collection_name="security_papers",
    #         data=batch_entities
    #     )
    #     inserted_count += res['insert_count']
    #     print(f"Inserted batch {i//BATCH_SIZE + 1}: {res['insert_count']} entities (total: {inserted_count})", flush=True)

    # print(f"Inserted {inserted_count} entities into collection 'security_papers'.")

    # # HNSW index (balances recall vs. speed)
    # index_params = IndexParams()
    # index_params.add_index("embedding","HNSW","hnsw_index",metric_type="COSINE",M=16,efConstruction=200)
    
    # # Create the index
    # client.create_index(
    #     collection_name="security_papers",
    #     index_params=index_params
    # )
    # print("HNSW index created.")

    # # Load the collection into memory
    # client.load_collection("security_papers")
    # print("Collection loaded to memory.")


# Query rewriting
def rewrite_query(query: str) -> str:
    """
    Rewrite the query with the LLM to improve recall.

    Falls back to the original query if the LLM call fails for any reason.
    """
    try:
        rewritten = rewrite_chain.run(query).strip()
        print(f"原始查询: {query}\n重写查询: {rewritten}")
        return rewritten
    except Exception as e:
        # Best-effort: retrieval still works with the unmodified query.
        print(f"重写失败: {e}, 使用原始查询。")
        return query


# Hybrid search (vector + BM25)
def hybrid_search(query: str, top_k: int = 5):
    """
    Hybrid retrieval: BM25 keyword filter + Milvus vectors + rerank.

    Intended pipeline (see the commented reference implementation below):
    - BM25: exact keyword matching.
    - Vectors: semantic similarity.
    - Fusion: alpha=0.7 * vector + 0.3 * BM25.
    - Rerank: cross-encoder re-scores the top-10 candidates down to top_k.

    The Milvus-backed path is currently disabled, so this function always
    returns an empty list. (Previously the body was fully commented out and
    the function fell off the end returning None, breaking callers that
    expect a list of result dicts.)
    """
    # 1. Query rewrite
    # query = rewrite_query(query)

    # # 2. BM25 retrieval (over the full corpus)
    # # bm25_retriever = BM25Retriever.from_texts(get_texts_global())  # texts_global holds all Parquet texts
    # # bm25_docs = bm25_retriever.get_relevant_documents(query)[:10]  # top-10 candidates
    # # bm25_scores = [doc.metadata.get('score', 0.5) for doc in bm25_docs]  # BM25 scores

    # # 3. Vector search (Milvus)
    # with torch.no_grad():  # explicit no_grad
    #     query_embedding = embedder.encode([query], batch_size=1)[0].tolist()  # batch=1 is safe
    # torch.cuda.empty_cache()  # free embedding memory
    # search_params = {
    #     "metric_type": "COSINE",
    #     "params": {"ef": 50}  # HNSW search ef; 50 balances speed vs. recall
    # }
    # results = client.search(
    #     collection_name="security_papers",
    #     data=[query_embedding],
    #     anns_field="embedding",
    #     search_params=search_params,
    #     limit=10,  # top-10 candidates
    #     output_fields=["text", "category"],
    #     # expr=expr
    # )
    # vector_docs = [hit.entity['text'] for hit in results[0]]
    # vector_scores = [hit.score for hit in results[0]]

    # # 4. Score fusion (alpha * vector + (1 - alpha) * bm25); BM25 matching
    # # by doc id was never finished, so only the vector score is used.
    # alpha = 0.7
    # fused_docs = []
    # for v_doc, v_score in zip(vector_docs, vector_scores):
    #     fused_docs.append((v_doc, v_score))

    # # 5. Rerank the top-10
    # pairs = [[query, doc] for doc, _ in fused_docs[:10]]
    # if pairs:
    #     rerank_scores = reranker.predict(pairs)
    #     reranked = sorted(zip(fused_docs, rerank_scores), key=lambda x: x[1], reverse=True)[:top_k]
    #     return [{"text": doc, "score": score, "rerank_score": r_score}
    #             for (doc, score), r_score in reranked]
    # return []

    # Retrieval is disabled: return an empty list so callers such as
    # search_similar_text always receive a list, never None.
    return []

# Cached search
@lru_cache(maxsize=100)  # memoize up to 100 distinct (query, top_k) pairs
def search_similar_text(query, top_k=3):
    """
    Optimized retrieval: hybrid + rerank + cache + filter.

    The Milvus backend is temporarily disabled, so this only logs the query
    and returns an empty result list.
    """
    try:
        # Milvus-backed hybrid_search is bypassed for now.
        print(f"搜索查询: {query} (Milvus数据库已禁用)")
    except Exception as e:
        print(f"搜索错误: {e}")
    return []



if __name__ == "__main__":
    # Before running, pull and start Milvus with Docker
    # (search for "milvusdb/milvus" on the Docker Desktop hub page).
    # If no local Milvus database exists yet, run create_embedding_database() first.
    # create_embedding_database()

    demo_query = "威胁模型是什么？"
    print(search_similar_text(demo_query))

