# -*- coding: utf-8 -*-
import os
import re
from langchain_community.vectorstores import Milvus
from langchain_community.document_loaders import TextLoader
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.prompts import PromptTemplate
from langchain_community.llms import VLLMOpenAI
from langchain.chains import RetrievalQA

# Configuration
MODEL_PATH = "/path/to/Qwen3-32B"  # replace with the actual local model path
DOC_PATH = "./ht_knowledge.md"  # knowledge-base document path
MILVUS_HOST = "localhost"
MILVUS_PORT = "19530"
EMBEDDING_MODEL = "moka-ai/m3e-base"  # embedding model tuned for Chinese text

def setup_milvus_vectorstore():
    """Build a Milvus vector store from the local knowledge-base document.

    Loads DOC_PATH, strips Markdown decoration, splits the text into
    overlapping chunks, embeds each chunk with EMBEDDING_MODEL, and writes
    everything into the "ht_knowledge" collection, replacing old data.

    Returns:
        The populated Milvus vector store.
    """
    # Markdown noise: inline images, heading markers, and --/*** rules.
    md_noise = re.compile(r"\!\[.*?\]\(.*?\)|\#{1,6}\s*|[-*]{2,3}")

    docs = TextLoader(DOC_PATH, encoding="utf-8").load()
    for item in docs:
        item.page_content = md_noise.sub("", item.page_content)

    # Chunk the cleaned text, preferring paragraph and sentence boundaries
    # (Chinese punctuation included).
    splitter = RecursiveCharacterTextSplitter(
        chunk_size=500,
        chunk_overlap=100,
        separators=["\n\n", "\n", "。", "？", "！", "；", "…"],
    )
    chunks = splitter.split_documents(docs)
    print(f"文档分割为 {len(chunks)} 个片段")

    # GPU-backed embedding model.
    embedder = HuggingFaceEmbeddings(
        model_name=EMBEDDING_MODEL,
        model_kwargs={"device": "cuda"},
    )

    # Persist the chunks into Milvus, overwriting any existing collection.
    return Milvus.from_documents(
        chunks,
        embedder,
        connection_args={"host": MILVUS_HOST, "port": MILVUS_PORT},
        collection_name="ht_knowledge",
        drop_old=True,
    )

def setup_qwen_llm():
    """Create an LLM client for a locally served Qwen-32B model.

    Assumes a vLLM OpenAI-compatible server is already listening on
    localhost:8000 (see the startup command printed in ``__main__``).

    Returns:
        A configured ``VLLMOpenAI`` client.
    """
    sampling = {
        "max_tokens": 2048,       # cap on generated tokens
        "top_p": 0.9,             # nucleus-sampling threshold
        "temperature": 0.7,
        "presence_penalty": 1.1,  # discourage topic repetition
    }
    return VLLMOpenAI(
        openai_api_key="EMPTY",  # vLLM requires a non-empty key
        openai_api_base="http://localhost:8000/v1",  # vLLM default port
        model_name=MODEL_PATH,
        stop=["<|endoftext|>"],
        **sampling,
    )

def run_rag_qa(vector_db):
    """Run the interactive RAG question-answering loop.

    Wires *vector_db* and the Qwen LLM into a "stuff"-type RetrievalQA
    chain with a custom prompt, then answers questions read from stdin
    until the user types 'exit' or stdin is closed.

    Args:
        vector_db: a vector store exposing ``as_retriever()`` (e.g. Milvus).
    """
    # Custom prompt: restrict the model to the retrieved context and make
    # it admit ignorance instead of fabricating answers.
    prompt_template = """
    [系统]
    你是一个专业的知识助手，请根据提供的上下文信息回答问题。
    如果问题无法从上下文得到答案，请如实告知不知道。
    
    [上下文]
    {context}
    
    [问题]
    {question}
    
    [回答要求]
    1. 答案简洁准确
    2. 重要数据需标注来源片段
    3. 不要编造不存在的信息
    """
    
    PROMPT = PromptTemplate(
        template=prompt_template,
        input_variables=["context", "question"]
    )
    
    # Retrieval chain: fetch the top-5 chunks and stuff them into the prompt.
    qa_chain = RetrievalQA.from_chain_type(
        llm=setup_qwen_llm(),
        chain_type="stuff",
        retriever=vector_db.as_retriever(
            search_kwargs={"k": 5}
        ),
        return_source_documents=True,
        chain_type_kwargs={"prompt": PROMPT}
    )
    
    # Interactive Q&A loop.
    print("\nRAG系统已启动，输入'exit'退出")
    while True:
        try:
            query = input("\n用户问题: ")
        except EOFError:
            # stdin closed (e.g. piped input exhausted) — exit cleanly
            # instead of crashing with an unhandled EOFError.
            break
        query = query.strip()
        if query.lower() == "exit":
            break
        if not query:
            # Skip blank lines instead of sending an empty query to the LLM.
            continue
            
        # Retrieve context and generate the answer.
        result = qa_chain.invoke({"query": query})
        
        answer = result["result"].strip()
        sources = [doc.metadata.get("source", "") for doc in result["source_documents"]]
        
        print(f"\n回答: {answer}")
        # dict.fromkeys de-duplicates while keeping retrieval order
        # (set() printed the sources in arbitrary order).
        print(f"\n来源片段: {list(dict.fromkeys(sources))}")

if __name__ == "__main__":
    # Step 1: start the vLLM OpenAI-compatible server (must be run
    # separately before this script can answer questions).
    # NOTE: the backslash-newlines are inside the f-string, so the printed
    # command contains the leading indentation spaces — harmless to a shell.
    vllm_cmd = f"python -m vllm.entrypoints.openai.api_server \
        --model {MODEL_PATH} \
        --tokenizer {MODEL_PATH} \
        --trust-remote-code \
        --port 8000"
    print(f"请先启动vllm服务:\n{vllm_cmd}\n")
    
    # Step 2: build the Milvus vector store from the knowledge-base file.
    print("正在创建Milvus向量库...")
    vector_store = setup_milvus_vectorstore()
    print("向量库创建完成!\n")
    
    # Step 3: launch the interactive question-answering loop.
    run_rag_qa(vector_store)