

# import os
# import logging
# import pickle
# from PyPDF2 import PdfReader
# from langchain.chains.question_answering import load_qa_chain
# from langchain_openai import OpenAI,ChatOpenAI
# from langchain_openai import OpenAIEmbeddings
# from langchain_community.embeddings import DashScopeEmbeddings
# from langchain_community.callbacks.manager import get_openai_callback
# from langchain.text_splitter import RecursiveCharacterTextSpliter
# from langchain_community.vectorstores import FAISS
# from typing import List,Tuple

# def process_text_with_spliter(text:str,page_numbers: List[int],save_path:str=None) ->FAISS:
#     """
#         处理文本并创建向量存储
#         参数：
#             text：提取的文本内容
#             page_number: 每行文本对应的页码列表
#             save_path: 可选，保存向量数据库的路径
#         返回：
#             knowledgeBase： 基于FAISS的向量存储对象
#     """
#     # 创建文本分割器（递归字符文本分割器），用于将长文本分割成小块
#     text_splitter=RecursiveCharacterTextSplitter(
#         spearators =["\n\n","\n","."," ",""],
#         chunk_size = 512, #切片大小512个字符，建议值
#         chunk_overlap = 128, #重叠部分128个字符，chunk_size的10%，20%
#         length_function=len, 
#     )
#     #分割文本
#     chunks = text_splitter.split_text(text)
#     #loggings.debug(f"Text split info {len(chunks)} chunks")
#     print(f"文本被分割成{len(chunks)}个块")
#     #创建嵌入模型，OpenAI嵌入模型，配置环境变量OPENAI_API_KEY
#     #embeddings = OpenAIEmbeddings()
#     #调用阿里百炼平台文本嵌入模型，配置环境变量DASHSCOPE_API_KEY
#     embeddings = DashScopeEmbeddings(
#         model = "text-embedding-v4"
#     )
#     # 从文本块创建知识库
#     knowledgeBase= FAISS.from_texts(chunks,embeddings)
#     print("已从文本块创建知识库...")
#     page_info = {chunk:page_numbers[i] for i,chunk in enumerate(chunks)}
#     knowledgeBase.page_info = page_info
#     # 如果提供了保存路径，则保存向量数据和页码信息
#     if save_path:
#         #确保目录存在
#         os.makedirs(sava_path,exist_ok=True)
#         #保存FAISS向量数据库
#         knowledgeBase.save_local(save_path)
#         print(f"向量数据库已保存到：{save_path}")
#         #保存页码信息到同一目录
#         with open(os.path.join(save_path,"page_info.pk1"),"wb") as f:
#             pikle.dump(page_info,f)
#         print(f"页码信息已保存到：{os.path.join(save_path,'page_info.pk1')}")
#     return knowledgeBase

###########################################################################################################################################
import os
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.chains import RetrievalQA
# from langchain.llms import OpenAI  # 可以换成本地 vllm
from langchain.chains import RetrievalQA
from langchain.llms import HuggingFacePipeline
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

def process_text_with_spliter(docs):
    """Split documents into overlapping chunks and build the embedding model.

    Args:
        docs: list of LangChain ``Document`` objects to split.

    Returns:
        tuple: ``(chunks, embeddings)`` — the split ``Document`` chunks and
        the HuggingFace embedding model to vectorize them with.
    """
    # 2. Split the text (avoid over-long chunks). Separators are tried in
    # order, so splits prefer paragraph > line > sentence boundaries before
    # falling back to single characters.
    text_splitter = RecursiveCharacterTextSplitter(
        separators=["\n\n", "\n", ".", " ", ""],
        chunk_size=512,      # recommended chunk size, in characters
        chunk_overlap=128,   # 128-char overlap (~25% of chunk_size) keeps context across chunks
        length_function=len,
    )
    # BUG FIX: the original split the module-level global `documents` instead
    # of the `docs` parameter, silently ignoring the argument passed in.
    chunks = text_splitter.split_documents(docs)

    # 3. Embedding model (BGE-M3 loaded from a local snapshot, GPU-accelerated).
    embeddings = HuggingFaceEmbeddings(
        # model_name="BAAI/bge-m3",  # hub name; an absolute local path is used instead
        model_name="/data/data_ldf/models/embeddings/models--BAAI--bge-m3/snapshots/5617a9f61b028005a4858fdac845db406aefb181",
        model_kwargs={'device': 'cuda'},  # GPU acceleration
    )
    return chunks, embeddings
def create_vectorstore(docs, embeddings, save_path=None):
    """Build a FAISS vector store from document chunks and return a retriever.

    Args:
        docs: split ``Document`` chunks to index.
        embeddings: embedding model used to vectorize the chunks.
        save_path: optional directory to persist the FAISS index to;
            when falsy, the index is kept in memory only.

    Returns:
        A retriever over the FAISS store returning the top-1 match (k=1).
    """
    # 4. Build the vector database from the chunks.
    vectorstore = FAISS.from_documents(docs, embeddings)
    if save_path:
        # Ensure the target directory exists before saving.
        os.makedirs(save_path, exist_ok=True)
        # Persist the FAISS index to disk.
        vectorstore.save_local(save_path)
        print(f"向量数据库已保存到：{save_path}")

    # 5. Build the retriever; k=1 returns only the single best chunk.
    retriever = vectorstore.as_retriever(search_kwargs={"k": 1})
    return retriever

# 6. 构建 RAG QA Chain
# 如果你用 vllm，本地加载模型替换 OpenAI
def llm_init(model_path="/data/data_ldf/models/models--Qwen--Qwen2.5-3B/snapshots/3aab1f1954e9cc14eb9509a215f9e5ca08227a9b"):
    """Load a local causal LM and wrap it as a LangChain LLM.

    Args:
        model_path: local directory of the model snapshot (defaults to a
            local Qwen2.5-3B checkout, preserving the original behavior).

    Returns:
        A ``HuggingFacePipeline`` LLM suitable for ``RetrievalQA``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_path)
    model = AutoModelForCausalLM.from_pretrained(
        model_path,
        device_map="auto",   # automatically place layers on available GPUs
        torch_dtype="auto",  # pick the checkpoint's native dtype
    )

    # NOTE(review): despite the original "vLLM" comments, this is a plain
    # HuggingFace transformers text-generation pipeline, not vLLM.
    hf_pipeline = pipeline(
        task="text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=1024,
        temperature=1e-5,  # near-zero temperature ≈ deterministic (greedy-like) decoding
    )

    llm = HuggingFacePipeline(pipeline=hf_pipeline)
    return llm
    # -------------------------------
# def create_qa(llm,retriever):
#     qa_chain = RetrievalQA.from_chain_type(
#     llm=llm,
#     retriever=retriever,
#     return_source_documents=True
#     )      
#     # # 5. 构建 RAG QA Chain
#     # qa_chain = RetrievalQA.from_chain_type(
#     #     llm=llm,
#     #     retriever=retriever,
#     #     return_source_documents=True
#     # )
#     # llm = OpenAI(temperature=0)  

#     # qa_chain = RetrievalQA.from_chain_type(
#     #     llm=llm,
#     #     retriever=retriever,
#     #     return_source_documents=True
#     # )
#     # result = qa_chain.invoke(query)
#     # return result


if __name__ == "__main__":
    # 1. Load the markdown file produced by MinerU.
    loader = TextLoader("/data/data_ldf/doc/turnpdf/Linux_centos7.4/auto/Linux_centos7.4.md", encoding="utf-8")
    documents = loader.load()

    # 2-3. Split into chunks and build the embedding model.
    docs, embeddings = process_text_with_spliter(documents)

    # 4-5. Build (and persist) the FAISS index, then get a retriever.
    save_path = "/data/data_ldf/faissdb"
    retriever = create_vectorstore(docs, embeddings, save_path)

    # Load the local LLM.
    llm = llm_init()

    # 6. Build the RAG QA chain.
    # NOTE(review): the original assigned `query = "如何实现网卡绑定"` here and
    # immediately overwrote it below without ever using it — the dead
    # assignment has been removed.
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        return_source_documents=True,
    )

    # 7. Run a query.
    query = "CentOS7.4 的网络配置方式有哪些？"
    result = qa_chain.invoke(query)

    print("答案：", result["result"])
    print("\n参考内容：")
    for doc in result["source_documents"]:
        print(doc.metadata, "======================\n", doc.page_content, "...")
################################################################################################################################

