"aim: from naive RAG to agentic RAG ,and evaluate RAG's performance,输入文档为pdf word txt md execl"
import json

"""一.Naive RAG搭建
(1)文档的构建
每个不同的类型，每个类型的文件包括不同专业的数据集当前仅包括pdf word excel

(1) 文档处理（文档加载器）
from langchain.documents_loaders
"""
"加载werd只是加载文本，表格是每个格子读取数据，没有前后关系，图片直接跳过"
# from langchain_community.document_loaders.word_document import Docx2txtLoader
# loader = Docx2txtLoader(file_path="./word/分布式集群通信.docx")
# data = loader.load()
# print(data)
# from langchain_community.document_loaders import TextLoader
# loader = TextLoader(file_path="./txt/README.txt",encoding="utf-8")
# data = loader.load()
# print(data)
# "pdf自动跳过图片，可能还会有视频，音频呢"
# --- Naive RAG pipeline: load -> split -> embed -> store -> retrieve ---
# NOTE: PyPDFLoader extracts text only; images (and any embedded media)
# in the PDF are skipped.
from langchain_community.document_loaders import PyPDFLoader
loader = PyPDFLoader(file_path="./pdf/muxi/曦云系列_通用计算GPU_mx-smi使用手册_CN_V05.pdf")
data = loader.load_and_split()

# Re-chunk the page documents on blank lines into overlapping windows so
# each chunk fits the embedding model's input size.
from langchain.text_splitter import CharacterTextSplitter
splitter = CharacterTextSplitter(separator="\n\n", chunk_size=1024, chunk_overlap=100)
data = splitter.split_documents(data)

# Text storage: besides the vectors themselves, metadata extraction could
# also happen at this stage.  The embedding model is a local
# text2vec-base-chinese checkpoint.
from langchain_community.embeddings import HuggingFaceEmbeddings, SentenceTransformerEmbeddings
import requests
import json
from typing import List, Optional, Any, Dict

embeddings = HuggingFaceEmbeddings(model_name="../text2vec-base-chinese")

# Vector retrieval: MMR search diversifies the returned chunks.
# (Open question from the author: how do we judge retrieval accuracy?)
from langchain_community.vectorstores import FAISS
db = FAISS.from_documents(data, embeddings)
retriever = db.as_retriever(search_type="mmr")

query = "什么是mx-smi工具"
docs = retriever.get_relevant_documents(query)
print(len(docs))
print(docs[0].page_content)

# Collect the retrieved chunk texts; `text` feeds the prompt template below.
text = [doc.page_content for doc in docs]
# print(len(text))
# print(str(text))
# def vllm():
#     pass

# from langchain.text_splitter import NLTKTextSplitter
# # text_splitter = CharacterTextSplitter.from_tiktoken_encoder(
# #     # separator = "\n",
# #     chunk_size = 1000,
# #     chunk_overlap  = 200
# #
# # )
# text_splitter = NLTKTextSplitter(
#     # separator = "\n",
#     chunk_size = 1000,
#     chunk_overlap  = 500
#
# )
# data = text_splitter.split_documents(data)
# print(len(data))
# for i in range(len(data)):
#     # print(type(data[i]))
#     print(data[i].page_content)
#     print(len(data[i].page_content))
    # a = json.load(data[i])
    # print(len(a))
# "excel表格数据"
# from langchain_community.document_loaders import UnstructuredFileLoader
# loader = UnstructuredFileLoader(file_path="./excel/muxi/智凯100全量测试数据.xls")
# data = loader.load()
# print(data)
# from langchain_core.document_loaders import BaseLoader
# from langchain_core.documents import Document
# import pandas as pd
#
#自定义加载excel
# from typing import AsyncIterator, Iterator
# import pandas as pd
# from langchain_core.document_loaders import BaseLoader
# from langchain_core.documents import Document
#
#
# class ExcelDocumentLoader(BaseLoader):
#     """An example document loader that reads an Excel file."""
#
#     def __init__(self, file_path: str, sheet_name: str = 0) -> None:
#         """Initialize the loader with a file path.
#
#         Args:
#             file_path: The path to the Excel file to load.
#             sheet_name: The name of the sheet to load. Defaults to 0 (the first sheet).
#         """
#         self.file_path = file_path
#         self.sheet_name = sheet_name
#
#     def lazy_load(self) -> Iterator[Document]:
#         """A lazy loader that reads an Excel file.
#
#         When you're implementing lazy load methods, you should use a generator
#         to yield documents one by one.
#         """
#         # 读取Excel文件
#         df = pd.read_excel(self.file_path, sheet_name=self.sheet_name)
#         # 转换数据框为文档
#         for index, row in df.iterrows():
#             yield Document(
#                 page_content=row.to_string(),
#                 metadata={"index": index, "source": self.file_path}
#             )
#
#     async def alazy_load(self) -> AsyncIterator[Document]:
#         """An async lazy loader that reads an Excel file."""
#         # 读取Excel文件
#         df = pd.read_excel(self.file_path, sheet_name=self.sheet_name)
#         # 转换数据框为文档
#         for index, row in df.iterrows():
#             yield Document(
#                 page_content=row.to_string(),
#                 metadata={"index": index, "source": self.file_path}
#             )
#
#
# # 示例用法
# if __name__ == "__main__":
#
#     loader = ExcelDocumentLoader("./excel/35Cvs48C.xlsx")
#     documents = list(loader.lazy_load())
#     for doc in documents:
#         print(doc)
#llama__index加载数据https://docs.llamaindex.ai/en/stable/understanding/loading/loading/ 构建llamaindex的管道
# from llama_index.core import SimpleDirectoryReader
# #
# # # 加载指定目录中的文件
# documents = SimpleDirectoryReader("./txt/").load_data()


#
# # 获取特定文件的所有数据
# target_file_name = 'vllm大模型推理demo (1).pdf'
# target_file_content = []
# #
# from llama_index.core.node_parser import SentenceSplitter
# parser = SentenceSplitter(
#     chunk_size=4000,
#     chunk_overlap=200,
#     separator="/n",  # 可选自定义分隔符
# )
# node = parser.get_nodes_from_documents(documents)
# print(len(node))
# print(node[1].text)
# print(documents)
# for doc in documents:
#     if doc.metadata.get('file_name') == target_file_name:
#         nodes = parser.get_nodes_from_documents([doc])
#         print(len(nodes))
#         print(nodes[0].text)
# 合并内容
# full_content = '\n'.join(target_file_content)
#
# # 打印内容
# print(full_content)
#存储
# from llama_index.core import SimpleDirectoryReader, VectorStoreIndex
# from llama_index.core.node_parser import SentenceSplitter
# from llama_index.embeddings.huggingface import HuggingFaceEmbedding
# from llama_index.vector_stores.faiss import FaissVectorStore
# import faiss
# import os

# # 确保目录存在
# os.makedirs("./temp", exist_ok=True)

# # 1. 文档加载
# reader = SimpleDirectoryReader(
#     input_dir="./temp",
#     required_exts=[".txt", ".pdf", ".docx"],  # 指定支持的文件类型
#     recursive=True,  # 递归读取子目录
#     exclude_hidden=True  # 排除隐藏文件
# )
# documents = reader.load_data()
# print(f"加载了 {len(documents)} 个文档")

# # 2. 文档分割
# splitter = SentenceSplitter(
#     chunk_size=2000,
#     chunk_overlap=500,
#     separator=" ",  # 添加空格作为分隔符
#     paragraph_separator="\n\n",  # 段落分隔符
#     chunking_tokenizer_fn=None,  # 使用默认分词
#     secondary_chunking_regex="[^。？！]*[。？！]?",  # 中文句子分割
#     include_metadata=True  # 包含元数据
# )
# nodes = splitter.get_nodes_from_documents(documents)
# print(f"分割后得到 {len(nodes)} 个节点")

# # 3. 文本嵌入
# embed_model = HuggingFaceEmbedding(
#     model_name="../text2vec-base-chinese",
#     # 推荐使用更先进的模型（如果可用）:
#     # model_name="BAAI/bge-small-zh-v1.5",
#     # model_name="GanymedeNil/text2vec-large-chinese",
#     model_kwargs={"device":  "cpu"},  # 自动检测GPU
#     # encode_kwargs={
#     #     "normalize_embeddings": True,  # 标准化嵌入向量
#     #     "batch_size": 32,  # 批处理大小
#     #     "show_progress_bar": True  # 显示进度条
#     # }
# )

# # 4. 向量存储和索引创建
# dimension = 768  # 自动获取嵌入维度
# print(f"嵌入维度: {dimension}")

# # 使用更高效的索引类型
# faiss_index = faiss.IndexFlatIP(dimension)  # 使用内积代替L2距离
# # 或者使用更高级的索引:
# # faiss_index = faiss.IndexHNSWFlat(dimension, 32)  # 32是HNSW参数

# vector_store = FaissVectorStore(faiss_index=faiss_index)

# # 创建索引
# index = VectorStoreIndex(
#     nodes=nodes,
#     embed_model=embed_model,
#     vector_store=vector_store,
#     insert_batch_size=64  # 批量插入优化内存使用
# )
# retriever = index.as_retriever(
#             similarity_top_k=2,
#             verbose=True
#         )
# results = retriever.retrieve("什么是mx-smi")
# # print(results)
# # 5. 保存索引（可选）
#         # 提取相关内容
# relevant_docs = []
# for result in results:
#     print(result.text)
#     relevant_docs.append(result.text)
    # relevant_docs.append({
    #     "text": result.node.text,
    #     "metadata": result.node.metadata,
    #     "similarity": result.score
    # })
# print(str(relevant_docs))

# "********************************"
# Prompt template for grounded QA over the retrieved context.
# (Name keeps the original "templete" spelling in case other chunks of
# this file reference it.)
prompt_templete = (
    "你是一个专业的知识助手，需要基于以下上下文信息回答问题。\n"
    "-------------------------------------\n"
    "上下文信息：\n"
    "{context}\n"
    "-------------------------------------\n"
    "用户问题：{query}\n\n"
    "要求：\n"
    "1. 仅使用提供的上下文信息回答问题，不要编造信息\n"
    "2. 如果上下文不包含答案，明确回复'根据提供的信息，无法回答该问题'\n"
    "3. 回答要简洁专业，使用与问题相同的语言\n"
    "4. 对于技术术语，保持原文表述\n\n"
    "请根据以上要求回答问题："
)

# Fill the placeholders with str.format so the braces of "{context}" and
# "{query}" are consumed.  The previous str.replace("context", ...) matched
# only the word inside the braces and left stray "{"/"}" in the prompt.
input_text = prompt_templete.format(context=str(text), query="如何查询显卡的状态")
print(input_text)
import requests
def extract_after_think(text):
    """Return the content after the first (case-insensitive) '</think>' tag.

    Reasoning models often prefix their answer with a <think>...</think>
    block; this strips everything up to and including the closing tag.

    Args:
        text: Raw model output, possibly containing a think block.

    Returns:
        The text following '</think>' with its original casing, or the
        original not-found message when no closing tag is present.
    """
    tag = "</think>"
    # Search on the lowered copy so '</THINK>' also matches, but slice the
    # original string so the returned content keeps its casing.
    pos = text.lower().find(tag)
    if pos == -1:
        return "未找到'think'相关内容"
    # len(tag) replaces the magic number 8 used previously.
    return text[pos + len(tag):]
def llm(input_text: str) -> str:
    """Send *input_text* to the OpenAI-compatible chat endpoint and return
    the answer text with any leading <think> block stripped.

    Args:
        input_text: Fully rendered prompt for a single user turn.

    Returns:
        The model's answer content (post-processed by extract_after_think).

    Raises:
        requests.HTTPError: When the server responds with a non-2xx status
            (previously this path crashed with UnboundLocalError on
            `result`).
        requests.RequestException: On network failure or timeout.
    """
    url = "http://192.168.235.94:1096/v1/chat/completions"
    headers = {
        "Accept": "application/json",
        # Fixed: was misspelled "Context-type", which sent a bogus header.
        "Content-Type": "application/json",
    }
    payload = {
        "model": "llama_65b",
        "messages": [{"role": "user", "content": input_text}],
        "max_tokens": 8192,
        "stream": False,
    }
    # Bounded timeout so a hung server cannot block the script forever.
    response = requests.post(url=url, headers=headers, json=payload, timeout=300)
    print(response.status_code)
    response.raise_for_status()
    result = response.json()["choices"][0]["message"]["content"]
    return extract_after_think(result)
# Run the RAG answer step: send the retrieved-context prompt to the LLM
# endpoint and print the generated answer.
result_text = llm(input_text)
print(result_text)