import os
import time
# 加载提取文档的包
from langchain_community.document_loaders import Docx2txtLoader, PyPDFLoader, UnstructuredExcelLoader,TextLoader
# 加载分割文档的包
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_huggingface import HuggingFaceEmbeddings
# 加载向量数据库的包
from langchain_chroma import Chroma
# 多重检索的包
from langchain.retrievers.multi_query import MultiQueryRetriever   
# 引入chat_model包
import torch
from langchain_huggingface import HuggingFacePipeline
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# 引入prompttemplate包
from langchain_core.prompts import ChatPromptTemplate,PromptTemplate
from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain


# # 设置环境变量以指定使用的GPU编号
# os.environ["CUDA_VISIBLE_DEVICES"] = "1"  # 使用GPU 1

class RAG():
    """Minimal RAG pipeline: load a document, split it into chunks, embed the
    chunks into a persisted Chroma vector store, then retrieve the chunks most
    similar to a question and assemble the final LLM prompt.
    """

    def __init__(self, doc='/changzheng/PracticalTraining/lyj/lingdongai/models/data/datatoday.docx'):
        """
        Args:
            doc: path of the document to index and retrieve from.
        """
        self.doc = doc
        self.split_texts = []
        # Directory where the Chroma vector store is persisted / reloaded from.
        self.db_path = "./chroma_langchain_db"

        # Build the embedding model from local HuggingFace weights.
        model_path = "/changzheng/PracticalTraining/lyj/lingdongai/models/rag_model"
        self.embeddings = HuggingFaceEmbeddings(model_name=model_path)

        # Chat prompt template; {context} is filled with retrieved chunks.
        self.template = [
            ("system", "你是医学领域的专家,你会根据提供的{context}内容来回答用户有关于科室和疾病的问题。"),
        ]
        self.prompt = ChatPromptTemplate.from_messages(self.template)

    def getFile(self):
        """Load the configured document and return its extracted content.

        Returns:
            The list of Documents produced by the matching loader, or None when
            the extension is unsupported or loading fails.
        """
        doc = self.doc

        # Map of supported file extensions to their loader classes.
        loaders = {
            "docx": Docx2txtLoader,
            "pdf": PyPDFLoader,
            "xlsx": UnstructuredExcelLoader,
            "txt": TextLoader,
        }

        # FIX: use os.path.splitext + lower() so 'report.DOCX' is recognized
        # and a dotless path no longer matches its whole filename.
        file_extension = os.path.splitext(doc)[1].lstrip(".").lower()

        # Pick the loader matching the extension, if any.
        loader_class = loaders.get(file_extension)
        if loader_class is None:
            print(f"Unsupported file extension: {file_extension}")
            return None
        try:
            loader = loader_class(doc)
            return loader.load()
        except Exception as e:
            # FIX: return None explicitly so callers get a consistent sentinel
            # instead of relying on an implicit fall-through.
            print(f"Error loading {file_extension} file: {e}")
            return None

    def splitDoc(self):
        """Split the loaded document into overlapping chunks.

        Returns:
            The list of chunk Documents, or None when loading failed.
        """
        full_text = self.getFile()
        if full_text is None:
            print("Failed to load document.")
            return None

        # Recursive character splitter: 1000-char chunks with 50-char overlap.
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=50)
        self.split_texts = splitter.split_documents(full_text)
        return self.split_texts

    def embeddingAndVectorDB(self):
        """Embed the document chunks and persist them in a Chroma store.

        Returns:
            The Chroma database, or None when the document could not be split.
        """
        split_texts = self.splitDoc()
        # FIX: propagate failure instead of crashing Chroma with None input;
        # askAndFindFiles already checks for this None but it was never produced.
        if split_texts is None:
            return None

        db = Chroma.from_documents(
            documents=split_texts,
            embedding=self.embeddings,
            # FIX: reuse self.db_path instead of repeating the literal path.
            persist_directory=self.db_path,
        )
        return db

    def askAndFindFiles(self, question):
        """Retrieve relevant chunks for *question* and build the final prompt.

        Returns:
            The assembled prompt string, or None when the vector DB is missing
            and could not be created.
        """
        if os.path.exists(self.db_path):
            # Reuse the persisted store (FIX: self.db_path, not a repeated literal).
            db = Chroma(persist_directory=self.db_path, embedding_function=self.embeddings)
        else:
            db = self.embeddingAndVectorDB()
            if db is None:
                print("Failed to create vector database.")
                return None

        # Similarity retrieval: keep at most 1 chunk scoring above 0.3.
        retriever = db.as_retriever(
            search_type="similarity_score_threshold",
            search_kwargs={"score_threshold": .3, "k": 1},
        )
        results = retriever.invoke(question)

        # FIX: join at C speed instead of quadratic '+=' concatenation.
        _content = "".join(chunk.page_content for chunk in results)

        # Chain-of-thought scaffolding appended to the prompt.
        cot = (
            f"你是一个有逻辑推理能力的助手。请分步骤回答以下问题：\n"
            f"问题：{question}\n"
            "第一步：分析问题的主要要素。\n"
            "第二步：提出假设并验证。\n"
            "第三步：总结推理结果并提供答案。"
        )

        finally_prompt = f'你是医学领域的专家,你会根据提供的{_content}内容来回答用户有关于科室和疾病的问题。{cot}'
        return finally_prompt

    
if __name__ == '__main__':
    # Build the RAG helper and answer one question interactively.
    chat = RAG()
    question = input('请输入您的问题：')       # e.g. 我生病了, 皮肤病是什么
    # FIX: the assembled prompt was silently discarded — print it so the
    # script actually produces visible output.
    prompt = chat.askAndFindFiles(question)
    print(prompt)
