from langchain_community.llms.ollama import Ollama
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.documents import Document
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import OllamaEmbeddings
from langchain_chroma import Chroma
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain.chains.retrieval import create_retrieval_chain
from duckduckgo_search import DDGS
import fitz

class langchainLLM:
    """Ollama-backed chat assistant with optional retrieval-augmented generation.

    Supports three content sources: plain chat, local PDF files (via PyMuPDF),
    and live web search (via DuckDuckGo). Documents are split, embedded with
    Ollama embeddings, and indexed into an in-memory Chroma vector store.
    """

    def __init__(self, model="deepseek-r1:8b", init_prompt=' '):
        self.output_parser = StrOutputParser()
        self.llm = Ollama(model=model)
        # Reuse the same Ollama model for embeddings.
        self.embeddingsModel = OllamaEmbeddings(model=model)

        # NOTE: init_prompt is baked in at construction time; the trailing
        # "{input}" message is what carries the per-call user prompt
        # (without it the runtime prompt was silently dropped and a
        # zero-variable template rejected plain invoke calls).
        self.prompt = ChatPromptTemplate.from_messages([
            ("system", "你是一个专业的人工智能助手"),
            ("user", f"{init_prompt}"),
            ("user", "{input}"),
        ])

        self.docPrompt = ChatPromptTemplate.from_messages([("system", """请根据提供的上下文: {context} \n\n 回答问题: {input}""")])

        self.chain = self.prompt | self.llm | self.output_parser
        self.documentChain = create_stuff_documents_chain(self.llm, self.docPrompt)
        # Built lazily by loadDocument(); None until a document is loaded.
        self.retrieval_chain = None

        self.docs = []
        self.loader = None
        self.textSplitter = RecursiveCharacterTextSplitter()

    def callLLM(self, prompt, isStream=False, combineDocuments=False):
        """Run the prompt through the plain chain or the retrieval chain.

        prompt: user question (str).
        isStream: True -> return a generator of chunks; False -> full answer.
        combineDocuments: True -> answer from loaded documents; returns None
            if no document has been loaded yet (retrieval chain not built).
        """
        if combineDocuments:
            # Guard on the chain actually used below (the old check read a
            # non-existent attribute and would raise AttributeError).
            if self.retrieval_chain is None:
                return None
            chain = self.retrieval_chain
        else:
            chain = self.chain

        # Both chains take a mapping; create_retrieval_chain in particular
        # requires {"input": ...} rather than a bare string.
        payload = {"input": prompt}
        if isStream:
            return chain.stream(payload)
        return chain.invoke(payload)

    def extractTextFromPdf(self, file_path) -> str:
        """Return the concatenated plain text of every page in the PDF."""
        parts = []
        # Context manager guarantees the document handle is closed.
        with fitz.open(file_path) as pdf_document:
            for page in pdf_document:
                parts.append(page.get_text("text"))
        return "".join(parts)

    def searchOnline(self, query, proxy='http://192.168.133.14:8080') -> list:
        """Fetch up to 10 DuckDuckGo text results for *query*.

        proxy: HTTP proxy for the search request (defaults to the LAN proxy
            this project was developed against). Returns [] on any failure.
        """
        results = []
        try:
            results = DDGS(proxy=proxy).text(query, max_results=10)
            print(results)
        except RuntimeError as e:
            # duckduckgo_search raises RuntimeError on timeouts.
            print(f"请求超时！{e}")
        except Exception as e:
            print(f'联网获取文本时出现异常:{e}')
        return results

    def loadDocument(self, file_path: str = '', search_online=False, query: str = ''):
        """Ingest a PDF or web-search results and (re)build the retrieval chain.

        search_online=True ignores file_path and indexes DuckDuckGo results
        for *query*; otherwise the PDF at file_path is indexed.
        """
        try:
            if search_online:
                self.docs.extend([Document(str(x)) for x in self.searchOnline(query)])
            else:
                self.docs.append(Document(page_content=self.extractTextFromPdf(file_path)))

            # Split the raw documents into chunks sized for embedding.
            self.documents = self.textSplitter.split_documents(self.docs)

            # Index the *split* chunks (the old code embedded the unsplit
            # docs, defeating the splitter entirely).
            self.vectorStoreDB = Chroma.from_documents(self.documents, self.embeddingsModel)

            self.retriever = self.vectorStoreDB.as_retriever()
            self.retrieval_chain = create_retrieval_chain(self.retriever, self.documentChain)

        except Exception as e:
            print(f'加载文档时出错:{e}')


if __name__ == "__main__":
    # Smoke test: hit DuckDuckGo directly through the local proxy without
    # going through langchainLLM (removed dead commented-out RAG demo code).
    # NOTE(review): the proxy address is hard-coded for a specific LAN;
    # adjust or remove it before running elsewhere.
    query = "0721是什么意思？"
    results = DDGS(proxy='http://192.168.133.14:8080').text(query, max_results=5)
    print(results)