#https://zhuanlan.zhihu.com/p/17675153472
import os

#from langchain.chat_models import ChatDashScope
#from langchain.llms import Tongyi
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain_community.llms import Tongyi
from langchain_community.document_loaders import Docx2txtLoader,TextLoader
from langchain_community.document_loaders import PyPDFLoader
from langchain.chains import SimpleSequentialChain
from langchain.prompts import ChatPromptTemplate

from langchain_community.document_loaders import DirectoryLoader
from common import hztool


from langchain_community.vectorstores import Chroma 
from langchain.chains import RetrievalQA
from langchain.prompts import PromptTemplate
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import TextLoader
from langchain_community.embeddings.huggingface import HuggingFaceEmbeddings 
# Module-level embedding model used by every loader below.
# Loads a local Chinese sentence-embedding model (text2vec-base-chinese) from
# the ModelScope cache — path is machine-specific; TODO: make configurable.
hf = HuggingFaceEmbeddings(
        model_name='C:/Users/Administrator/.cache/modelscope/hub/models/Jerry0/text2vec-base-chinese' 
    )
 
# Module-level LLM client (Alibaba Tongyi/Qwen); presumably reads the
# DASHSCOPE_API_KEY environment variable — verify before deploying.
llm = Tongyi()
def xsplit(loader):
    """Load documents from *loader*, split them into chunks, and derive ids.

    Parameters:
        loader: any LangChain document loader exposing ``.load()``.

    Returns:
        (split_docs, ids, metadatas, _data) — the chunked documents plus the
        id/metadata/raw-data triple produced by ``hztool.chunk2dbcode``.
    """
    documents = loader.load()

    # Split documents into smaller chunks so they fit the embedding model.
    # FIX: CharacterTextSplitter does not accept a `separators` list — that
    # parameter belongs to RecursiveCharacterTextSplitter, so the original
    # call either raised a TypeError or never applied the separators
    # (version-dependent). Use the recursive splitter, which tries each
    # separator in order: paragraph, line, Chinese full stop, period.
    text_splitter = RecursiveCharacterTextSplitter(
        separators=["\n\n", "\n", "。", "."],
        chunk_size=2000,
        chunk_overlap=50,
    )

    split_docs = text_splitter.split_documents(documents)
    # hztool.chunk2dbcode presumably derives stable per-chunk ids/metadata
    # for Chroma upserts — TODO confirm against the helper's implementation.
    _data, ids, metadatas = hztool.chunk2dbcode(split_docs)
    return split_docs, ids, metadatas, _data

def loadText(url, encoding="gbk", persist_directory="D:/ai/adbdisk3"):
    """Index a plain-text file into the persistent Chroma store.

    Parameters:
        url: path of the text file to load.
        encoding: text encoding of the file (defaults to GBK for Chinese text).
        persist_directory: on-disk Chroma location (generalized from the
            previously hard-coded path; default preserves old behavior).

    Returns:
        The Chroma vectorstore containing the newly indexed chunks.
    """
    loader = TextLoader(url, encoding=encoding)
    split_docs, ids, metadatas, _data = xsplit(loader)

    vectorstore = Chroma.from_documents(
        split_docs, ids=ids, embedding=hf, persist_directory=persist_directory
    )
    return vectorstore

def loadDoc(url, persist_directory="D:/ai/adbdisk3"):
    """Index a Word (.docx) file into the persistent Chroma store.

    Parameters:
        url: path of the .docx file to load.
        persist_directory: on-disk Chroma location (generalized from the
            previously hard-coded path; default preserves old behavior).

    Returns:
        The Chroma vectorstore containing the newly indexed chunks.
    """
    loader = Docx2txtLoader(url)
    split_docs, ids, metadatas, _data = xsplit(loader)
    vectorstore = Chroma.from_documents(
        split_docs, ids=ids, embedding=hf, persist_directory=persist_directory
    )
    return vectorstore

def loadPdf(url, persist_directory="D:/ai/adbdisk3"):
    """Index a PDF file into the persistent Chroma store.

    Parameters:
        url: path of the PDF file to load.
        persist_directory: on-disk Chroma location (generalized from the
            previously hard-coded path; default preserves old behavior).

    Returns:
        The Chroma vectorstore containing the newly indexed chunks.
    """
    loader = PyPDFLoader(url)
    split_docs, ids, metadatas, _data = xsplit(loader)
    vectorstore = Chroma.from_documents(
        split_docs, ids=ids, embedding=hf, persist_directory=persist_directory
    )
    return vectorstore

def loaddir(path, glob="**/*.pdf", persist_directory="D:/ai/adbdisk3"):
    """Index every file under *path* matching *glob* into the Chroma store.

    Parameters:
        path: root directory to scan.
        glob: glob pattern selecting which files to load (default: all PDFs).
        persist_directory: on-disk Chroma location (generalized from the
            previously hard-coded path; default preserves old behavior).

    Returns:
        The Chroma vectorstore containing the newly indexed chunks.
    """
    loader = DirectoryLoader(path=path, glob=glob, show_progress=True)
    split_docs, ids, metadatas, _data = xsplit(loader)
    vectorstore = Chroma.from_documents(
        split_docs, ids=ids, embedding=hf, persist_directory=persist_directory
    )
    return vectorstore


def dirload(path):
    """Index every supported file under *path*, one file at a time.

    Best-effort batch load: a file that fails to load is reported and
    skipped so the remaining files are still processed.

    Parameters:
        path: root directory; ``hztool.pathlist`` presumably returns the
            file paths beneath it — TODO confirm (recursive vs. flat).
    """
    for fp in hztool.pathlist(path):
        try:
            print(fp)
            loadall(fp)
        except Exception as e:
            # FIX: include the failing path so errors can be traced to a
            # file; the original printed only the bare exception.
            print(f"failed to load {fp}: {e}")



def getdb(persist_directory="D:/ai/adbdisk3"):
    """Open the persisted Chroma vector store for querying.

    Parameters:
        persist_directory: on-disk Chroma location (generalized from the
            previously hard-coded path; default preserves old behavior,
            consistent with the load* functions).

    Returns:
        A Chroma handle backed by the module-level embedding model ``hf``.
    """
    vectorstore = Chroma(persist_directory=persist_directory, embedding_function=hf)
    return vectorstore


def loadall(url):
    """Index a single file, dispatching on its type via ``hztool`` predicates.

    Parameters:
        url: path of the file to index (.doc/.docx, .pdf, or .txt).

    Returns:
        The Chroma vectorstore for supported formats, ``None`` otherwise.
        (FIX: the original discarded the loaders' return value.)
    """
    if hztool.isDoc(url):
        return loadDoc(url)
    elif hztool.isPdf(url):
        return loadPdf(url)
    elif hztool.isTxt(url):
        return loadText(url)
    # Unsupported extension — report and skip (message kept verbatim).
    print("不支持的文件格式 "+url)
    return None
        

def test():
    """End-to-end demo: index d:/beifen, then answer one question via RAG."""
    loaddir("d:/beifen")
    vectorstore = getdb()
    # Retrieve the 3 most relevant chunks per query.
    retriever = vectorstore.as_retriever(search_kwargs={"k": 3})

    # Prompt template: stuff the retrieved context ahead of the question.
    prompt_template = PromptTemplate(
        input_variables=["context", "question"],
        template="""
        You are an intelligent assistant. Based on the following context:
        {context}
        Please answer the question:
        {question}
        """
    )

    # Build the RAG pipeline (retriever -> prompt -> LLM).
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        return_source_documents=True,
        chain_type_kwargs={"prompt": prompt_template}
    )

    # Run one query. NOTE(review): chain __call__ is deprecated in newer
    # LangChain versions in favor of .invoke() — confirm installed version.
    query = "What is the main benefit of using RAG with LLMs?"
    result = qa_chain({"query": query})

    print("Answer:")
    print(result["result"])

    # Show the retrieved source chunks.
    # FIX: "\\n" printed a literal backslash-n; use a real newline.
    print("\nSource Documents:")
    for doc in result["source_documents"]:
        print(doc.page_content)







 

