# pip install --upgrade 'volcengine-python-sdk[ark]'
# pip install langchain==0.3.7
# pip install langchain_community==0.3.7
# pip install langchain_openai
# pip install unstructured
# pip install unstructured[pdf]
# pip install unstructured[docx]
# pip install python-magic
# pip install python-magic-bin
# On Linux, install libmagic first: apt install libmagic1
# https://github.com/ahupp/python-magic/issues/313
# pip install numpy==1.26.3 


# 1.Load 导入Document Loaders
import os
from langchain_community.document_loaders import DirectoryLoader
from typing import Dict, List, Any
from langchain.embeddings.base import Embeddings
from langchain.pydantic_v1 import BaseModel
from volcenginesdkarkruntime import Ark

base_dir = "./OneFlower"  # Directory holding the source documents
api_key = "" #YOURAPIKEY — left empty here; DoubaoEmbeddings below falls back to the OPENAI_API_KEY env var when empty
base_url="https://ark.cn-beijing.volces.com/api/v3"
model = 'ep-20241111110355-2tp82' #YOURMODELID — Ark embedding endpoint id
os.environ["LLM_MODELEND"] = "ep-20241104131149-csxf9"  # Your Doubao-pro-32k chat model endpoint id
query = "公司的名字全称是什么" # The question to ask.  NOTE(review): this variable is never used — the rag_chain.invoke() call at the bottom hard-codes a different question; confirm which is intended.

# Load the documents

documents = []
loader = DirectoryLoader(
    base_dir,
    show_progress=True
    ) # By default reads every non-hidden file in the directory
documents = loader.load()

# print(documents)

from langchain_core.vectorstores import InMemoryVectorStore
from langchain_text_splitters import RecursiveCharacterTextSplitter

# Split documents into ~1000-character chunks with a 200-character overlap
# between consecutive chunks.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)

class DoubaoEmbeddings(BaseModel, Embeddings):
    """LangChain ``Embeddings`` adapter backed by the Volcengine Ark embeddings API.

    Attributes:
        client: Ark SDK client; created in ``__init__`` from ``base_url``/``api_key``.
        api_key: API key; when empty, falls back to the ``OPENAI_API_KEY`` env var.
        model: Ark embedding endpoint id (required).
    """

    client: Ark = None
    api_key: str = api_key
    model: str

    def __init__(self, **data: Any):
        super().__init__(**data)
        # Empty string means "not configured here" -> fall back to the
        # OPENAI_API_KEY environment variable (raises KeyError if unset).
        if self.api_key == "":
            self.api_key = os.environ["OPENAI_API_KEY"]
        self.client = Ark(
            base_url=base_url,
            api_key=self.api_key
        )

    def embed_query(self, text: str) -> List[float]:
        """Embed a single text.

        Args:
            text: The text to embed.
        Returns:
            The embedding of ``text`` as a list of floats.
        """
        response = self.client.embeddings.create(model=self.model, input=text)
        return response.data[0].embedding

    def embed_documents(self, texts: List[str]) -> List[List[float]]:
        """Embed a batch of texts.

        Sends all texts in a single API request (the Ark OpenAI-compatible
        embeddings endpoint accepts a list input) instead of one request
        per text, which the previous implementation did.

        Args:
            texts: The texts to embed.
        Returns:
            One embedding (list of floats) per input text, in order.
        """
        if not texts:
            return []
        response = self.client.embeddings.create(model=self.model, input=texts)
        return [item.embedding for item in response.data]

    class Config:
        # Permit the non-pydantic Ark client type as a model field.
        arbitrary_types_allowed = True



# 2. Split + embed: chunk the loaded documents and index them in an
# in-memory vector store using the Doubao embedding model.
splits = text_splitter.split_documents(documents)
vectorstore = InMemoryVectorStore.from_documents(
    documents=splits, embedding=DoubaoEmbeddings(
        model=model
    )
)

# Expose the vector store as a retriever for the RAG chain below.
retriever = vectorstore.as_retriever()

from langchain.chains import create_retrieval_chain
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_core.prompts import ChatPromptTemplate

# System prompt for the QA step; {context} is filled with the retrieved chunks
# by create_stuff_documents_chain.
system_prompt = (
    "You are an assistant for question-answering tasks. "
    "Use the following pieces of retrieved context to answer "
    "the question. If you don't know the answer, say that you "
    "don't know. Use three sentences maximum and keep the "
    "answer concise."
    "\n\n"
    "{context}"
)

# Chat prompt: system instructions plus the user's question ({input}).
prompt = ChatPromptTemplate.from_messages(
    [
        ("system", system_prompt),
        ("human", "{input}"),
    ]
)

from langchain_openai import ChatOpenAI  # OpenAI-compatible chat model client
# Instantiate the chat model — Doubao-pro-32k served via the Ark
# OpenAI-compatible endpoint.
# NOTE(review): api_key is "" here and, unlike DoubaoEmbeddings, ChatOpenAI gets
# no explicit env-var fallback in this script — confirm a real key is provided.
llm = ChatOpenAI(
    api_key=api_key,
    base_url=base_url,
    model=os.environ["LLM_MODELEND"], 
    temperature=0)

# 3. Retrieval-augmented QA chain: retrieve relevant chunks, stuff them into
# the prompt's {context}, and let the LLM answer {input}.
question_answer_chain = create_stuff_documents_chain(llm, prompt)
rag_chain = create_retrieval_chain(retriever, question_answer_chain)

# NOTE(review): the module-level `query` variable is unused; a different
# question is hard-coded here — confirm which question is intended.
results = rag_chain.invoke({"input": "易速鲜花员工手册中董事长致辞中的企业精神有哪些"})

# results
# print(results)
# print(results["context"][0].page_content)
# print(results["context"][0].metadata)
print(results["answer"])
