import os
from langchain.document_loaders import WebBaseLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.text_splitter import TokenTextSplitter
from langchain.embeddings import QianfanEmbeddingsEndpoint
from langchain.vectorstores import Chroma
from langchain.chains import RetrievalQA
from langchain.chat_models import QianfanChatEndpoint
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
from langchain.memory import ConversationSummaryMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.document_loaders import PyPDFLoader
from langchain.document_loaders import DirectoryLoader
from langchain.document_loaders import UnstructuredFileLoader
from chinese_recursive_text_splitter import ChineseRecursiveTextSplitter
import jieba as jb
from langchain.embeddings import HuggingFaceEmbeddings

# Embedding model configuration: Chinese text2vec model, CPU inference.
# Outputs are L2-normalized so inner-product similarity search behaves
# like cosine similarity.
model_name = "GanymedeNil/text2vec-large-chinese"
model_kwargs = {'device': 'cpu'}
encode_kwargs = {'normalize_embeddings': True}  # normalized vectors -> cosine similarity

embeddings = HuggingFaceEmbeddings(
    model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs,
)
print('embedding model loaded')

# Qianfan (Baidu) API credentials.
# SECURITY: these keys are hard-coded in source — they should be supplied via
# the environment / a secrets manager, and the committed values rotated.
# setdefault keeps backward compatibility while letting an externally
# provided QIANFAN_AK / QIANFAN_SK take precedence over the fallbacks.
os.environ.setdefault('QIANFAN_AK', "a6OGDgMKaWCHpiUP3afGfR9U")
os.environ.setdefault('QIANFAN_SK', "ijE4BGBxsNbv689h3j04ko3zzPL6SVM0")

# LangSmith tracing switches. Uncomment and set these variables only if you
# know what LangSmith is and intend to use its tracing features.
# os.environ['LANGCHAIN_TRACING_V2'] = "true"
# os.environ['LANGCHAIN_ENDPOINT'] = "https://api.smith.langchain.com"
# os.environ['LANGCHAIN_API_KEY'] = "your_langchian_api_key"
# os.environ['LANGCHAIN_PROJECT'] = "your_project_name"

is_chinese = True  # NOTE(review): not read anywhere in this file — confirm before removing

# Prompt template (Chinese) for the retrieval-QA chain. It instructs the model to:
#   - answer only from the supplied corpus ({context}),
#   - reply "我不知道" ("I don't know") rather than fabricate answers,
#   - keep answers to at most three sentences,
#   - end every answer with "感谢你的提问" ("thanks for your question"),
# and includes a one-shot Q/A example. Placeholders: {context}, {question}.
CUSTOM_PROMPT_TEMPLATE = """
    使用下面的语料来回答本模板最末尾的问题。如果你不知道问题的答案，直接回答 "我不知道"，禁止随意编造答案。
    为了保证答案尽可能简洁，你的回答必须不超过三句话。
    请注意！在每次回答结束之后，你都必须接上 "感谢你的提问" 作为结束语
    以下是一对问题和答案的样例：
        请问：秦始皇的原名是什么
        秦始皇原名嬴政。感谢你的提问。
    
    以下是语料：
    
    {context}
    
    请问：{question}
"""
# Sample questions (Chinese) about the purchase-order product manual PDF.
QUESTION1 = "采购订单共有几个状态"  # "How many states does a purchase order have?"
QUESTION2 = "如何取消采购订单变更"  # "How do I cancel a purchase-order change?"
QUESTION3 = "采购订单⾏有几种创建⽅式"  # "How many ways are there to create a purchase-order line?"

# Ingest the product manual PDF and index it for similarity retrieval.
# Alternative loaders kept for reference:
#   loader = WebBaseLoader(WEB_URL)
#   loader = DirectoryLoader('/Users/shaoqintian/langchain/qianfan-langchain-QA-demo/data/cut', glob='**/*.txt')
loader = PyPDFLoader("采购协同 产品操作说明书.pdf")
documents = loader.load()

# Split into chunks of at most 384 characters using a splitter aware of
# Chinese punctuation. (TokenTextSplitter(chunk_size=1000, chunk_overlap=0)
# was a previously tried alternative.)
splitter = ChineseRecursiveTextSplitter(
    chunk_size=384,
    chunk_overlap=0,
    separators=["\n\n", "\n", " ", "", "。", "，"],
)
chunks = splitter.split_documents(documents)

# Build an in-memory Chroma vector store over the embedded chunks.
vectorstore = Chroma.from_documents(documents=chunks, embedding=embeddings)

# Assemble the prompt, chat model, and retriever shared by the chain below.
QA_CHAIN_PROMPT = PromptTemplate.from_template(CUSTOM_PROMPT_TEMPLATE)

llm = QianfanChatEndpoint(streaming=True)

# Retrieve by similarity score; a threshold of 0.0 admits every scored match.
retriever = vectorstore.as_retriever(
    search_type="similarity_score_threshold",
    search_kwargs={'score_threshold': 0.0},
)

# Earlier single-shot experiments, kept for reference:
#   docs = vectorstore.similarity_search_with_relevance_scores(QUESTION1)
#   [(document.page_content, score) for document, score in docs]
#   qa_chain = RetrievalQA.from_chain_type(llm, retriever=retriever,
#       chain_type_kwargs={"prompt": QA_CHAIN_PROMPT}, return_source_documents=True)
#   result = qa_chain({"query": QUESTION1})
#   print(result['source_documents'])

# Conversational retrieval QA: a summary memory carries the running chat
# history (summarized by the same LLM) so follow-up questions have context,
# while the retriever grounds each answer in the indexed PDF.
memory = ConversationSummaryMemory(llm=llm, memory_key="chat_history", return_messages=True)
qa = ConversationalRetrievalChain.from_llm(
    llm,
    retriever=retriever,
    memory=memory,
    combine_docs_chain_kwargs={"prompt": QA_CHAIN_PROMPT},
)

# Ask the sample questions in order. A single loop replaces three
# copy-pasted ask/print blocks; the call sequence and output are identical.
for question in (QUESTION1, QUESTION2, QUESTION3):
    print("问题：" + question)
    result = qa(question)
    print(result['answer'])