# Obtain a new API token: https://help.aliyun.com/document_detail/611472.html?spm=a2c4g.2399481.0.0
from getpass import getpass
import os

from langchain.chains.retrieval import create_retrieval_chain
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_community.llms import Tongyi
from langchain.chains.combine_documents import create_stuff_documents_chain
from langchain_community.document_loaders import PyPDFLoader
from langchain_text_splitters import RecursiveCharacterTextSplitter
from langchain_community.embeddings import  ModelScopeEmbeddings
from langchain_community.vectorstores import FAISS


# --- Environment configuration ---
# SECURITY: never hard-code API keys in source code (the previous revision
# leaked a live DashScope key here — rotate it). Read the key from the
# environment and only prompt interactively when it is absent.
if not os.environ.get("DASHSCOPE_API_KEY"):
    os.environ["DASHSCOPE_API_KEY"] = getpass("Enter DASHSCOPE_API_KEY: ")

# Local cache directory for ModelScope model downloads.
os.environ['MODELSCOPE_CACHE'] = "D:\\code\\models\\modelscope"

# Workaround for "OMP: Error #15" — multiple OpenMP runtimes end up linked
# into the process (e.g. FAISS + torch on Windows). Officially unsafe but
# commonly required; see http://openmp.llvm.org/ for details.
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'


# --- LLM and prompt setup ---
# (Removed: an unused chain-of-thought `template` string and an unused
# StrOutputParser instance — both were dead code.)

# Prompt that instructs the model to answer strictly from the retrieved
# context; the retrieval chain fills {context} and {input} at run time.
prompt = ChatPromptTemplate.from_template("""
依据提供的 context内容回答问题:
<context>
{context}
</context>
Question: {input}
""")

# Model: qwen-max via the Tongyi (DashScope) community integration.
# Integration docs:
# https://python.langchain.com/v0.2/docs/integrations/providers/alibaba_cloud/
llm = Tongyi(model='qwen-max')

# Chain that "stuffs" every retrieved document into the prompt's {context}.
document_chain = create_stuff_documents_chain(llm, prompt)

# --- Document ingestion and retrieval chain ---
# Load the source PDF, split it into chunks, embed each chunk with a
# ModelScope sentence-embedding model, and index the vectors in FAISS.
pdf_loader = PyPDFLoader("RLJIAOZhiweiV2.pdf")
print("正在加载文档")
pages = pdf_loader.load()
print("文档加载完成")

# Chunk the pages using the splitter's default size/overlap settings.
chunks = RecursiveCharacterTextSplitter().split_documents(pages)

# Chinese sentence-embedding model (downloaded into MODELSCOPE_CACHE).
embedder = ModelScopeEmbeddings(
    model_id="iic/nlp_corom_sentence-embedding_chinese-base"
)

# Build the FAISS index, then wire retriever -> stuff-documents chain.
vector_store = FAISS.from_documents(chunks, embedder)
retrieval_chain = create_retrieval_chain(
    vector_store.as_retriever(), document_chain
)


# --- Query execution ---
# Ask the RAG chain to draft a recommendation letter grounded in the PDF,
# constrained to stay under 50% similarity to the letter in the document.
question = "根据文档内容、学生信息【姓名：赵永飞，学校：北京邮电大学，年龄35，想申请牛津大学硕士】生成一封推荐信，生成的推荐信内容相似度和文档中的推荐信内容相似度不能超过50%。"
print("正在思考问题：", question)

response = retrieval_chain.invoke({"input": question})
# The chain returns a dict; the generated text lives under "answer".
print(response["answer"])