from langchain_community.vectorstores import Qdrant
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableParallel, RunnablePassthrough
from langchain_community.chat_models import ErnieBotChat
from langchain_community.embeddings import ErnieEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import TextLoader, PyPDFLoader
from langchain_community.llms import Tongyi
import json
import warnings
# Suppress ALL warnings (e.g. LangChain deprecation notices) to keep console
# output clean. NOTE(review): this hides every warning category — consider
# filtering only the specific categories that are noisy.
warnings.filterwarnings('ignore')

# Load API credentials from a local JSON config file.
# Expected keys: "API Key", "Secret Key", "DASHSCOPE_API_KEY".
with open('./ERNIE_config.json', encoding='utf-8') as f:
    api_key = json.load(f)  # parse JSON straight from the file handle

ernie_client_id = api_key['API Key']             # ERNIE (Wenxin) model API key
ernie_client_secret = api_key['Secret Key']      # ERNIE (Wenxin) model secret key
DASHSCOPE_API_KEY = api_key['DASHSCOPE_API_KEY'] # DashScope key for Alibaba Tongyi Qianwen


# Load the competition PDF and split it into overlapping chunks for embedding.
loader = PyPDFLoader('01_“未来校园”智能应用专项赛.pdf')  # source document
documents = loader.load()  # -> list of Document objects (one per PDF page)

# chunk_size is measured in characters (not tokens); the 100-character overlap
# preserves context across chunk boundaries so retrieval does not lose
# sentences cut at a chunk edge.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
chunked_documents = text_splitter.split_documents(documents)


# Embed the chunked documents with the ERNIE embedding model and index them
# in an in-memory Qdrant collection for similarity search.
ernie_embeddings = ErnieEmbeddings(
    ernie_client_id=ernie_client_id,
    ernie_client_secret=ernie_client_secret,
)
vectorstore = Qdrant.from_documents(
    documents=chunked_documents,
    embedding=ernie_embeddings,
    location=":memory:",            # keep the index in memory only
    collection_name="my_documents",  # name of the Qdrant collection
)

# Expose the vector store as a retriever for use in the chains below.
retriever = vectorstore.as_retriever()

# Map the incoming question to both the retrieved context ({context}) and the
# raw question ({question}) so the prompt template can reference each.
retrieval_inputs = {"context": retriever, "question": RunnablePassthrough()}
setup_and_retrieval = RunnableParallel(retrieval_inputs)

# Prompt: instruct the model to answer strictly from the retrieved context.
template = """Answer the question based only on the following context:
    {context}

Question: {question}
"""

prompt = ChatPromptTemplate.from_template(template)

# --- Model setup ---

# Baidu ERNIE-Bot 4 chat model, authenticated with the credentials above.
model = ErnieBotChat(
    model_name='ERNIE-Bot-4',
    ernie_client_id=ernie_client_id,
    ernie_client_secret=ernie_client_secret,
)

# Alibaba Tongyi Qianwen model, accessed through DashScope.
model_tongyi = Tongyi(temperature=1, api_key=DASHSCOPE_API_KEY)

# Parse the chat model's message output down to a plain string.
output_parser = StrOutputParser()

# Assemble the full RAG chains (LCEL pipe syntax):
# retrieval -> prompt -> LLM -> string output.
chain = setup_and_retrieval | prompt | model | output_parser
chain_tongyi = setup_and_retrieval | prompt | model_tongyi | output_parser

# Smoke test: ask the same question of both models so their answers can be
# compared. (Previously `chain` was invoked twice with the identical question
# and `chain_tongyi` was never used.)
answer1 = chain.invoke('输入你的问题1')
print(answer1)

answer2 = chain_tongyi.invoke('输入你的问题1')
print(answer2)

