"""
pip install tiktoken
pip install InstructorEmbedding
pip install sentence_transformers

https://zhuanlan.zhihu.com/p/622017658

"""

from operator import itemgetter
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings, HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings
from langchain.prompts import ChatPromptTemplate
from langchain.schema.output_parser import StrOutputParser
from langchain.schema.runnable import RunnableLambda, RunnablePassthrough
from langchain.vectorstores import FAISS
from PyCmpltrtok.common import sep

sep('vector store')
# A handful of demo sentences that form the retrieval corpus.
_demo_texts = [
    "harrison worked at kensho",
    "harrison's wife is kate",
    "harrison is such a good man",
    "harrison works very hard",
    "lilei is a good student",
    "hanmeimei is also a good student",
    "we all work hard to get a job",
]
# Embedding backend; alternatives imported above:
# OpenAIEmbeddings, HuggingFaceInstructEmbeddings, HuggingFaceEmbeddings.
# NOTE(review): hard-coded local Windows path to the m3e-base model —
# the hosted alternative would be model_name='moka-ai/m3e-base'.
_embeddings = HuggingFaceEmbeddings(
    model_name=r'D:\_const\wsl\my_github\m3e-base'
)
# Build an in-memory FAISS index over the demo corpus.
vectorstore = FAISS.from_texts(_demo_texts, embedding=_embeddings)
sep('retriever')
retriever = vectorstore.as_retriever()

sep('template')
# Prompt that constrains the model to answer only from the retrieved context.
# Placeholders: {context} (retrieved documents) and {question} (user query).
prompt = ChatPromptTemplate.from_template(
    """Answer the question based only on the following context:
{context}

Question: {question}
"""
)

sep('model')
# Chat model reached through an OpenAI-compatible endpoint; here it points at
# a locally served chatglm2-6b-int4 instance.
model = ChatOpenAI(
    streaming=True,
    verbose=True,
    callbacks=[],
    openai_api_key="token1",  # placeholder key; a local server typically ignores it
    # Fix: the URL contains no placeholders, so the original f-string prefix
    # was spurious (ruff F541) — a plain string literal is equivalent.
    openai_api_base="http://127.0.0.1:6001/v1",
    model_name="chatglm2-6b-int4",
    temperature=0.0,  # deterministic sampling for a reproducible demo
    openai_proxy=None,
    top_p=1.0,
    max_tokens=2048,
)

sep('chain')
# LCEL pipeline: fan out the input into retrieved context plus the raw
# question, fill the prompt, run the chat model, then reduce the message
# to a plain string.
_inputs = {"context": retriever, "question": RunnablePassthrough()}
chain = _inputs | prompt | model | StrOutputParser()

sep('invoke')
# Run the end-to-end RAG chain once and print the model's answer.
answer = chain.invoke("where did harrison work?")
print(answer)
