import os

from llama_index.core import Settings, VectorStoreIndex, Document
from llama_index.core import PromptTemplate

import custom_llm
import custom_embedding

LLM_MODEL = "glm-4"
EMBEDDING_MODEL = "embedding-2"

# SECURITY FIX: the Zhipu API key was hard-coded in source (a live credential
# committed to the repo). Read it from the environment instead and fail fast
# with an actionable message when it is missing. The previously committed key
# must be considered leaked and should be revoked/rotated.
ZHIPU_API_KEY = os.environ.get("ZHIPU_API_KEY", "")
if not ZHIPU_API_KEY:
    raise RuntimeError(
        "ZHIPU_API_KEY environment variable is not set; "
        "export your Zhipu API key before running this script."
    )

# Register the custom GLM chat model as the global LLM for llama_index.
Settings.llm = custom_llm.ChatGLM(model=LLM_MODEL, reuse_client=True, api_key=ZHIPU_API_KEY)

# Register the matching GLM embedding model for vectorization.
Settings.embed_model = custom_embedding.ChatGLMEmbeddings(model=EMBEDDING_MODEL, reuse_client=True,
                                                          api_key=ZHIPU_API_KEY)

# Build a tiny in-memory corpus. A real application would load files instead,
# e.g.: documents = SimpleDirectoryReader("./data").load_data()
sample_texts = ['今天天气不错', '机器学习是一个新技术', '天气真不错', '小白龙的大哥是孙悟空']
documents = [Document(text=sample) for sample in sample_texts]

# Embed the documents and build the vector index used by all queries below.
index = VectorStoreIndex.from_documents(documents)

# =============Query=================
# Run a few sample questions through the default query engine and print each
# answer as it arrives.
query_engine = index.as_query_engine()
for question in ("小白龙的大哥是谁", "今天天气怎样", "猪八戒的大哥是谁"):
    response = query_engine.query(question)
    print(response)

# =============Query Prompt Template=================
# Custom QA prompt: the retrieved context comes first, then the question.
# The prompt instructs the model to show its reasoning before the final
# answer and to wrap the final answer in square brackets.
text_qa_template_str = (
    "上下文信息如下："
    "\n---------------------\n{context_str}\n---------------------\n"
    "基于以上的上下文信息和你自己的知识，回答以下问题："
    "{query_str}\n如果上下文信息没有帮助，你也可以基于自己的知识回答。答案要求格式，推理逻辑在前，最终答案在后且以中括号[]包裹。"
    "\n"
)
text_qa_template = PromptTemplate(text_qa_template_str)

# Rebuild the query engine so retrieved chunks are injected into the custom
# prompt as {context_str}.
query_engine = index.as_query_engine(text_qa_template=text_qa_template)
for question in ("小白龙的大哥是谁", "猪八戒的大哥是谁"):
    response = query_engine.query(question)
    print(response)
