import os
import logging
import sys

# Configure root logging to stdout once.
# FIX: the previous revision called basicConfig() AND addHandler() with a
# second stdout StreamHandler, so every log record was printed twice.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# SECURITY FIX: never hard-code API keys in source. A live OpenAI key was
# previously committed on this line — it is exposed in version control and
# must be revoked. Supply the key via the environment instead:
#   export OPENAI_API_KEY=...
if not os.environ.get("OPENAI_API_KEY"):
    logging.getLogger(__name__).warning(
        "OPENAI_API_KEY is not set; OpenAI-backed calls below will fail."
    )

# ==========Load Document===================
from llama_index.core import SimpleDirectoryReader

# Read every supported file from a directory.
documents = SimpleDirectoryReader(input_dir='/Users/weigang/Desktop/llm-data').load_data()
# Read specific files only; input_files expects a list of paths.
documents = SimpleDirectoryReader(input_files=['/Users/weigang/Desktop/llm-data/tj.xls']).load_data()

from llama_index.core import Document

# Build Document objects directly from strings.
# FIX: Document is a pydantic model and must be constructed with the `text=`
# keyword — the old positional call Document(t) raised a TypeError.  The
# stray `...` (Ellipsis) list element is removed: it is not a string and
# broke construction.
# NOTE(review): these entries are file *paths*, not document content —
# Document stores the string verbatim as text; pass real text here.
text_list = ["/Users/weigang/Desktop/llm-data/tj.xls", "/Users/weigang/Desktop/llm-data/pxk.docx"]
documents = [Document(text=t) for t in text_list]

# ==========Create Node===================

# Parse the documents into Nodes.
# A Node represents one chunk of a document while keeping its relationships
# to other Nodes and to the index structure.
from llama_index.core.node_parser import SimpleNodeParser

node_parser = SimpleNodeParser()
nodes = node_parser.get_nodes_from_documents(documents)

# ==========Create Index===================
# Available index types:
#   - List Index
#   - Vector Store Index (most commonly used)
#   - Tree Index
#   - Keyword Table Index
#   - Composability Graph Index
#   - Pandas Index and SQL Index
#   - Document Summary Index
#   - Knowledge Graph Index
# https://zhuanlan.zhihu.com/p/653116624

# Build an index straight from the documents; node parsing happens
# internally, so the explicit Node-construction step can be skipped.
from llama_index.core import GPTVectorStoreIndex

index = GPTVectorStoreIndex.from_documents(documents)

# Build the index from pre-parsed Nodes instead of raw documents.
from llama_index.core import GPTVectorStoreIndex

index = GPTVectorStoreIndex(nodes)

# Start from an empty index and insert documents into it one at a time.
from llama_index.core import GPTVectorStoreIndex

index = GPTVectorStoreIndex([])
for document in documents:
    index.insert(document)

# Build an index with an explicitly configured LLM.
# NOTE(review): LLMPredictor/ServiceContext are the legacy configuration path
# (deprecated in llama_index >= 0.10 in favour of Settings), and the
# "text-davinci-003" model has been retired by OpenAI — confirm a current
# model name before running.
from llama_index.core import LLMPredictor, GPTVectorStoreIndex, PromptHelper, ServiceContext
from langchain import OpenAI

# define LLM
llm_predictor = LLMPredictor(llm=OpenAI(temperature=0, model_name="text-davinci-003"))

# define prompt helper
context_window = 4096      # maximum input size of the model
num_output = 256           # tokens reserved for the generated answer
# FIX: PromptHelper's third parameter in llama_index.core is
# chunk_overlap_ratio, a fraction in [0, 1] — the old positional value 20
# (a token count from the legacy API) raised a ValueError.
chunk_overlap_ratio = 0.1
prompt_helper = PromptHelper(
    context_window=context_window,
    num_output=num_output,
    chunk_overlap_ratio=chunk_overlap_ratio,
)

service_context = ServiceContext.from_defaults(llm_predictor=llm_predictor, prompt_helper=prompt_helper)
index = GPTVectorStoreIndex.from_documents(
    documents, service_context=service_context
)


# ==========Save Index===================
import os.path as osp
from llama_index.core import GPTVectorStoreIndex, StorageContext, load_index_from_storage

index_file = "/Users/weigang/Desktop/llm-data"
# FIX: StorageContext.persist() writes a *directory* of JSON files
# (docstore.json, index_store.json, ...), so osp.isfile() on the persist dir
# was always False — the script re-persisted on every run and never
# reloaded.  Probe for one of the persisted files instead.
# NOTE(review): this persist dir is the same folder as the source data;
# consider a dedicated storage directory.
if not osp.exists(osp.join(index_file, "docstore.json")):
    # No persisted index yet: save the in-memory one to disk.
    index.storage_context.persist(persist_dir=index_file)
else:
    # A persisted index exists: rebuild it from storage.
    index = load_index_from_storage(StorageContext.from_defaults(persist_dir=index_file))


# ==========Query Index===================
# Ask a question; the engine answers from the 2 most similar nodes.
query_engine = index.as_query_engine(similarity_top_k=2)
answer = query_engine.query("What did the author do growing up?")
response = answer
print(response)

# ==========Set Model===================
# Response synthesis over a list index:
#   - "default" retriever: iterate the nodes sequentially (create-and-refine);
#   - "embedding" retriever: synthesize from the top-k most similar nodes.
from llama_index.core import GPTListIndex

index = GPTListIndex.from_documents(documents)
# FIX: the modern query API accepts no `mode=` keyword, and index objects no
# longer expose .query() directly (removed in llama_index 0.6+).  The
# retriever behaviour is selected via as_query_engine(retriever_mode=...).
response = index.as_query_engine(retriever_mode="default").query("What did the author do growing up?")
response = index.as_query_engine(retriever_mode="embedding").query("What did the author do growing up?")