# %% [markdown]
# # 本地RAG演示Demo
# 

# %%
# A bare `pip install ...` line is a syntax error when this file runs as a
# plain Python script, so install via `python -m pip` instead (equivalent to
# the `%pip install llama-index` notebook magic).
import subprocess
import sys

subprocess.run([sys.executable, "-m", "pip", "install", "llama-index"], check=True)

# %%

# Configure a Chinese-hosted LLM backend (Alibaba DashScope, Qwen) for llama-index.
import os
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels

# Fail fast with a clear message if the key is missing, instead of hitting a
# confusing authentication error on the first API call.
api_key = os.getenv("DASHSCOPE_API_KEY")
if not api_key:
    raise EnvironmentError(
        "DASHSCOPE_API_KEY is not set; export it before running this demo."
    )

dashscope_llm = DashScope(model_name="qwen-max", api_key=api_key)

# Smoke test: one completion round-trip against the hosted model.
resp = dashscope_llm.complete("你好啊？")
print(resp)

# %% [markdown]
# # 在线 Embedding 模型
# # 参考文档: https://docs.llamaindex.ai/en/stable/examples/embeddings/dashscope_embeddings/
# # 安装 embedding 的包，这里用在线的

# %%
# Same fix as above: a bare `pip install ...` line is invalid Python, so use
# an in-process `python -m pip` call that works both as a script and in a
# notebook (equivalent to `%pip install llama-index-embeddings-dashscope`).
import subprocess
import sys

subprocess.run(
    [sys.executable, "-m", "pip", "install", "llama-index-embeddings-dashscope"],
    check=True,
)

# %%
# Sanity check of the hosted (DashScope) embedding model.
from llama_index.embeddings.dashscope import (
    DashScopeEmbedding,
    DashScopeTextEmbeddingModels,
    DashScopeTextEmbeddingType,
)

# text_type=`document` because these vectors will later be used to build an index.
embedder = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V2,
    text_type=DashScopeTextEmbeddingType.TEXT_TYPE_DOCUMENT,
)
text_to_embedding = ["风急天高猿啸哀", "渚清沙白鸟飞回", "无边落木萧萧下", "不尽长江滚滚来"]
# Batch-embed; result order corresponds to the input order.
result_embeddings = embedder.get_text_embedding_batch(text_to_embedding)
for index, embedding in enumerate(result_embeddings):
    if embedding is None:  # a None entry means that request failed to embed
        print(f"The {text_to_embedding[index]} embedding failed.")
    else:
        print(f"Dimension of embeddings: {len(embedding)}")
        print(f"Input: {text_to_embedding[index]}, embedding is: {embedding[:5]}")

# %%
# Register the demo-wide defaults on llama-index's global Settings object.
from llama_index.core import Settings

Settings.embed_model = embedder  # hosted DashScope embedder configured above
Settings.llm = dashscope_llm     # Qwen model configured earlier
Settings.chunk_size = 1024       # default chunk size used when parsing nodes

# %%
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader

# Load every file under the knowledge-base folder into Document objects.
documents = SimpleDirectoryReader("./data/paul_graham/").load_data()

# %%
# Echo the loaded documents for inspection (notebook cell output).
documents

# %%
# Split the loaded documents into sentence-based chunks ("nodes").
from llama_index.core.node_parser import SentenceSplitter

splitter = SentenceSplitter()
nodes = splitter.get_nodes_from_documents(documents)
print(nodes)

# %%
# Store the parsed nodes in an in-memory document store so several
# indexes can share the same backing documents.
from llama_index.core.storage.docstore import SimpleDocumentStore

docstore = SimpleDocumentStore()
docstore.add_documents(nodes)

# %%
from llama_index.core import StorageContext, SummaryIndex, SimpleKeywordTableIndex

# One storage context shares the docstore across all three index types.
storage_context = StorageContext.from_defaults(docstore=docstore)

# Build three complementary indexes over the same set of nodes.
summary_index = SummaryIndex(nodes, storage_context=storage_context)
vector_index = VectorStoreIndex(nodes, storage_context=storage_context)
keyword_table_index = SimpleKeywordTableIndex(nodes, storage_context=storage_context)

# %%
# How many nodes ended up in the shared docstore?
len(storage_context.docstore.docs)

# %%
# Ask a question through the summary index and print the synthesized answer.
engine = summary_index.as_query_engine()
response = engine.query("鲍恩撑出生地是哪里？")
print(response)

