from llama_index.core import Settings
from llama_index.core import (
    SimpleDirectoryReader,
    VectorStoreIndex,
    StorageContext,
    load_index_from_storage,
)
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.ollama import OllamaEmbedding
# Directory where the vector index is persisted between runs.
store_path = "./storage/deepseek_v3_2"

# Toggle between a local Ollama chat model and a remote OpenAI-compatible
# endpoint (SiliconFlow). Only the selected backend's SDK is imported.
use_ollama = True

if use_ollama:
    from llama_index.llms.ollama import Ollama

    # Local model served by Ollama; generous timeout for slow first loads.
    llm = Ollama(model="qwen2.5:7b-instruct-q4_0", request_timeout=120.0)
else:
    import os

    from llama_index.llms.openllm import OpenLLM

    # Remote OpenAI-compatible endpoint; the API key comes from the
    # environment (may be None if SILICON_API_KEY is unset — TODO confirm
    # the client rejects that early rather than failing on first call).
    llm = OpenLLM(
        model="deepseek-ai/DeepSeek-R1-Distill-Qwen-7B",
        api_base="https://api.siliconflow.cn/v1",
        api_key=os.environ.get("SILICON_API_KEY"),
        max_tokens=2048,
        temperature=0.1,
    )

# Register the chosen LLM and a local Ollama embedding model as the
# llama-index global defaults used by the index and query engine below.
Settings.llm = llm
Settings.embed_model = OllamaEmbedding(
    model_name="bge-m3",
    base_url="http://127.0.0.1:11434",
)

# Try to reuse the previously persisted index; fall back to rebuilding it
# from the source PDF if loading fails (e.g. first run, no storage dir yet).
try:
    storage_context = StorageContext.from_defaults(
        persist_dir=store_path
    )
    vector_index = load_index_from_storage(storage_context)
    index_loaded = True
except Exception:
    # Narrowed from a bare `except:` so KeyboardInterrupt / SystemExit
    # still propagate instead of silently triggering a rebuild.
    index_loaded = False

if not index_loaded:
    # Load the document, split it into ~1024-token sentence-aware chunks,
    # and build a fresh vector index over the resulting nodes.
    documents = SimpleDirectoryReader(input_files=["DeepSeek_V3.pdf"]).load_data()
    splitter = SentenceSplitter(chunk_size=1024)
    nodes = splitter.get_nodes_from_documents(documents)
    vector_index = VectorStoreIndex(nodes)
    # Persist so the next run takes the fast load path above.
    vector_index.storage_context.persist(persist_dir=store_path)

def _print_response(response):
    """Print each retrieved source node (text + metadata) then the answer."""
    for node in response.source_nodes:
        print("-" * 20)
        print(node.text)
        print(node.metadata)
    print(str(response))


# Retrieve the top-2 most similar chunks for each question.
query_engine = vector_index.as_query_engine(similarity_top_k=2)

# Run the demo questions through the same query/print pipeline
# (previously two copy-pasted stanzas).
for question in (
    "DeepSeek v3是采用什么精度进行训练的?使用的是什么GPU",
    "DeepSeek v3有哪些改进，为什么训练成本相对比较低",
):
    response = query_engine.query(question)
    _print_response(response)
