# Setup (run in terminal): pip install llama-index-embeddings-ollama
from typing import Dict

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, load_index_from_storage, StorageContext
from llama_index.embeddings.ollama import OllamaEmbedding
from llama_index.llms.openai import OpenAI
import os

from llama_index.llms.openai.utils import ALL_AVAILABLE_MODELS, CHAT_MODELS

from env import Env

# DeepSeek model names mapped to their context-window sizes (in tokens).
# NOTE(review): 128000 is taken as the context length for this distill model —
# confirm against the provider's model card.
DEEPSEEK_MODELS: Dict[str, int] = {
    "deepseek-ai/DeepSeek-R1-Distill-Llama-8B": 128000,
}

# Register the DeepSeek model with llama_index's OpenAI client tables so the
# model name is accepted and treated as a chat model. This must run before
# any OpenAI(...) instance is created with this model name (see Settings
# configuration below).
ALL_AVAILABLE_MODELS.update(DEEPSEEK_MODELS)
CHAT_MODELS.update(DEEPSEEK_MODELS)
API_KEY = Env.API_KEY  # replace with your actual API key (loaded from the local env module)


def deepseek_llm(**kwargs) -> OpenAI:
    """Build an OpenAI-compatible LLM client for the DeepSeek model.

    Points the client at the SiliconFlow endpoint; extra keyword arguments
    are forwarded to the underlying ``OpenAI`` constructor.
    """
    return OpenAI(
        api_key=API_KEY,
        model='deepseek-ai/DeepSeek-R1-Distill-Llama-8B',
        api_base='https://api.siliconflow.cn/v1',
        temperature=0.6,
        **kwargs,
    )


def embed_model(**kwargs) -> OllamaEmbedding:
    """Build the embedding model served by a local Ollama instance.

    Fix: ``**kwargs`` was previously accepted but silently ignored; it is now
    forwarded to ``OllamaEmbedding`` (consistent with ``deepseek_llm``).
    Existing calls without arguments behave exactly as before.
    """
    ollama_embedding = OllamaEmbedding(
        model_name="nomic-embed-text:latest",
        base_url='http://localhost:11434/',
        **kwargs,
    )
    return ollama_embedding


PERSIST_DIR = "./storage"  # directory where the vector index is persisted
# Configure llama_index's global Settings with the LLM and embedding model.
# NOTE: both clients are instantiated here at import time (module-level side
# effect), so importing this module requires the env API key to be set.
Settings.llm = deepseek_llm()
Settings.embed_model = embed_model()


def get_index():
    """Return the vector index, reusing the persisted copy when available.

    Fix: the original always re-read and re-embedded ``./docs`` on every run,
    even though the index was persisted to ``PERSIST_DIR`` and the file already
    imports ``load_index_from_storage`` / ``StorageContext`` for reuse. Now the
    persisted index is loaded if present; otherwise it is built and persisted.

    Returns:
        VectorStoreIndex: the loaded or freshly built index.
    """
    if os.path.exists(PERSIST_DIR):
        # Reuse the previously persisted index instead of re-embedding docs.
        storage_context = StorageContext.from_defaults(persist_dir=PERSIST_DIR)
        return load_index_from_storage(storage_context)
    # recursive=True also parses subdirectories under ./docs
    documents = SimpleDirectoryReader('./docs', recursive=True).load_data()
    print("正在创建索引...")
    # show_progress=True displays embedding progress in the terminal
    index = VectorStoreIndex.from_documents(documents, show_progress=True)
    index.storage_context.persist(persist_dir=PERSIST_DIR)
    return index


if __name__ == '__main__':
    index = get_index()

    # Sanity check: show the raw nodes the retriever matches for the question.
    retriever = index.as_retriever()
    retrieved_nodes = retriever.retrieve('第15题sql的答案是什么？')
    print(retrieved_nodes)

    # streaming=True streams tokens as they are generated; False returns the
    # complete answer in one piece.
    query_engine = index.as_query_engine(streaming=True)
    # Generate an answer grounded in the retrieved context.
    streaming_response = query_engine.query('第15题sql的答案是什么？')
    print("正在生成回复...")
    print("回答是：")
    streaming_response.print_response_stream()
