from langchain_huggingface import HuggingFaceEmbeddings
from llama_index.core import Settings

# Configure a local embedding model up front so LlamaIndex does not try to
# construct its default (OpenAI) embedding and fail without an API key.
# NOTE(review): the original path was
# "D:/models/sentence-transformersall-MiniLM-L6-v2" — missing the separator
# between the org and model name. The canonical HF id is
# "sentence-transformers/all-MiniLM-L6-v2"; confirm the actual on-disk layout.
# NOTE(review): this is langchain's HuggingFaceEmbeddings, not llama_index's
# HuggingFaceEmbedding; LlamaIndex wraps LangChain embeddings on assignment —
# verify the installed llama_index version supports that resolution.
Settings.embed_model = HuggingFaceEmbeddings(
    model_name="D:/models/sentence-transformers/all-MiniLM-L6-v2",
    model_kwargs={'device': 'cuda'}  # assumes a CUDA-capable GPU is present — TODO confirm
)

# Explicitly disable the LLM so Settings does not lazily initialize a default
# one when queried below.
Settings.llm = None

# Dump the effective LlamaIndex Settings section by section.
# (All user-facing strings are intentionally left in Chinese, unchanged.)
embed_model = Settings.embed_model
llm = Settings.llm

print("=== LlamaIndex 默认配置 ===")

print("\n--- 嵌入模型配置 ---")
# Only the type is reported; the model repr itself can be very verbose.
print("嵌入模型类型:", type(embed_model))

print("\n--- 大语言模型配置 ---")
print("LLM:", llm)
if llm:
    print("LLM 类型:", type(llm))

print("\n--- 文本分割配置 ---")
print("块大小 (chunk_size):", Settings.chunk_size)
print("块重叠 (chunk_overlap):", Settings.chunk_overlap)

print("\n--- 模型参数配置 ---")
print("上下文窗口大小:", Settings.context_window)
print("输出token数:", Settings.num_output)

print("\n--- 其他配置 ---")
print("是否启用回调:", Settings.callback_manager)