import os
import chromadb
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings, StorageContext
from llama_index.core.chat_engine.types import ChatMode
from llama_index.core.node_parser import TokenTextSplitter
from llama_index.embeddings.dashscope import DashScopeEmbedding, DashScopeTextEmbeddingModels
from llama_index.llms.dashscope import DashScope, DashScopeGenerationModels
from llama_index.vector_stores.chroma import ChromaVectorStore
from chromadb.config import Settings as ChromaSettings

# 配置LLM
# DashScope API key, read from the environment once and shared by both models.
_dashscope_api_key = os.getenv("DASHSCOPE_API_KEY")

# Global LLM: Qwen-Max served through DashScope.
Settings.llm = DashScope(
    model_name=DashScopeGenerationModels.QWEN_MAX,
    api_key=_dashscope_api_key,
)

# Global embedding model.  batch_size=10 caps how many texts go into a
# single embedding request; the key is passed explicitly so it takes effect.
Settings.embed_model = DashScopeEmbedding(
    model_name=DashScopeTextEmbeddingModels.TEXT_EMBEDDING_V3,
    batch_size=10,
    api_key=_dashscope_api_key,
)

# 初始化Chroma客户端
# Chroma client persisting its data under ./chroma_data.
client = chromadb.Client(
    ChromaSettings(is_persistent=True, persist_directory="./chroma_data")
)

# Fetch the document collection, creating it on first run (idempotent).
collection = client.get_or_create_collection(
    name="deepseek_docs",
    metadata={"description": "DeepSeek V3 相关文档集合"},
)

# 配置向量存储和存储上下文
# Wire the Chroma collection into LlamaIndex's storage abstractions.
vector_store = ChromaVectorStore(chroma_collection=collection)
storage_context = StorageContext.from_defaults(vector_store=vector_store)

# Ingest every PDF found under ./data.
reader = SimpleDirectoryReader("./data", required_exts=[".pdf"])
documents = reader.load_data()

# Split documents into ~500-token chunks with a 100-token overlap
# so context is not lost at chunk boundaries.
splitter = TokenTextSplitter(chunk_size=500, chunk_overlap=100)
nodes = splitter.get_nodes_from_documents(documents)

# Embed nodes in explicit batches of at most 10 texts per API call
# (mirrors the embed model's batch_size so no request exceeds the limit),
# then push each embedded batch straight into the vector store.
embedder = Settings.embed_model
BATCH = 10
for start in range(0, len(nodes), BATCH):
    chunk = nodes[start:start + BATCH]
    # One embedding request for the whole batch of chunk texts.
    vectors = embedder.get_text_embedding_batch(
        [node.get_content() for node in chunk]
    )
    # Attach each embedding to its node before storing.
    for node, vector in zip(chunk, vectors):
        node.embedding = vector
    vector_store.add(chunk)

# Build the index on top of the already-populated vector store.
index = VectorStoreIndex.from_vector_store(vector_store, storage_context=storage_context)

# 测试问答
# Build a chat engine over the index.  CONDENSE_QUESTION rewrites each
# follow-up into a standalone query before retrieval, so multi-turn
# questions keep their conversational context.
chat_engine = index.as_chat_engine(
    chat_mode=ChatMode.CONDENSE_QUESTION,
    similarity_top_k=5  # default is small (likely 2); retrieve more chunks for better recall
)

# Streamed answer: consume the token generator as it arrives.
# NOTE: do not print the streaming response object before consuming the
# generator — the response text is empty until the stream is drained,
# so the old `print(streaming_response)` only produced a blank line.
streaming_response = chat_engine.stream_chat("deepseek v3数学能力怎么样?")
for token in streaming_response.response_gen:
    print(token, end="", flush=True)
print()  # terminate the streamed line before the next turn's output

# Second turn: relies on the condensed-question chat history above.
response = chat_engine.chat("代码能力呢?")
print(response)
