from openai import OpenAI
from document_processor import DocumentProcessor
import os

# SiliconFlow platform configuration.
SILICONFLOW_API_URL = "https://api.siliconflow.cn/v1/"
# SECURITY: never commit a live API key. Read it from the environment; the
# hard-coded literal is kept only as a backward-compatible fallback and the
# exposed key should be rotated and removed.
SILICONFLOW_API_KEY = os.environ.get(
    "SILICONFLOW_API_KEY",
    "sk-znehdofdtxrlrtmlejvqrrcrxlurvsahhswosufaekgwpuri",
)
# Route Hugging Face downloads (e.g. the embedding model) through the mirror.
os.environ["HF_ENDPOINT"] = "https://hf-mirror.com"
# Chat model served by SiliconFlow.
model = "Qwen/Qwen3-235B-A22B"

# Path to the knowledge-base text file.
knowledge_txt = "your_large_document.txt"
# Build the vector store from the document (chunking + embedding happen
# inside DocumentProcessor.load_and_process — see document_processor module).
processor = DocumentProcessor(knowledge_txt)
db = processor.load_and_process()

# OpenAI-compatible client pointed at the SiliconFlow endpoint.
client = OpenAI(api_key=SILICONFLOW_API_KEY,
                base_url=SILICONFLOW_API_URL)

def rag_chat(query):
    """Answer ``query`` with retrieval-augmented generation.

    Retrieves the most relevant chunks from the module-level vector store
    ``db``, builds an augmented prompt, and streams the model's reply
    (including any reasoning tokens) to stdout.

    Args:
        query: The user's question, as plain text.

    Returns:
        None. Output is printed incrementally as the stream arrives.
    """
    # Retrieve relevant document chunks from the vector store.
    docs = db.similarity_search(query)
    context = "\n".join(doc.page_content for doc in docs)

    # Build the augmented prompt: system instruction + context + question.
    messages = [
        {'role': 'system', 'content': '你是一个基于知识库的AI助手，请根据以下上下文回答问题。'},
        {'role': 'user', 'content': f"上下文:\n{context}\n\n问题:{query}"}
    ]
    print(f"增强提示:\n{messages}")
    response = client.chat.completions.create(
        model=model,
        messages=messages,
        stream=True
    )

    for chunk in response:
        # Some keep-alive chunks carry no choices; skip them.
        if not chunk.choices:
            continue
        delta = chunk.choices[0].delta
        if delta.content:
            print(delta.content, end="", flush=True)
        # `reasoning_content` is a SiliconFlow/Qwen extension field; standard
        # OpenAI ChoiceDelta objects do not define it, so a plain attribute
        # access would raise AttributeError. Guard with getattr.
        reasoning = getattr(delta, "reasoning_content", None)
        if reasoning:
            print(reasoning, end="", flush=True)
    # Terminate the streamed answer with a newline.
    print()

if __name__ == "__main__":
    # Simple interactive REPL: answer questions until the user exits.
    while True:
        try:
            query = input("\n请输入您的问题(输入'exit'退出): ")
        except (EOFError, KeyboardInterrupt):
            # Ctrl-D / Ctrl-C: leave cleanly instead of dumping a traceback.
            break
        # Tolerate surrounding whitespace and any letter case on the command.
        if query.strip().lower() == 'exit':
            break
        print("\n回答:")
        rag_chat(query)