import os
import numpy as np
from openai import OpenAI

# --- configuration -------------------------------------------------------
CHUNK_SIZE = 500                      # characters per text chunk
TOP_K = 3                             # number of most-similar chunks used as context
EMBED_MODEL = "text-embedding-3-small"
CHAT_MODEL = "gpt-4o-mini"
BASE_URL = "https://api.ssopen.top/v1"


def _make_client():
    """Build the API client.

    SECURITY: the API key was previously hard-coded in this file. That key
    is compromised and must be rotated; supply the new one via the
    OPENAI_API_KEY environment variable instead of committing it.
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise SystemExit("请设置环境变量 OPENAI_API_KEY")
    return OpenAI(api_key=api_key, base_url=BASE_URL)


def chunk_text(text, chunk_size=CHUNK_SIZE):
    """Split *text* into consecutive fixed-size character chunks.

    Returns a list of strings; the last chunk may be shorter. An empty
    input yields an empty list.
    """
    if chunk_size <= 0:
        raise ValueError("chunk_size must be positive")
    return [text[i:i + chunk_size] for i in range(0, len(text), chunk_size)]


def embed_texts(client, texts):
    """Embed *texts* in a single batched API call.

    The embeddings endpoint accepts a list input and returns results in
    the same order, so one request replaces the previous per-chunk loop.
    Returns an (n, dim) float array, one row per input text.
    """
    response = client.embeddings.create(model=EMBED_MODEL, input=texts)
    return np.array([item.embedding for item in response.data])


def top_k_indices(similarities, k=TOP_K):
    """Return indices of the *k* largest similarity scores, best first.

    Handles the case of fewer than *k* candidates gracefully.
    """
    k = min(k, len(similarities))
    return np.argsort(similarities)[::-1][:k]


def main():
    """Index a user-supplied txt file, then answer questions over it."""
    client = _make_client()

    # Read the source document.
    txt_file = input("请输入txt文件路径: ")
    with open(txt_file, 'r', encoding='utf-8') as f:
        text = f.read()

    chunks = chunk_text(text)
    if not chunks:
        # Fail early: an empty file would otherwise break retrieval later.
        raise SystemExit("文件为空，无法建立索引")

    print("正在处理文本...")
    embeddings = embed_texts(client, chunks)

    # Interactive retrieval + answer loop.
    print("\n准备就绪！您可以开始提问了（输入 'exit' 退出）\n")
    while True:
        query = input("你: ")
        if query.lower() == 'exit':
            break

        # Embed the query and score every chunk by dot product.
        # (text-embedding-3 vectors are unit-normalized, so dot == cosine.)
        query_embedding = embed_texts(client, [query])[0]
        similarities = embeddings @ query_embedding
        context = "\n".join(chunks[i] for i in top_k_indices(similarities))

        messages = [
            {"role": "system", "content": "你是一个helpful的助手，严格根据提供的文本内容回答用户问题。"},
            {"role": "user", "content": f"基于以下内容回答问题：\n\n{context}\n\n问题：{query}"}
        ]

        # Stream the answer token by token.
        print("AI: ", end="", flush=True)
        stream = client.chat.completions.create(
            model=CHAT_MODEL,
            messages=messages,
            stream=True
        )
        for part in stream:
            delta = part.choices[0].delta.content
            if delta:
                print(delta, end="", flush=True)
        print("\n")


if __name__ == "__main__":
    main()