import os

import numpy as np
from openai import OpenAI
# OpenAI-compatible client for Alibaba DashScope (embeddings + chat below).
# SECURITY: the original hard-coded an API key here — never commit secrets;
# read them from the environment instead (set DASHSCOPE_API_KEY before running).
client = OpenAI(
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
    api_key=os.getenv("DASHSCOPE_API_KEY", ""),
)
# Secondary client for the DeepSeek API (declared but not used further below).
client2 = OpenAI(
    base_url="https://api.deepseek.com/v1",
    api_key=os.getenv("DEEPSEEK_API_KEY", ""),
)
# Below is a complete example of a simple RAG system implemented in plain
# Python without LangChain. It works for local-file Q&A and can be extended
# to supply-chain scenarios such as contract Q&A, logistics-record
# extraction, and tender-document understanding.
# ________________________________________
# 🧠 RAG system core code (no LangChain)
# 💡 Environment setup:
# Tiny in-memory corpus used as the retrieval knowledge base.
# NOTE(review): the first two entries ("sing, dance", "basketball") look like
# leftover test strings; the last two are supply-chain facts the demo query
# actually targets.
documents = [
    "唱，跳",
    "篮球",
    "我们计划在2024年第三季度完成原材料集中采购。",
    "仓储地点为江苏昆山，主要负责南方区域配送。"
]
# 假设我们有一份简单文本
# documents = [
#     "本公司于2023年4月完成了来自A供应商的物流发货记录。",
#     "供应商B在2023年5月投标了项目X并最终中标。",
#     "我们计划在2024年第三季度完成原材料集中采购。",
#     "仓储地点为江苏昆山，主要负责南方区域配送。"
# ]
# ________________________________________
# ✅ 第二步：文本向量化（用 SentenceTransformer）
# deepseek-chat
# text-embedding-v3

def create_embeddings(text, model="text-embedding-v3"):
    """Embed a single piece of text and return the raw embeddings API response.

    Callers read the vector via ``response.data[0].embedding``.
    """
    return client.embeddings.create(
        model=model, input=text, encoding_format="float"
    )

def create_document_embeddings(text, model="text-embedding-v3"):
    """Embed each document in *text* (one API call per item).

    Returns a list of embedding vectors (list[float]) in input order.
    """
    return [
        client.embeddings.create(
            model=model, input=doc, encoding_format="float"
        ).data[0].embedding
        for doc in text
    ]



# print("query:",create_embeddings(query).data[0].embedding)

# Cosine similarity
def cosine_similarity(vec1, vec2):
    """Return the cosine similarity between two equal-length vectors.

    Args:
        vec1, vec2: array-likes of numbers (lists or numpy arrays).

    Returns:
        float in [-1.0, 1.0]; 0.0 when either vector has zero norm —
        the original formula divided by zero there and produced NaN.
    """
    v1 = np.asarray(vec1, dtype=float)
    v2 = np.asarray(vec2, dtype=float)
    denom = np.linalg.norm(v1) * np.linalg.norm(v2)
    if denom == 0.0:
        return 0.0
    return float(np.dot(v1, v2) / denom)
# vec1 = [1, 2, 3]
# vec2 = [4, 5, 6]
# print("123",cosine_similarity(vec1, vec2))
# ________________________________________
# ✅ 第三步：建立向量索引（用 FAISS）
# ✅ 第四步：用户提问 → 检索相关内容
def semantic_search(query, text_chunks, embeddings, k=2):
    """Return the k chunks whose embeddings are most similar to *query*.

    Embeds the query once, scores every chunk with cosine similarity,
    then returns the top-k chunks in descending-similarity order.
    """
    query_vec = np.array(create_embeddings(query).data[0].embedding)
    scores = [
        cosine_similarity(query_vec, np.array(chunk_vec))
        for chunk_vec in embeddings
    ]
    # Rank chunk indices by score, highest first (stable sort keeps tie order).
    ranked = sorted(range(len(scores)), key=lambda i: scores[i], reverse=True)
    return [text_chunks[i] for i in ranked[:k]]
# ________________________________________
# ✅ 第五步：结合文段 + 发给 OpenAI GPT 获取最终回答

# prompt = f"""根据以下背景信息回答问题：
#
# 背景信息：{retrieved_doc}
#
# 问题：{query}
#
# 请用中文简洁回答："""
#
# ---- Driver: build the index, retrieve context, assemble the prompts ----
embeddings = create_document_embeddings(documents)
print("embeddings:", embeddings)
query = "记者\nQuestion: 仓储地点?\n"
top_chunks = semantic_search(query, documents, embeddings)
print("top_2_document:", top_chunks)
system_prompt = """
你是综合专家。集成来自所提供上下文的信息以创建新颖的视角。
你的输出必须为：
- 简洁（200 字以下）
- 结构带有要点
- 包括事实总结和逻辑推断
"""
# BUG FIX: the original wrapped the joined string in {...}, which builds a
# one-element *set*, so the prompt rendered as "{'Context 1: ...'}" — set
# braces and quotes included. A plain string is what the template needs.
a = "\n".join([f"Context {i+1}:\n{chunk}" for i, chunk in enumerate(top_chunks)])
print("a:", a)
user_prompt = f"""
Generate an innovative analysis by combining these contexts:
{a}

Structure your response as:
🌐 **Overview**: 核心事实整合
🔗 **Cross-Context Insights**: 跨上下文的关联性发现
🚀 **Future Directions**: 基于现有信息的合理推测
"""

# response = model.generate(
#     system_prompt=system_prompt,
#     user_prompt=user_prompt,
#     temperature=0.7  # 增加创造性
# )
# deepseek-chat
# qwq-32b-preview
def generate_response(system_prompt, user_message, model="qwq-32b-preview"):
    """Send a system+user message pair to the chat model.

    Returns the raw chat-completions response; callers read the answer via
    ``response.choices[0].message.content``.
    """
    conversation = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_message},
    ]
    return client.chat.completions.create(
        model=model,
        temperature=0.7,
        messages=conversation,
    )
# Run the final generation step and print the model's answer.
evaluation_response = generate_response(system_prompt, user_prompt)
answer = evaluation_response.choices[0].message.content
print(answer)