import os
import json
from openai import OpenAI
import numpy as np

from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv(), override=True)

# Default system prompt (Chinese: "You are an AI assistant; please answer
# based on the user's question.").
system_prompt = """你是一个AI助手，请根据用户的问题给出回答。"""

# OpenAI-compatible client pointed at Alibaba Cloud DashScope.
client = OpenAI(
    # If the environment variable is not configured, replace the next line
    # with your Bailian API key: api_key="sk-xxx"
    api_key=os.getenv("DASHSCOPE_API_KEY"), 
    base_url="https://dashscope.aliyuncs.com/compatible-mode/v1",
)

def color_content(content: str, *args, **kwargs):
    """Print *content* wrapped in green ANSI escape codes.

    Extra positional/keyword arguments are forwarded to ``print`` unchanged
    (e.g. ``end=''``, ``flush=True``).
    """
    green, reset = "\033[92m", "\033[0m"
    print(f"{green}{content}{reset}", *args, **kwargs)

def predict(desc: str, model: str = None, history: list = None, prompt: str = None):
    """Stream one chat completion to stdout and return the final answer.

    Args:
        desc: The user's message for this turn.
        model: Model name; defaults to "qwen-max" when None.
        history: Prior message list; when None a fresh one is built from a
            single system message.
        prompt: System prompt used when building a fresh history; falls back
            to the module-level ``system_prompt``.

    Returns:
        The accumulated assistant answer text (reasoning trace excluded).
    """
    reasoning_content = ""  # accumulated chain-of-thought text
    answer_content = ""     # accumulated final answer text
    is_answering = False    # True once the final answer has started streaming
    history = history or [
        {"role": "system", "content": prompt or system_prompt}
    ]
    # Create the streaming chat-completion request.
    completion = client.chat.completions.create(
        model=model or "qwen-max",
        messages=[*history, {"role": "user", "content": desc}],
        stream=True,
        # With include_usage the API appends a final chunk carrying token usage.
        stream_options={
            "include_usage": True
        }
    )

    print("\n🤖 AI ➜ ")

    print_thinking = False
    for chunk in completion:
        # A chunk with empty choices is the trailing usage report.
        if not chunk.choices:
            print("\nUsage:")
            print(chunk.usage)
            continue
        delta = chunk.choices[0].delta
        # Reasoning deltas (thinking models) are streamed inside <think> tags.
        if getattr(delta, 'reasoning_content', None) is not None:
            if not print_thinking:
                print("<think>\n")
                print_thinking = True
            color_content(delta.reasoning_content, end='', flush=True)
            reasoning_content += delta.reasoning_content
        elif getattr(delta, 'content', None) is not None:
            # First non-empty answer token: close the reasoning section if open.
            if delta.content != "" and not is_answering:
                if print_thinking:
                    print("\n</think>\n")
                is_answering = True
            color_content(delta.content, end='', flush=True)
            answer_content += delta.content
        else:
            print(chunk)  # unexpected chunk shape — surface it for debugging

    return answer_content


def chat(model: str = None, history: list = None, prompt: str = None):
    """Run an interactive terminal chat loop.

    Commands typed by the user:
        exit   - leave the loop.
        memory - print the conversation history (truncated to 60 chars/entry).
        anything else - sent to the model via ``predict``.

    Args:
        model: Model name forwarded to ``predict``.
        history: Existing message list to continue; built fresh when None.
        prompt: System prompt used when building a fresh history.
    """
    # BUG FIX: this dict was named ``avatar`` but the "memory" branch read
    # ``avatars`` (NameError) and also rebound ``avatar`` to a string inside
    # the loop, which broke ``avatar['user']`` on the next prompt.
    avatars = {
        "system": "🤖 AI",
        "user": "👨‍💼 你",
        "assistant": "🤖 AI",
        "other": "🤩 其他"
    }

    history = history or [
        {"role": "system", "content": prompt or system_prompt}
    ]
    while True:
        desc = input(f"{avatars['user']} ➜ ")
        if desc == "exit":
            break
        elif desc == "memory":
            # Pretty-print the conversation history.
            for msg in history:
                role_avatar = avatars.get(msg["role"], avatars["other"])
                # Truncate long entries to the first 60 characters.
                content = msg["content"][:60] + ("..." if len(msg["content"]) > 60 else "")
                print(f"{role_avatar} ➜ {content}")
        else:
            answer = predict(desc, model, history, prompt)
            history.extend([
                {"role": "user", "content": desc},
                {"role": "assistant", "content": answer}
            ])

def embedding(texts: list[str]):
    """Embed each text in *texts* as a 1024-dimension float vector.

    Uses DashScope's ``text-embedding-v3`` model and returns one embedding
    list per input text, in the same order.
    """
    response = client.embeddings.create(
        model="text-embedding-v3",
        input=texts,
        dimensions=1024,
        encoding_format="float"
    )
    vectors = []
    for item in response.data:
        vectors.append(item.embedding)
    return vectors

def cosine_similarity(vec1, vec2):
    """Return the cosine similarity between two vectors.

    Args:
        vec1: First vector (any sequence convertible by numpy).
        vec2: Second vector, same length as *vec1*.

    Returns:
        A float in [-1.0, 1.0]. Returns 0.0 when either vector has zero
        magnitude (the previous version divided by zero, producing nan and
        a RuntimeWarning).
    """
    vec1 = np.asarray(vec1, dtype=float)
    vec2 = np.asarray(vec2, dtype=float)
    denom = np.linalg.norm(vec1) * np.linalg.norm(vec2)
    if denom == 0.0:
        return 0.0
    return float(np.dot(vec1, vec2) / denom)

def find_most_similar(query_text: str, candidate_texts: list[str], precomputed_embeddings=None):
    """Pick the candidate text most similar to *query_text*.

    Args:
        query_text: Text to compare against the candidates.
        candidate_texts: Candidate texts to rank.
        precomputed_embeddings: Optional embeddings for *candidate_texts*
            (same order); when None they are computed here.

    Returns:
        Tuple of (index of best match, the matching text, its similarity).
    """
    # Embed the query text itself.
    query_vec = embedding([query_text])[0]

    # Reuse precomputed candidate vectors when supplied, otherwise embed now.
    candidate_vecs = (
        embedding(candidate_texts)
        if precomputed_embeddings is None
        else precomputed_embeddings
    )

    # Score every candidate and keep the highest-scoring one.
    scores = [cosine_similarity(query_vec, vec) for vec in candidate_vecs]
    best = np.argmax(scores)
    return best, candidate_texts[best], scores[best]

if __name__ == "__main__":
    print("测试embedding")
    # Sample corpus: business descriptions with industry classification codes
    # (extra entries below are kept commented out as alternative test data).
    samples = [
        "**新能源科技公司**：太阳能光伏组件研发、储能电池生产销售、新能源电站运维服务 - 分类代码: C3841",
        # "**互联网医疗平台**：健康管理APP开发、在线问诊服务、医药电商平台运营 - 分类代码: I6560",
        # "**现代农业综合体**：有机蔬菜种植、农产品深加工、观光农业体验、冷链物流配送 - 分类代码: A0141",
        # "**跨境直播电商**：跨境商品直播销售、网红孵化、海外仓储服务 - 分类代码: L7249",
        # "**智能硬件制造商**：AIoT设备研发、智能家居系统集成、物联网技术服务 - 分类代码: C3911"
    ]

    # Compute the candidate embeddings once, up front.
    samples_embedded = embedding(samples)
    
    # Second argument is the candidate text list; third is the matching
    # precomputed embeddings (same order).
    index, similar_text, score = find_most_similar("软件开发", samples, samples_embedded)
    print(f"最相似的文本是: {similar_text}, 相似度: {score:.2f}")