import os
from flask import Flask, request, jsonify
from flask_cors import CORS
from mas.memory.mas_memory.GMemory import GMemory
from mas.llm import GPTChat
#from mas.utils import EmbeddingFunc
from mas.memory.common import MASMessage, StateChain

from openai import OpenAI
from mas.llm import LLMCallable, GPTChat, get_price

from dotenv import load_dotenv
load_dotenv()

# OpenAI-compatible client pointed at the DashScope (Bailian) endpoint.
client = OpenAI(
    api_key=os.getenv("DASHSCOPE_API_KEY"),  # set DASHSCOPE_API_KEY in the environment (or replace here)
    base_url=os.getenv("DASHSCOPE_API_BASE_URL") #"https://dashscope.aliyuncs.com/compatible-mode/v1"  # DashScope compatible-mode base URL
)


# class MockEmbeddingFunc:
#     def embed_query(self, text: str) -> List[float]:
#         completion = client.embeddings.create(
#             model="text-embedding-v4",
#             input=text,#'衣服的质量杠杠的，很漂亮，不枉我等了这么久啊，喜欢，以后还来这里买',
#             dimensions=768, # 指定向量维度（仅 text-embedding-v3及 text-embedding-v4支持该参数）
#             encoding_format="float"
#         )

#         print(completion.model_dump_json())
#         return [hash(text) % 1000 / 1000.0] * 768

#     def embed_documents(self, texts: List[str]) -> List[List[float]]:
#         return [self.embed_query(text) for text in texts]

# Initialize the Flask application
app = Flask(__name__)
CORS(app)  # enable CORS so browser clients on other origins can call the API

# Module-level cache of loaded embedding models, keyed by model name.
_EMBEDDING_MODEL_CACHE = {} 

# Module-level cache of computed embedding vectors (see EmbeddingFunc2.embed_query).
_EMBEDDING_CACHE = {}

class EmbeddingFunc2:
    """Embedding helper backed by the DashScope-compatible OpenAI client.

    Results are memoized in the module-level ``_EMBEDDING_CACHE`` so repeated
    queries do not trigger extra API calls.

    NOTE(review): ``__post_init__`` is only invoked automatically by
    ``dataclasses``; this class is not a dataclass, so it never runs — and it
    references ``SentenceTransformer``, which is not imported in this module.
    It is kept only for backward compatibility and must not be called.
    """

    # Name of the (currently unused) local sentence-transformers fallback model.
    model_type: str = "sentence-transformers/all-MiniLM-L6-v2"

    def __post_init__(self):
        # Dead code: never called (class is not a @dataclass) and
        # SentenceTransformer is not imported. Left unchanged for compatibility.
        if self.model_type not in _EMBEDDING_MODEL_CACHE:
            _EMBEDDING_MODEL_CACHE[self.model_type] = SentenceTransformer(self.model_type)

        self.func: SentenceTransformer = _EMBEDDING_MODEL_CACHE[self.model_type]

    def embed_documents(self, texts: list[str]) -> list[list]:
        """Embed each text in *texts*; returns one vector per input text."""
        return [self.embed_query(text) for text in texts]

    def embed_query(self, query: str) -> list:
        """Return the embedding vector for *query*, using the cache when possible."""
        print("embedding", query)

        # Key the cache on the query string itself: hash(query) can collide
        # and, for str, varies between interpreter runs (PYTHONHASHSEED).
        cached = _EMBEDDING_CACHE.get(query)
        if cached is not None:
            return cached

        completion = client.embeddings.create(
            model="text-embedding-v4",
            input=query,
            dimensions=768,  # vector size (supported by text-embedding-v3 and v4 only)
            encoding_format="float"
        )

        embedding = completion.data[0].embedding
        _EMBEDDING_CACHE[query] = embedding
        return embedding


from tasks.utils import get_model_type

model_type = "deepseek-v3"

# Initialize the GMemory instance (default configuration mirrors tasks/run.py).
llm_model = GPTChat(model_name="deepseek-chat")
embedding_func = EmbeddingFunc2()
working_dir = os.path.join("./.db", get_model_type(model_type), "api", "gmemory")

# Create the working directory BEFORE constructing GMemory: the instance is
# configured with this path and may read/write files under it during init.
# (Previously makedirs ran after construction.)
os.makedirs(working_dir, exist_ok=True)

gmemory_instance = GMemory(
    llm_model=llm_model,
    embedding_func=embedding_func,
    namespace="gmemory",
    global_config={
        "hop": 1,
        "start_insights_threshold": 5,
        "rounds_per_insights": 5,
        "insights_point_num": 5,
        "working_dir": working_dir
    }
)

@app.route('/add_memory', methods=['POST'])
def add_memory_api():
    """
    Add-memory endpoint.

    Request JSON fields:
        task_main (str): main task name
        task_description (str): task description
        label (bool): task label (success/failure)
        extra_fields (dict): optional extra fields
        state_chain (str): serialized StateChain string

    Returns:
        JSON status payload: 200 on success, 400 with the error message on failure.
    """
    data = request.json
    try:
        task_main = data.get('task_main')
        task_description = data.get('task_description')
        label = data.get('label')
        extra_fields = data.get('extra_fields', {})
        state_chain_data = data.get('state_chain')

        # Rebuild the StateChain from its serialized form.
        reconstructed_state_chain = StateChain.from_str(state_chain_data)

        # Assemble the MASMessage to be stored.
        mas_message = MASMessage(
            task_main=task_main,
            task_description=task_description,
            label=label,
            chain_of_states=reconstructed_state_chain,
            extra_fields=extra_fields
        )

        gmemory_instance.add_memory(mas_message)
        return jsonify({
            "status": "success",
            "message": "Memory added successfully."
        }), 200
    except Exception as e:
        # BUG FIX: the original re-raised here (debug leftover), which made the
        # JSON error response unreachable and returned a Flask 500 page instead
        # of the intended 400 JSON payload.
        print(e)
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 400
        

@app.route('/retrieve_memory', methods=['POST'])
def retrieve_memory_api():
    """
    Memory-retrieval endpoint.

    Request JSON fields:
        query_task (str): task to query for (required)
        successful_topk (int): number of successful cases (default 2)
        failed_topk (int): number of failed cases (default 1)
        insight_topk (int): number of insights (default 10)
        threshold (float): similarity threshold (default 0.3)

    Returns:
        JSON with successful cases, failed cases, and insights.
    """
    payload = request.json
    query_task = payload.get('query_task')

    # The query task is mandatory; all other parameters have defaults.
    if not query_task:
        return jsonify({
            "status": "error",
            "message": "query_task is required."
        }), 400

    try:
        successful_cases, failed_cases, insights = gmemory_instance.retrieve_memory(
            query_task=query_task,
            successful_topk=payload.get('successful_topk', 2),
            failed_topk=payload.get('failed_topk', 1),
            insight_topk=payload.get('insight_topk', 10),
            threshold=payload.get('threshold', 0.3)
        )

        # Convert MASMessage cases to dicts so the response is JSON-serializable.
        return jsonify({
            "status": "success",
            "successful_cases": [MASMessage.to_dict(case) for case in successful_cases],
            "failed_cases": [MASMessage.to_dict(case) for case in failed_cases],
            "insights": insights
        }), 200
    except Exception as e:
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 400

@app.route('/project_insights', methods=['POST'])
def project_insights_api():
    """
    Insight-projection endpoint.

    Request JSON fields:
        raw_insights (list): raw insight strings (required)
        role (str): optional role
        task_traj (str): optional task trajectory

    Returns:
        JSON with the projected insights.
    """
    payload = request.json
    raw_insights = payload.get('raw_insights')

    # Validate up front: raw_insights must be a non-empty list.
    if not raw_insights or not isinstance(raw_insights, list):
        return jsonify({
            "status": "error",
            "message": "raw_insights (list of strings) is required."
        }), 400

    try:
        projected = gmemory_instance.project_insights(
            raw_insights=raw_insights,
            role=payload.get('role'),
            task_traj=payload.get('task_traj')
        )
        return jsonify({
            "status": "success",
            "projected_insights": projected
        }), 200
    except Exception as e:
        return jsonify({
            "status": "error",
            "message": str(e)
        }), 400

if __name__ == '__main__':
    # Listens on all interfaces, port 8910.
    # NOTE(review): debug=True enables the Werkzeug reloader and interactive
    # debugger — must be disabled before deploying to production.
    app.run(host='0.0.0.0', port=8910, debug=True)