from enum import Enum
from transformers import AutoModelForCausalLM
from deepseek_vl2.models import DeepseekVLV2Processor
from langchain_openai import ChatOpenAI
from pydantic import BaseModel, SecretStr
from langgraph.prebuilt import create_react_agent  # builds the ReAct agent graph
from langgraph.checkpoint.memory import InMemorySaver


class ModelType(Enum):
    """How the underlying LLM is hosted: loaded locally or reached over HTTP."""

    LOCAL = "local"  # weights loaded from disk via transformers, run on CUDA
    HTTP = "http"    # remote OpenAI-compatible endpoint accessed via ChatOpenAI


class Model:
    """Configuration holder and factory for the agent's LLM backend.

    Depending on ``model_type`` it either loads a local causal LM with
    ``transformers`` or builds a ``ChatOpenAI`` client for a remote,
    OpenAI-compatible endpoint.
    """

    # Declared attributes (assigned in __init__).
    model_type: ModelType
    model_key: SecretStr
    model_path: str
    model_url: str

    def __init__(self, model_type: ModelType, dtype, model_name, model_key: str = "", module_path: str = "",
                 model_url: str = "") -> None:
        """Store the backend configuration.

        Args:
            model_type: LOCAL (load from disk) or HTTP (remote endpoint).
            dtype: torch dtype forwarded to ``from_pretrained`` for LOCAL models.
            model_name: model identifier sent to the HTTP endpoint.
            model_key: API key for the HTTP endpoint; wrapped in ``SecretStr``
                so it is not exposed by ``repr()``/logging.
            module_path: local filesystem path of the model weights.
            model_url: base URL of the OpenAI-compatible endpoint.
        """
        self.model_type = model_type
        self.dtype = dtype
        self.model_name = model_name
        self.module_path = module_path
        # Bug fix: the class declares `model_path` and external callers read
        # `model.model_path`, but it was never assigned, raising
        # AttributeError. Keep `module_path` as a backward-compatible alias.
        self.model_path = module_path
        self.model_url = model_url
        self.model_key = SecretStr(model_key)

    def load_model(self):
        """Instantiate and return the concrete model implementation.

        Returns:
            For LOCAL: an eval-mode ``AutoModelForCausalLM`` moved to CUDA.
            Otherwise: a ``ChatOpenAI`` client bound to ``model_url`` with
            ``temperature=0`` for deterministic output.
        """
        if self.model_type == ModelType.LOCAL:
            return AutoModelForCausalLM.from_pretrained(
                self.module_path,
                torch_dtype=self.dtype,
                trust_remote_code=True
            ).cuda().eval()
        else:
            return ChatOpenAI(
                base_url=self.model_url,
                api_key=self.model_key,
                model=self.model_name,
                temperature=0,
            )

class Agent:
    """ReAct agent wrapping a :class:`Model` with tools and conversation memory."""

    def __init__(self, model: Model, tools, system_prompt, response_format) -> None:
        """Build the ReAct agent graph.

        Args:
            model: backend configuration; its implementation is loaded here.
            tools: tool list forwarded to ``create_react_agent``.
            system_prompt: system prompt used by the agent.
            response_format: structured-output schema forwarded to the graph.
        """
        self.model = model
        self.modelImpl = model.load_model()
        self.system_prompt = system_prompt
        # Build the ReAct tool-calling loop; InMemorySaver checkpoints
        # conversation state per thread_id.
        self.graph = create_react_agent(
            self.modelImpl,
            tools=tools,
            checkpointer=InMemorySaver(),
            prompt=self.system_prompt,
            response_format=response_format,
        )
        if self.model.model_type == ModelType.LOCAL:
            # Bug fix: Model.__init__ stores the weights path as
            # `module_path`; reading `model_path` raised AttributeError.
            self.processor = DeepseekVLV2Processor.from_pretrained(
                self.model.module_path)

    def send_message(self, text: str, thread_id: str = "default"):
        """Send one user message through the agent and return the final state.

        Bug fix: a graph compiled with a checkpointer must be invoked with a
        `thread_id` in the config (invoking without one raises); `invoke`
        also accepts no `streaming` keyword, so it was dropped. The new
        `thread_id` parameter defaults to a single shared thread, keeping
        existing callers working.
        """
        config = {"configurable": {"thread_id": thread_id}}
        message = self.graph.invoke(
            {"messages": [{"role": "user", "content": text}]},
            config,
        )
        return message
