from langchain.tools import BaseTool
from langchain_core.prompts import ChatPromptTemplate
# from langchain.chains import GraphCypherQAChain
from langchain_neo4j import Neo4jGraph, GraphCypherQAChain
from pydantic import BaseModel, Field, ConfigDict
from typing import Any, Type, Optional
from dotenv import load_dotenv
from model.my_chat_model import ChatModel
import os


# Input schema for the chat tool.
class chatInput(BaseModel):
    """Pydantic model describing ChatTool's single input: a free-form question."""

    # NOTE(review): class name is not PascalCase; kept as-is because it is
    # referenced elsewhere in this file (ChatTool.args_schema) — renaming
    # would require a coordinated change there.
    question: str = Field(..., description="问题")


# Tool wrapper that forwards a user question to the local chat model.
class ChatTool(BaseTool):
    """LangChain tool for answering general questions with a local LLM.

    The tool takes one required argument, ``question`` (validated by
    :class:`chatInput`), and returns whatever the model's ``invoke`` yields.
    """

    # Permit non-pydantic attribute types on this tool instance.
    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Argument schema exposed to the agent for input validation.
    args_schema: Type[BaseModel] = chatInput

    def __init__(self, **kwargs: Any):
        # Fixed name/description so an agent can identify and select this tool.
        super().__init__(
            name="get_chat_tool",
            description="主要用于回答其他问题，必须输入的参数是问题",
            **kwargs
        )

    def _run(self, question: str):
        """Answer *question* by invoking the locally configured chat model."""
        # Load environment variables (model endpoints, keys, ...) before use.
        load_dotenv()
        # Build the local model and forward the question to it.
        # NOTE(review): the raw invoke() result is returned unchanged — callers
        # may receive a message object rather than a plain string; confirm
        # whether downstream code expects `.content` instead.
        model = ChatModel().get_local_model()
        return model.invoke(question)
