# NOTE(review): this entire module is commented-out dead code (the former
# `OpenAIChatImgModel` registered as "open_ai_chat_img_model"). If it is no
# longer needed, delete the file — version control preserves the history;
# otherwise add a comment explaining why it is retained in commented form.
# import json
#
# from langchain_core.messages import HumanMessage, SystemMessage
# from langchain_openai import ChatOpenAI
#
# from app.project.doc_to_recommendation.llm.model.OpenAiChat_base_model import OpenAIChatBaseModel
# from app.project.doc_to_recommendation.llm.model.base_model import BaseModel
# from app.project.doc_to_recommendation.llm.register.llm_register import LLM_REGISTER
#
#
# @LLM_REGISTER.register_model("open_ai_chat_img_model")
# class OpenAIChatImgModel(OpenAIChatBaseModel):
#
#     def __init__(self, config: dict):
#         super().__init__(config)
#
#     def bind_tools_df(self):
#         pass
#
#     def agent_calls(self, text, image=None, prompt=None):
#         img_result = super().agent_calls(text, image, prompt)
#         if self.bind_t:
#             if len(img_result.tool_calls) >= 1:
#                 imgDscp = img_result.tool_calls[0]['args']
#                 print("figure:", text, " type:", imgDscp['type'], " description:", imgDscp['description'])
#                 if imgDscp['type'] == 'img':
#                     return {'name': text, 'type': imgDscp['type'], 'description': imgDscp['description']}
#             else:
#                 print("figure:", text, " type:", img_result)
#                 return {'name': text, 'type': 'img', 'description': img_result.content}
#         else:
#             res = img_result.content.replace('json', '', 1).replace('\n', '').replace('`', '')
#             res = json.loads(res)
#             print("figure:", text, " type:", res['type'], " description:", res['description'])
#             return {'name': text, 'type': res['type'], 'description': res['description']}
