from langchain.agents import  initialize_agent,AgentType,create_react_agent,AgentExecutor
from langchain_community.agent_toolkits.load_tools import load_tools
from langchain.chains.conversation.memory import ConversationBufferWindowMemory
from langchain_core.tools import BaseTool

import os

# Use the HF mirror endpoint so model downloads work behind restricted networks.
os.environ['HF_ENDPOINT'] = 'https://hf-mirror.com'

# SECURITY NOTE(review): a live-looking API key is committed here. Prefer the
# ZHIPUAI_API_KEY environment variable; the inline literal is kept only as a
# backward-compatible fallback and should be rotated/removed.
API_KEY = os.environ.get('ZHIPUAI_API_KEY', '794380a4cee054a0f96bb2844b41fd12.X4t70kph1CfmoKfT')
BASE_PATH = 'https://open.bigmodel.cn/api/paas/v4/'  # ZhipuAI OpenAI-compatible endpoint
MODEL_NAME = 'glm-4'

from langchain_openai import ChatOpenAI

# GLM-4 served through an OpenAI-compatible API; streaming enabled for agent use.
llm = ChatOpenAI(
    model_name=MODEL_NAME,
    temperature=0.7,
    streaming=True,
    openai_api_key=API_KEY,
    base_url=BASE_PATH,
)

import torch
from transformers import BlipForConditionalGeneration,BlipProcessor

# BLIP image-captioning checkpoint used to turn an image into an English caption.
# Salesforce/blip-image-captioning-large
image_to_text_model="Salesforce/blip-image-captioning-large"
# Prefer GPU when available; fall back to CPU otherwise.
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Loaded once at import time and reused by describeImage() below.
processor = BlipProcessor.from_pretrained(image_to_text_model)
model = BlipForConditionalGeneration.from_pretrained(image_to_text_model).to(device)

from transformers.models.oneformer.modeling_oneformer import OneFormerModelOutput
import requests
from PIL import Image


# Describe an image (original comment: 描述图片)
def describeImage(url):
    """Download the image at *url* and return a BLIP-generated caption.

    Args:
        url: HTTP(S) URL of the image to caption.

    Returns:
        str: the decoded caption with special tokens stripped.

    Raises:
        requests.HTTPError: if the server responds with an error status.
    """
    # timeout so a hung download cannot block the agent forever
    response = requests.get(url, stream=True, timeout=30)
    # fail fast on HTTP errors instead of handing PIL an error page
    response.raise_for_status()
    image = Image.open(response.raw).convert('RGB')
    inputs = processor(images=image, return_tensors="pt").to(device)
    outputs = model.generate(**inputs)
    return processor.decode(outputs[0], skip_special_tokens=True)


class DescribeImageTool(BaseTool):
    """LangChain tool exposing describeImage() to the agent."""

    # Annotated as pydantic str fields — required by newer BaseTool versions.
    name: str ="Describe Image Tool"
    description: str ="use this tool to describe Image，使用这个工具来描述图片"

    def _run(self,url:str) -> str:
        """Synchronously caption the image at *url* via describeImage()."""
        description = describeImage(url)
        return description

    def _arun(self,query:str):
        """Async execution is not implemented; always raises."""
        raise NotImplementedError("Async operation not supported yet")
    
tools = [DescribeImageTool()]

# NOTE(review): initialize_agent was deprecated in LangChain 0.1.0. It is kept
# here because the conversational-memory wiring differs under the new
# create_react_agent/AgentExecutor constructors; migrate when the stack is
# upgraded rather than as part of this change.
agent = initialize_agent(
    agent="chat-conversational-react-description",
    tools=tools,
    llm=llm,
    verbose=False,
    max_iterations=3,  # bound tool-use loops so a confused model cannot spin forever
    early_stopping_method="generate",
    memory=ConversationBufferWindowMemory(
        memory_key="chat_history",
        k=5,  # keep only the last 5 exchanges in context
        return_messages=True,
    ),
)

url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
# Chain.run was deprecated in LangChain 0.1.0; invoke() is the replacement.
# It takes a dict of inputs and returns a dict; the answer is under "output".
result = agent.invoke({"input": f"用中文描述一下下面的图片:\n {url}"})
print(result["output"])


