from langchain.agents import initialize_agent
from langchain.chat_models import AzureChatOpenAI
from langchain.chains.conversation.memory import ConversationBufferWindowMemory

import os

# Azure OpenAI credentials are read from environment variables.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE")

# Chat model that will drive the agent: a gpt-3.5-turbo deployment on Azure OpenAI.
llm = AzureChatOpenAI(
    deployment_name="bitservice_chat_35",
    openai_api_base=OPENAI_API_BASE,
    openai_api_key=OPENAI_API_KEY,
    openai_api_version="2023-03-15-preview",
    model_name="gpt-3.5-turbo",
)

import torch
from transformers import BlipProcessor, BlipForConditionalGeneration

# BLIP image-captioning model used to turn an image into a text description.
image_to_text_model = "Salesforce/blip-image-captioning-large"
device = 'cuda' if torch.cuda.is_available() else 'cpu'

processor = BlipProcessor.from_pretrained(image_to_text_model)
model = BlipForConditionalGeneration.from_pretrained(image_to_text_model).to(device)

from PIL import Image


def describeImage(image_url):
    # Load the image from a local file path and make sure it is in RGB mode.
    image_object = Image.open(image_url).convert('RGB')

    # Run BLIP captioning and decode the generated tokens into plain text.
    inputs = processor(image_object, return_tensors="pt").to(device)
    outputs = model.generate(**inputs)
    return processor.decode(outputs[0], skip_special_tokens=True)
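
# Optional sanity check of the captioning step on its own, before wiring it into the
# agent. "sample.jpg" is only a placeholder; point it at any local image file.
# print(describeImage("sample.jpg"))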

from langchain.tools import BaseTool


class DescribeImageTool(BaseTool):
    """Tool the agent can call to get a text description of an image."""

    name: str = "Describe Image Tool"
    description: str = 'use this tool to describe an image.'

    def _run(self, url: str):
        description = describeImage(url)
        return description

    def _arun(self, query: str):
        raise NotImplementedError("Async operation not supported yet")


tools = [DescribeImageTool()]

# Conversational ReAct agent with a short windowed memory of the chat history.
agent = initialize_agent(
    agent='chat-conversational-react-description',
    tools=tools,
    llm=llm,
    verbose=True,
    max_iterations=3,
    early_stopping_method='generate',
    memory=ConversationBufferWindowMemory(
        memory_key='chat_history',
        k=5,
        return_messages=True
    )
)
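
# The agent can also be queried directly, without the Gradio UI below. "sample.jpg"
# is only a placeholder path; the agent should hand it to DescribeImageTool.
# result = agent("Describe the following image:\nsample.jpg")
# print(result['output'])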

import gradio as gr


def segment(image):
    # Gradio passes the uploaded image as a local file path (type="filepath").
    print(image)
    image_url = image

    # Ask the agent to describe the uploaded image and strip its boilerplate preamble.
    return agent(f"Describe the following image:\n{image_url}").get('output').replace('The response to your last comment is', '')


demo = gr.Interface(segment, gr.Image(type="filepath", shape=(200, 200)), "text")
demo.launch()