|
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.chat_models import AzureChatOpenAI
from langchain.chains.conversation.memory import ConversationBufferWindowMemory

import os

# Azure OpenAI credentials are read from environment variables.
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
OPENAI_API_BASE = os.getenv("OPENAI_API_BASE")

llm = AzureChatOpenAI(
    deployment_name="bitservice_chat_35",
    openai_api_base=OPENAI_API_BASE,
    openai_api_key=OPENAI_API_KEY,
    openai_api_version="2023-03-15-preview",
    model_name="gpt-3.5-turbo",
)
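# Optional sanity check (illustrative, not part of the original script): confirm
# the Azure deployment responds before wiring it into the agent. `predict` is
# the plain string-in/string-out call on LangChain chat models of this vintage.
# print(llm.predict("Hello"))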
|
|
|
|
|
import torch
from transformers import BlipProcessor, BlipForConditionalGeneration

# BLIP image-captioning model that will back the agent's vision tool.
image_to_text_model = "Salesforce/blip-image-captioning-large"
device = "cuda" if torch.cuda.is_available() else "cpu"

processor = BlipProcessor.from_pretrained(image_to_text_model)
model = BlipForConditionalGeneration.from_pretrained(image_to_text_model).to(device)
|
|
|
from PIL import Image


def describeImage(image_url):
    # Despite the parameter name, this receives a local file path (Gradio is
    # configured with type="filepath" below), which PIL opens directly.
    image_object = Image.open(image_url).convert("RGB")
    inputs = processor(image_object, return_tensors="pt").to(device)
    outputs = model.generate(**inputs)
    return processor.decode(outputs[0], skip_special_tokens=True)
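# Quick check of the captioner on its own (hypothetical file name; the output
# shown is only indicative of the kind of caption BLIP returns):
# print(describeImage("sample.jpg"))  # e.g. "a cat sitting on a window sill"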
|
|
|
|
|
from langchain.tools import BaseTool


class DescribeImageTool(BaseTool):
    # BaseTool is a pydantic model, so the field overrides carry type
    # annotations; the description tells the agent when to pick this tool.
    name: str = "Describe Image Tool"
    description: str = "use this tool to describe an image."

    def _run(self, url: str):
        return describeImage(url)

    def _arun(self, query: str):
        raise NotImplementedError("Async operation not supported yet")


tools = [DescribeImageTool()]
|
|
|
|
|
agent = initialize_agent(
    agent=AgentType.CHAT_CONVERSATIONAL_REACT_DESCRIPTION,
    tools=tools,
    llm=llm,
    verbose=True,
    max_iterations=3,
    early_stopping_method="generate",
    memory=ConversationBufferWindowMemory(
        memory_key="chat_history",
        k=5,  # keep only the last five exchanges in context
        return_messages=True,
    ),
)
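# Illustrative direct invocation (hypothetical path): the agent decides on its
# own to call DescribeImageTool for the image, then answers in English. The
# result dict exposes the final answer under "output", as used further below.
# result = agent("What is in this image?:\n/path/to/photo.jpg")
# print(result["output"])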
|
|
|
from langchain.chains import LLMChain
from langchain.prompts import PromptTemplate


def enToChinese(english):
    # Single-variable prompt that asks the LLM to translate into Chinese.
    pp = "Please translate the following sentence from English to Chinese:{english}"
    prompt = PromptTemplate(input_variables=["english"], template=pp)
    llchain = LLMChain(llm=llm, prompt=prompt)
    return llchain.run(english)
|
|
|
|
|
def chToEnglish(chinese):
    # Mirror of enToChinese for the opposite direction.
    pp = "Please translate the following sentence from Chinese to English:{chinese}"
    prompt = PromptTemplate(input_variables=["chinese"], template=pp)
    llchain = LLMChain(llm=llm, prompt=prompt)
    return llchain.run(chinese)
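# Round-trip check of the two translation chains (illustrative strings only):
# print(chToEnglish("这张图片里有什么？"))   # -> "What is in this picture?"
# print(enToChinese("A dog running on the beach."))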
|
|
|
import gradio as gr


def segment(image, text):
    # Gradio hands over the uploaded image as a local file path.
    image_url = image
    # Translate the Chinese question to English, let the agent answer using the
    # image tool, then translate the answer back to Chinese for display.
    text = chToEnglish(text)
    return enToChinese(agent(f"{text}:\n{image_url}").get("output"))
|
|
|
demo = gr.Interface(
    fn=segment,
    inputs=[
        # Chinese UI labels: "please choose an image" / "please enter a question".
        gr.Image(type="filepath", shape=(200, 200), label="请选择一张图片"),
        gr.components.Textbox(label="请输入问题"),
    ],
    # Output label means "answer".
    outputs=[gr.components.Textbox(label="答案", lines=4)],
)
demo.launch()
|
|
|
|