# mm-react / AllInOneApp / langimg_azure_openapi.py
import os
from langchain.agents import load_tools, initialize_agent, Tool
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms import AzureOpenAI
from langchain.utilities import ImunAPIWrapper
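# Note: ImunAPIWrapper (Image Understanding) ships with MM-ReAct's customized
# langchain build, not with upstream langchain, so this script assumes that
# fork is installed.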
MAX_TOKENS = 512
# llm = OpenAI(temperature=0, max_tokens=MAX_TOKENS)
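# Azure OpenAI completion endpoint. Assumes the standard OPENAI_API_TYPE /
# OPENAI_API_BASE / OPENAI_API_KEY / OPENAI_API_VERSION environment variables
# are set for the Azure resource hosting the "text-chat-davinci-002" deployment.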
llm = AzureOpenAI(
    deployment_name="text-chat-davinci-002",
    model_name="text-chat-davinci-002",
    temperature=1,
    top_p=0.9,
    max_tokens=MAX_TOKENS,
)
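# 'pal-math' is a standard langchain tool; 'imun' is the Image Understanding
# tool registered by the MM-ReAct fork.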
tool_names = ['pal-math', 'imun']
tools = load_tools(tool_names, llm=llm)
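# Buffer memory so the conversational agent can refer back to earlier turns.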
memory = ConversationBufferMemory(memory_key="chat_history")
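# Image Understanding wrapper built from the wrapper's defaults (presumably
# the IMUN_URL / IMUN_SUBSCRIPTION_KEY environment variables); it is defined
# here but not used by the tools below.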
imun_prod_face = ImunAPIWrapper(
    params="api-version=2023-02-01-preview&model-version=latest&features=Read"
)
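# Read (OCR) wrapper pointed at an explicit Computer Vision endpoint, with its
# own subscription key.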
imun_prod_read = ImunAPIWrapper(
    imun_url="https://cognitivewudev.azure-api.net/computervision/imageanalysis:analyze",
    params="api-version=2023-02-01-preview&model-version=latest&features=Read",
    imun_subscription_key=os.environ["IMUN_SUBSCRIPTION_KEY2"],
)
extra_tools = [
    Tool(
        name="OCR Understanding",
        func=imun_prod_read.run,
        description=(
            "A wrapper around OCR Understanding (Optical Character Recognition). "
            "Useful after the Image Understanding tool has found that text or "
            "handwriting is present in the image tags. "
            "This tool can find the actual text. "
            "Input should be an image url, or path to an image file (e.g. .jpg, .png)."
        ),
    ),
]
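# Wire the loaded tools plus the OCR tool into a conversational ReAct agent.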
chain = initialize_agent(
    tools + extra_tools,
    llm,
    agent="conversational-react-description",
    verbose=True,
    memory=memory,
)
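# Run the agent on a local demo image; the ReAct loop decides which tool(s)
# to call (e.g. Image Understanding, then OCR) before answering.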
output = chain.run("/data/images/langchain_demo/DemoTest/DemoTest/pillbottlefront.png")
print(output)
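# Because the buffer memory keeps the chat history, a follow-up question about
# the same image can be asked in a second call (hypothetical prompt below):
# output = chain.run("What does the label on that bottle say?")
# print(output)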