# skynet / app.py
# Source: Hugging Face Space "skynet" by crislmfroes
# (commit 5659cad, "Add python repl and shell tools").
import io
import random
from typing import List, Tuple
import os
import aiohttp
import panel as pn
from gradio_tools import *
from langchain_experimental.tools import PythonREPLTool
from langchain.prompts import MessagesPlaceholder
from langchain.memory import ConversationBufferMemory
from langchain.callbacks import *
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.agents import initialize_agent
from langchain.tools import StructuredTool, ShellTool
from langchain.agents import AgentType, load_tools
from langchain.agents.agent_toolkits.openapi.spec import reduce_openapi_spec
from langchain.agents.agent_toolkits.openapi import planner
from langchain.requests import RequestsWrapper
from langchain.utilities import *
from panel.chat import ChatInterface
from panel.chat.langchain import PanelCallbackHandler
from uuid import uuid4
import io
import panel
import yaml
from PIL import Image
import logging
import audio2numpy as a2n
import scipy
def exception_handler(ex):
    """Log an uncaught exception and surface it to the user as a UI notification.

    Registered globally via ``pn.extension(exception_handler=...)`` so that any
    error raised inside a Panel callback is both written to the log (with full
    traceback) and shown as an error toast in the browser.
    """
    logging.error("Error", exc_info=ex)
    # f-string replaces the dated %-formatting; output string is identical.
    panel.state.notifications.error(f'Error: {ex}')
# Load the Perspective table extension, then configure Panel globally:
# route uncaught callback errors through exception_handler and enable
# toast notifications so those errors are visible in the browser.
pn.extension("perspective")
pn.extension(exception_handler=exception_handler, notifications=True)
def on_user_message(contents, user, instance):
    """Chat callback: forward the user's message (or uploaded file) to the agent.

    If a file is attached via the global ``file_input`` widget, the upload is
    normalized (images -> .png via PIL, audio -> .wav via audio2numpy/scipy),
    written under /tmp with a fresh UUID name, and the agent is told about the
    path.  Otherwise the raw text message is passed to the agent.

    Returns the agent's textual response, which ChatInterface displays.
    """
    global file_input
    if file_input.value is not None:
        # Bug fix: the computed `filename` was never interpolated into the
        # /tmp path or the agent prompt, so uploads pointed at a bogus path.
        if file_input.filename.endswith((".png", ".jpg", ".jpeg")):
            filename = str(uuid4()) + ".png"
            Image.open(contents).save(f'/tmp/{filename}')
        elif file_input.filename.endswith((".mp3", ".wav")):
            filename = str(uuid4()) + ".wav"
            audio, rate = a2n.audio_from_file(contents)
            scipy.io.wavfile.write(f'/tmp/{filename}', rate, audio)
        else:
            # Extension slipped past the widget's accept filter (e.g. odd
            # casing): don't crash on an unbound `filename` — fall back to
            # treating the message as plain text.
            file_input.value = None
            return agent.run(contents)
        # Clear the widget so the same file is not re-processed next message.
        file_input.value = None
        return agent.run(f"Uploaded: /tmp/{filename}")
    else:
        return agent.run(contents)
def send_image(filepath: str):
    """Sends to the user the image stored in the given filepath"""
    image = Image.open(filepath)
    chat_interface.send(image, user="Assistant", respond=False)
    return 'success'
def send_audio(filepath: str):
    """Sends to the user the audio stored in the given filepath"""
    samples, sample_rate = a2n.audio_from_file(filepath)
    audio_pane = panel.pane.Audio(samples, sample_rate=sample_rate)
    chat_interface.send(audio_pane, user="Assistant", respond=False)
    return 'success'
# Upload widget for attaching an image or audio clip to a chat message;
# on_user_message inspects it on every message and clears it once consumed.
file_input = panel.widgets.FileInput(name="File Upload", accept=".png,.jpg,.jpeg,.mp3,.wav")
# Chat UI: every submitted message is routed through on_user_message.
chat_interface = ChatInterface(
    callback=on_user_message,
    widgets=[
        panel.widgets.TextAreaInput(placeholder="Enter your message here."),
        file_input
    ]
)
# LLM backing the agent; streams tokens to stdout for server-side debugging.
llm = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True, callbacks=[StreamingStdOutCallbackHandler()])#, PanelCallbackHandler(instance=chat_interface, user='Assistant', avatar=panel.chat.langchain.DEFAULT_AVATARS["assistant"])])
# Start from LangChain's built-in weather tool and inject its API key
# from the environment (tools[0] is the openweathermap tool just loaded).
tools = load_tools(["openweathermap-api"], llm)
tools[0].api_wrapper.openweathermap_api_key = os.environ.get('OPENWEATHERMAP_API_KEY')
# Media tools: gradio_tools wrappers (duplicate=True spins up private copies
# of the underlying Spaces) plus the two local chat-output helpers above.
tools += [
    ImageCaptioningTool(duplicate=True).langchain,
    WhisperAudioTranscriptionTool(duplicate=True).langchain,
    BarkTextToSpeechTool(duplicate=True).langchain,
    StableDiffusionTool(duplicate=True).langchain,
    StructuredTool.from_function(send_image),
    StructuredTool.from_function(send_audio)
]
# Code-execution tools (arbitrary Python / shell — trusted-user deployment).
tools += [
    PythonREPLTool(),
    ShellTool()
]
# Conversation history is exposed to the agent via the "memory" placeholder
# added to the prompt through extra_prompt_messages below.
memory = ConversationBufferMemory(memory_key="memory", return_messages=True)
agent = initialize_agent(tools=tools, llm=llm, agent=AgentType.STRUCTURED_CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True, agent_kwargs=dict(extra_prompt_messages=[MessagesPlaceholder(variable_name="memory")]), memory=memory)
# Serve the chat interface as the app's main (and only) page.
main = chat_interface
title = "SkyNet Research Preview"
main.servable(title=title)