import logging
import os
from uuid import uuid4

import audio2numpy as a2n
import panel as pn
import scipy.io.wavfile
from PIL import Image

from gradio_tools import (
    BarkTextToSpeechTool,
    ImageCaptioningTool,
    StableDiffusionTool,
    WhisperAudioTranscriptionTool,
)
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.callbacks import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import MessagesPlaceholder
from langchain.tools import StructuredTool, ShellTool
# PythonREPLTool backs the optional code-execution tool commented out below.
from langchain_experimental.tools import PythonREPLTool
from panel.chat import ChatInterface


def exception_handler(ex):
    """Log unexpected errors and surface them as a Panel notification."""
    logging.error("Error", exc_info=ex)
    pn.state.notifications.error('Error: %s' % ex)


pn.extension("perspective")
pn.extension(nthreads=0)
pn.extension(exception_handler=exception_handler, notifications=True)
pn.param.ParamMethod.loading_indicator = True


def on_user_message(contents, user, instance):
    """Chat callback: persist uploads to /tmp, then forward everything to the agent."""
    global file_input
    global text_input
    text_input.value = ""
    if file_input.value is not None:
        # The FileInput's `accept` filter guarantees one of these branches matches.
        if file_input.filename.endswith((".png", ".jpg", ".jpeg")):
            filename = str(uuid4()) + ".png"
            Image.open(contents).save(f'/tmp/{filename}')
        elif file_input.filename.endswith((".mp3", ".wav")):
            filename = str(uuid4()) + ".wav"
            audio, rate = a2n.audio_from_file(contents)
            scipy.io.wavfile.write(f'/tmp/{filename}', rate, audio)
        file_input.value = None
        # Tell the agent where the upload landed so its tools can pick it up.
        return agent.run(f"Uploaded: /tmp/{filename}")
    else:
        return agent.run(contents)


def send_image(filepath: str):
    """Sends to the user the image stored in the given filepath."""
    chat_interface.send(Image.open(filepath), user="Assistant", respond=False)
    return 'success'


def send_audio(filepath: str):
    """Sends to the user the audio stored in the given filepath."""
    audio, rate = a2n.audio_from_file(filepath)
    chat_interface.send(pn.pane.Audio(audio, sample_rate=rate), user="Assistant", respond=False)
    return 'success'


file_input = pn.widgets.FileInput(name="File Upload", accept=".png,.jpg,.jpeg,.mp3,.wav")
text_input = pn.widgets.TextInput(name="Message", placeholder="Enter your message here.")

# Create the chat interface.
chat_interface = ChatInterface(
    callback=on_user_message,
    widgets=[text_input, file_input],
    show_undo=False,
    show_clear=False,
)

llm = ChatOpenAI(
    model_name="gpt-3.5-turbo-1106",
    temperature=0,
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],
)

tools = load_tools(["openweathermap-api"], llm)
tools[0].api_wrapper.openweathermap_api_key = os.environ.get('OPENWEATHERMAP_API_KEY')
tools += [
    # Gradio-hosted multimodal tools, wrapped for LangChain.
    ImageCaptioningTool(duplicate=True).langchain,
    WhisperAudioTranscriptionTool(duplicate=True).langchain,
    BarkTextToSpeechTool(duplicate=True).langchain,
    StableDiffusionTool(duplicate=True).langchain,
    # Local tools that push generated media back into the chat.
    StructuredTool.from_function(send_image),
    StructuredTool.from_function(send_audio),
]
tools += [
    # Uncomment to give the agent code-execution abilities (use with care).
    # PythonREPLTool(),
    # ShellTool(),
]

# Conversation history is injected into the prompt via the "memory" placeholder.
memory = ConversationBufferMemory(memory_key="memory", return_messages=True)
agent = initialize_agent(
    tools=tools,
    llm=llm,
    agent=AgentType.OPENAI_FUNCTIONS,
    verbose=True,
    agent_kwargs=dict(extra_prompt_messages=[MessagesPlaceholder(variable_name="memory")]),
    memory=memory,
)

main = chat_interface
title = "SkyNet Research Preview"
main.servable(title=title)