darthPanda committed
Commit 6525f15 • Parent(s): 5a07925

Upload 3 files

Files changed (3):
  1. app.py +70 -0
  2. multi_agent_chatbot.py +66 -0
  3. requirements.txt +6 -0
app.py ADDED
@@ -0,0 +1,70 @@
+ import gradio as gr
+ import os
+ import time
+ from multi_agent_chatbot import multi_agent_chatbot
+
+ # Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video), with support for streaming text.
+
+ # Instantiate the backend once so conversation state persists across turns.
+ assistant = multi_agent_chatbot()
+
+
+ def print_like_dislike(x: gr.LikeData):
+     print(x.index, x.value, x.liked)
+
+
+ def add_text(history, text):
+     history = history + [(text, None)]
+     # Clear and disable the textbox while the bot responds.
+     return history, gr.Textbox(value="", interactive=False)
+
+
+ def add_file(history, file):
+     # Uploads are stored in the history as a one-element tuple of file paths.
+     history = history + [((file.name,), None)]
+     return history
+
+
+ def bot(history):
+     response = assistant.respond(history[-1][0])
+     # Stream the reply back character by character.
+     history[-1][1] = ""
+     for character in response:
+         history[-1][1] += character
+         time.sleep(0.05)
+         yield history
+
+
+ with gr.Blocks() as demo:
+     gr.Markdown("<h1 style='text-align: center; color: #ffffff; font-size: 40px;'>🏡 🤖 Garden Whisperer AI - Your personal kitchen gardening assistant (POC)</h1>")
+     chatbot = gr.Chatbot(
+         [],
+         elem_id="chatbot",
+         height=800,
+         bubble_full_width=False,
+         avatar_images=(None, os.path.join(os.path.dirname(__file__), "assets", "avatar.png")),
+     )
+
+     with gr.Row():
+         txt = gr.Textbox(
+             scale=4,
+             show_label=False,
+             placeholder="Enter text and press enter, or upload an image",
+             container=False,
+         )
+         btn = gr.UploadButton("📁", file_types=["image", "video", "audio"])
+
+     txt_msg = txt.submit(add_text, [chatbot, txt], [chatbot, txt], queue=False).then(
+         bot, chatbot, chatbot, api_name="bot_response"
+     )
+     # Re-enable the textbox once the bot has finished streaming.
+     txt_msg.then(lambda: gr.Textbox(interactive=True), None, [txt], queue=False)
+     file_msg = btn.upload(add_file, [chatbot, btn], [chatbot], queue=False).then(
+         bot, chatbot, chatbot
+     )
+
+     chatbot.like(print_like_dislike, None, None)
+
+
+ demo.queue()
+ if __name__ == "__main__":
+     demo.launch()
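
Note: Gradio's tuple-based Chatbot history stores a text turn as a plain string and a file upload as a one-element tuple of file paths, so bot() forwards history[-1][0] unchanged and multi_agent_chatbot.respond() type-checks its query argument. A minimal sketch of the shape (values hypothetical):

    # Each history entry is (user_message, bot_message).
    history = [
        ("What should I plant on a shaded balcony?", None),  # text turn: str
        (("/tmp/gradio/abc123/balcony.png",), None),         # upload turn: tuple of file paths
    ]
    last_user_message = history[-1][0]  # str for text, tuple for a file upload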
multi_agent_chatbot.py ADDED
@@ -0,0 +1,66 @@
+ from llama_index.legacy.multi_modal_llms.openai_utils import (
+     generate_openai_multi_modal_chat_message,
+ )
+ from llama_index.multi_modal_llms.openai import OpenAIMultiModal
+ from llama_index.core import SimpleDirectoryReader
+ import os
+ from dotenv import load_dotenv
+
+ load_dotenv()  # take environment variables from .env
+
+
+ class multi_agent_chatbot():
+     def __init__(self):
+         conversational_flow_prompt = """
+         You are a helpful assistant named 'Garden Whisperer AI' specialising in helping users set up kitchen gardens.
+         You will first onboard a person in the following steps:
+         1. Ask them to share a picture of their space, and analyse it for sunlight, water drainage, etc.
+         2. Ask them about the weather and suggest plants.
+         3. Once the user settles on a plant, give them suggestions on how to grow it and where to place it in the space.
+         4. Give the user a schedule for when to water it.
+         5. After that, help the user with anything else they want to know.
+
+         Keep it brief, simple and fun.
+         """
+         # Seed the conversation with the system prompt.
+         chat_query = generate_openai_multi_modal_chat_message(
+             prompt=conversational_flow_prompt,
+             role="system",
+         )
+         self.chat_messages = [chat_query]
+         self.openai_mm_llm = OpenAIMultiModal(
+             model="gpt-4-vision-preview", api_key=os.environ["OPENAI_API_KEY"], max_new_tokens=300
+         )
+
+     def respond(self, query):
+         # Text turns arrive as a str; file uploads arrive as a tuple of file paths.
+         if isinstance(query, str):
+             chat_query = generate_openai_multi_modal_chat_message(
+                 prompt=query,
+                 role="user",
+             )
+         else:
+             image_documents = SimpleDirectoryReader(input_files=list(query)).load_data()
+             chat_query = generate_openai_multi_modal_chat_message(
+                 prompt="",
+                 role="user",
+                 image_documents=image_documents,
+             )
+
+         self.chat_messages.append(chat_query)
+         response = self.openai_mm_llm.chat(
+             messages=self.chat_messages,
+         )
+         # Record the assistant reply so later turns keep the full context.
+         chat_response = generate_openai_multi_modal_chat_message(
+             prompt=response.message.content,
+             role="assistant",
+         )
+         self.chat_messages.append(chat_response)
+
+         return response.message.content
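
Note: a minimal usage sketch of the class above (assumes OPENAI_API_KEY is available via .env; the image path is hypothetical):

    from multi_agent_chatbot import multi_agent_chatbot

    bot = multi_agent_chatbot()
    print(bot.respond("Hi! I want to start a kitchen garden."))  # text query -> str reply
    print(bot.respond(("photos/balcony.jpg",)))                  # image upload -> str reply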
requirements.txt ADDED
@@ -0,0 +1,6 @@
+ openai
+ python-dotenv
+ gradio
+ wandb
+ gradio_multimodalchatbot
+ llama-index
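
Note: the app expects OPENAI_API_KEY in a local .env file, loaded via python-dotenv. A minimal smoke test to confirm the key is picked up before launching:

    from dotenv import load_dotenv
    import os

    load_dotenv()  # reads .env from the working directory
    assert os.environ.get("OPENAI_API_KEY"), "OPENAI_API_KEY missing from .env"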