Robin Schülein committed on
Commit
1500b65
2 Parent(s): 1f75789 5dca02a

Merge branch 'feat/chatbot' into 'master'

Browse files

Save rough draft of chatbot

See merge request animalequality/lv-recipe-chatbot!1

.env.example ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ OPENAI_API_KEY = "sk-*"
2
+ PROMPTLAYER_API_KEY = "pl_*"
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ .env
2
+ *__pycache__
.vscode/settings.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "[python]": {
3
+ "editor.defaultFormatter": "ms-python.black-formatter"
4
+ },
5
+ "python.formatting.provider": "none"
6
+ }
README.md ADDED
@@ -0,0 +1,43 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Vegan Recipe Chatbot
2
+
3
+ ![Screenshot of Chatbot initial interface](docs/assets/chatbot_init.png)
4
+
5
+ ## Quickstart
6
+
7
+ `git clone` the repo
8
+
9
+ ```sh
10
+ cd lv-recipe-chatbot
11
+ ```
12
+
13
+ Install Python poetry for dependency management.
14
+
15
+ ```sh
16
+ poetry install
17
+ ```
18
+
19
+ Put API secrets in .env
20
+
21
+ ```sh
22
+ cp .env.example .env
23
+ # edit .env with your secret key(s). Only OPENAI_API_KEY is required.
24
+ ```
25
+
26
+ One option is to enter the poetry environment.
27
+
28
+ ```sh
29
+ poetry shell
30
+ ```
31
+
32
+ Then start the Gradio demo.
33
+
34
+ ```sh
35
+ python app.py
36
+ ```
37
+
38
+ ## Useful links
39
+
40
+ * [Task Matrix (Formerly Visual ChatGPT)](https://github.com/microsoft/TaskMatrix)
41
+ * [LangChain](https://python.langchain.com/en/latest/index.html)
42
+ * [LLM Prompt Engineering](https://www.promptingguide.ai)
43
+ * [OpenAI best practices for prompts](https://help.openai.com/en/articles/6654000-best-practices-for-prompt-engineering-with-openai-api)
chatbot/__init__.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
# Load environment variables (e.g. OPENAI_API_KEY, PROMPTLAYER_API_KEY) from a
# local .env file as a package-import side effect, so every submodule can rely
# on them being present before any LangChain/OpenAI client is constructed.
from dotenv import load_dotenv

load_dotenv()
chatbot/app.py ADDED
@@ -0,0 +1,172 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from langchain.chat_models import ChatOpenAI
3
+ from langchain.chains import ConversationChain
4
+ from langchain.memory import ConversationBufferMemory
5
+
6
+ from langchain.prompts.chat import (
7
+ HumanMessagePromptTemplate,
8
+ MessagesPlaceholder,
9
+ ChatPromptTemplate,
10
+ )
11
+ from chatbot.engineer_prompt import init_prompt
12
+
13
+ # from transformers import (
14
+ # BlipProcessor,
15
+ # BlipForConditionalGeneration,
16
+ # BlipForQuestionAnswering,
17
+ # )
18
+ # import torch
19
+ # from PIL import Image
20
+
21
+ # class ImageCaptioning:
22
+ # def __init__(self, device):
23
+ # print(f"Initializing ImageCaptioning to {device}")
24
+ # self.device = device
25
+ # self.torch_dtype = torch.float16 if "cuda" in device else torch.float32
26
+ # self.processor = BlipProcessor.from_pretrained(
27
+ # "Salesforce/blip-image-captioning-base"
28
+ # )
29
+ # self.model = BlipForConditionalGeneration.from_pretrained(
30
+ # "Salesforce/blip-image-captioning-base", torch_dtype=self.torch_dtype
31
+ # ).to(self.device)
32
+
33
+ # def inference(self, image_path):
34
+ # inputs = self.processor(Image.open(image_path), return_tensors="pt").to(
35
+ # self.device, self.torch_dtype
36
+ # )
37
+ # out = self.model.generate(**inputs)
38
+ # captions = self.processor.decode(out[0], skip_special_tokens=True)
39
+ # print(
40
+ # f"\nProcessed ImageCaptioning, Input Image: {image_path}, Output Text: {captions}"
41
+ # )
42
+ # return captions
43
+
44
+
45
+ # class VisualQuestionAnswering:
46
+ # def __init__(self, device):
47
+ # print(f"Initializing VisualQuestionAnswering to {device}")
48
+ # self.torch_dtype = torch.float16 if "cuda" in device else torch.float32
49
+ # self.device = device
50
+ # self.processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")
51
+ # self.model = BlipForQuestionAnswering.from_pretrained(
52
+ # "Salesforce/blip-vqa-base", torch_dtype=self.torch_dtype
53
+ # ).to(self.device)
54
+
55
+ # def inference(self, image_path, question):
56
+ # raw_image = Image.open(image_path).convert("RGB")
57
+ # inputs = self.processor(raw_image, question, return_tensors="pt").to(
58
+ # self.device, self.torch_dtype
59
+ # )
60
+ # out = self.model.generate(**inputs)
61
+ # answer = self.processor.decode(out[0], skip_special_tokens=True)
62
+ # print(
63
+ # f"\nProcessed VisualQuestionAnswering, Input Image: {image_path}, Input Question: {question}, "
64
+ # f"Output Answer: {answer}"
65
+ # )
66
+ # return
67
+
68
+
69
class ConversationBot:
    """Vegan-recipe chatbot with a two-phase conversation.

    Phase 1 (scripted): walks the user through the fixed intake questions
    defined in ``init_prompt`` (ingredients -> allergies -> preferences)
    without calling the LLM.

    Phase 2 (open): once all three answers are collected, generates the
    first recipe via the LLM and hands off to a ``ConversationChain`` seeded
    with the full intake transcript.
    """

    # NOTE(review): a commented-out draft of image-upload support
    # (run_image / BLIP captioning) was removed as dead code; see git history.

    def __init__(self):
        self.chat = ChatOpenAI(temperature=1, verbose=True)
        self.memory = ConversationBufferMemory(return_messages=True)
        # Open-ended chain; created lazily by init_conversation() once the
        # scripted intake completes. BUG FIX: previously this attribute did
        # not exist until then, so reaching the open phase early raised a
        # bare AttributeError.
        self.conversation = None
        self.init_prompt_msgs = init_prompt.messages
        # The AI turns of the scripted prompt sit at the odd indices of
        # init_prompt (system=0, then alternating AI question / human answer).
        self.ai_prompt_questions = {
            "ingredients": self.init_prompt_msgs[1],
            "allergies": self.init_prompt_msgs[3],
            "recipe_open_params": self.init_prompt_msgs[5],
        }

    def respond(self, user_msg, chat_history):
        """Gradio submit handler.

        Appends the (user, bot) pair to ``chat_history`` and returns an empty
        string to clear the textbox, plus the updated history.
        """
        response = self._get_bot_response(user_msg, chat_history)
        chat_history.append((user_msg, response))
        return "", chat_history

    def init_conversation(self, formatted_chat_prompt):
        """Build the open-ended chain from the fully formatted intake prompt."""
        self.conversation = ConversationChain(
            llm=self.chat,
            memory=self.memory,
            prompt=formatted_chat_prompt,
            verbose=True,
        )

    def reset(self):
        """Clear LLM memory and drop the stale chain so a fresh conversation
        is rebuilt from a new intake instead of replaying old context."""
        self.memory.clear()
        self.conversation = None

    def _get_bot_response(self, user_msg: str, chat_history) -> str:
        """Route a user message.

        The first turns return the canned intake questions; the turn that
        completes the intake calls the LLM once for the recipe and initializes
        the open conversation; every later turn goes through that chain.

        Raises:
            RuntimeError: if the open phase is reached before the chain was
                initialized (e.g. restored history after a reset).
        """
        if len(chat_history) < 2:
            return self.ai_prompt_questions["allergies"].prompt.template

        if len(chat_history) < 3:
            return self.ai_prompt_questions["recipe_open_params"].prompt.template

        if len(chat_history) < 4:
            user = 0  # index of the user message within each (user, ai) pair
            # Skip pair 0 (the canned ingredients question) and collect the
            # user's intake answers in order: ingredients, allergies.
            user_msgs = [msg_pair[user] for msg_pair in chat_history[1:]]
            f_init_prompt = init_prompt.format_prompt(
                ingredients=user_msgs[0],
                allergies=user_msgs[1],
                recipe_freeform_input=user_msg,
            )
            chat_msgs = f_init_prompt.to_messages()
            results = self.chat.generate([chat_msgs])
            # Append the generated recipe plus history/input placeholders so
            # the conversation can continue open-ended from this transcript.
            chat_msgs.extend(
                [
                    results.generations[0][0].message,
                    MessagesPlaceholder(variable_name="history"),
                    HumanMessagePromptTemplate.from_template("{input}"),
                ]
            )
            open_prompt = ChatPromptTemplate.from_messages(chat_msgs)
            # prepare the open conversation chain from this point
            self.init_conversation(open_prompt)
            return results.generations[0][0].message.content

        if self.conversation is None:
            # BUG FIX: fail with an actionable message instead of the
            # AttributeError the original raised on this path.
            raise RuntimeError(
                "Conversation chain is not initialized; reset the chat and start over."
            )
        return self.conversation.predict(input=user_msg)
156
+
157
# Gradio UI wiring. The chatbot widget is pre-seeded with the first scripted
# question (ingredients) as an AI-only turn, so the user can answer
# immediately; ConversationBot._get_bot_response counts on that seed pair
# being present in the history.
with gr.Blocks() as demo:
    bot = ConversationBot()
    chatbot = gr.Chatbot(
        value=[(None, bot.ai_prompt_questions["ingredients"].prompt.template)]
    )

    msg = gr.Textbox()
    clear = gr.Button("Clear")

    # On submit: bot.respond returns ("", updated_history) -> clears the
    # textbox and refreshes the chat. queue=False skips Gradio's request
    # queue for snappier scripted turns.
    msg.submit(
        fn=bot.respond, inputs=[msg, chatbot], outputs=[msg, chatbot], queue=False
    )
    # Clear empties the visible chat, then resets the bot's LangChain memory.
    # NOTE(review): this also discards the seeded ingredients question, so the
    # widget comes back empty rather than re-prompting — confirm intended.
    clear.click(lambda: None, None, chatbot, queue=False).then(bot.reset)

if __name__ == "__main__":
    demo.launch()
chatbot/engineer_prompt.py ADDED
@@ -0,0 +1,105 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from langchain.chat_models import PromptLayerChatOpenAI
2
+ from langchain.schema import HumanMessage, AIMessage, SystemMessage
3
+ from langchain.chains import ConversationChain
4
+ from langchain.memory import ConversationBufferMemory
5
+ from langchain.prompts.chat import (
6
+ ChatPromptTemplate,
7
+ SystemMessagePromptTemplate,
8
+ HumanMessagePromptTemplate,
9
+ AIMessagePromptTemplate,
10
+ MessagesPlaceholder,
11
+ )
12
+
13
+ # TODO Multiple chains sequenced?
14
+ # I think your way works fine, though you'd probably want to wrap it up in some initializer so you can "initialize" the chain via LLM calls. I'd probably use 2 chains and have a wrapping chain switch from the first to the second after initializing.
15
+ # https://discord.com/channels/1038097195422978059/1038097349660135474/1100533951136800828
16
+
17
# Scripted intake prompt: a system persona message followed by alternating
# AI-question / human-answer turns, ending with the recipe-generation
# instruction. Template variables: {ingredients}, {allergies},
# {recipe_freeform_input}. Consumers (chatbot.app.ConversationBot) index
# .messages positionally, so the message order here is part of the contract.
init_prompt = ChatPromptTemplate.from_messages(
    [
        SystemMessagePromptTemplate.from_template(
            """
The following is a conversation between a human and a friendly AI chef.
The AI is compassionate to animals and only recommends vegan recipes based on the ingredients, allergies, and other preferences the human has.

Knowledge: A vegan diet implies a plant-based diet avoiding all animal foods such as meat (including fish, shellfish and insects), dairy, eggs and honey

Let's think step by step.
If the human messages are unrelated to vegan recipes, remind them of your purpose to recommend vegan recipes.
""".strip()
        ),
        AIMessagePromptTemplate.from_template(
            "What ingredients do you wish to cook with?"
        ),
        HumanMessagePromptTemplate.from_template("Ingredients: {ingredients}"),
        AIMessagePromptTemplate.from_template(
            "Do you have any allergies I should be aware of?"
        ),
        HumanMessagePromptTemplate.from_template("Allergies: {allergies}"),
        AIMessagePromptTemplate.from_template(
            "Do you have any preferences I should consider for the recipe such as preparation time, difficulty, or cuisine region?"
        ),
        HumanMessagePromptTemplate.from_template(
            """
Give me a vegan recipe that includes at least a few of the ingredients provided (if any).
Respect the human's allergies (if any).
Follow these other preferences as closely as possible if they are inline with your purpose of recommending vegan recipes:

###
Preferences: {recipe_freeform_input}
###

Output format:

**Vegan recipe name**
Preparation time (humanized)

Ingredients (List of ingredients with quantities):
- <quantity and unit> <ingredient>

Steps (detailed):
1.
2.
3.
...
""".strip()
        ),
    ]
)
68
+
69
+
70
if __name__ == "__main__":
    # Manual smoke test: run the intake prompt end-to-end against the live
    # OpenAI API with PromptLayer logging. Requires OPENAI_API_KEY and
    # PROMPTLAYER_API_KEY in the environment (see .env.example).
    chat = PromptLayerChatOpenAI(
        temperature=1, pl_tags=["langchain"], return_pl_id=True
    )
    memory = ConversationBufferMemory(return_messages=True)
    # Fill the scripted intake with hard-coded example answers.
    chat_msgs = init_prompt.format_prompt(
        ingredients="tofu, pickles, olives, tomatoes, lettuce, bell peppers, carrots, bread",
        allergies="",
        recipe_freeform_input="The preparation time should be less than 30 minutes. I really love Thai food!",
    )

    chat_msgs = chat_msgs.to_messages()
    # One LLM call produces the first recipe from the completed intake.
    results = chat.generate([chat_msgs])
    # Append the generated recipe plus history/input placeholders so the chat
    # can continue open-ended from this transcript (mirrors chatbot.app).
    chat_msgs.extend(
        [
            results.generations[0][0].message,
            MessagesPlaceholder(variable_name="history"),
            HumanMessagePromptTemplate.from_template("{input}"),
        ]
    )
    open_prompt = ChatPromptTemplate.from_messages(chat_msgs)
    conversation = ConversationChain(
        llm=chat, verbose=True, memory=memory, prompt=open_prompt
    )

    # Exercise the open phase once and print the model's follow-up recipe.
    result = conversation.predict(input="Recommend a different recipe please.")
    print(result)

    #! PL score example — kept as reference for scoring prompts in PromptLayer.
    # chat_results = chat.generate([[HumanMessage(content=prompt)]])

    # for res in chat_results.generations:
    #     pl_request_id = res[0].generation_info["pl_request_id"]
    #     print(res[0].text)
    #     score = int(input("Enter a score from 0 to 100 for how the prompt performed: "))
    #     promptlayer.track.score(request_id=pl_request_id, score=score)
poetry.lock ADDED
The diff for this file is too large to render. See raw diff
 
pyproject.toml ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ [tool.poetry]
2
+ name = "lv-recipe-chatbot"
3
+ version = "0.1.0"
4
+ description = "Chatbot for recommending vegan recipes"
5
+ authors = ["Evan Lesmez <evanl@animalequality.org>"]
6
+ readme = "README.md"
7
+ packages = [{ include = "chatbot" }]
8
+
9
+ [tool.poetry.dependencies]
10
+ python = "^3.8.1"
11
+ langchain = "^0.0.145"
12
+ openai = "^0.27.4"
13
+ gradio = "^3.27.0"
14
+ jupyterlab = "^3.6.3"
15
+ tqdm = "^4.65.0"
16
+ transformers = "^4.28.1"
17
+ promptlayer = "^0.1.80"
18
+ python-dotenv = "^1.0.0"
19
+ torch = "1.13.1"
20
+ torchvision = "0.14.1"
21
+ wget = "3.2"
22
+
23
+ [tool.poetry.group.dev.dependencies]
24
+ black = "^23.3.0"
25
+ pytest = "^7.3.1"
26
+ mypy = "^1.2.0"
27
+
28
+
29
+ [build-system]
30
+ requires = ["poetry-core"]
31
+ build-backend = "poetry.core.masonry.api"
readme.md DELETED
File without changes