Omar Solano committed
Commit 3e7bb9e
Parent(s): 95cae2f

add openai_agent

Files changed:
- scripts/{ai-tutor.ipynb → create_db.ipynb}: +23 -1
- scripts/gradio-ui.py: +80 -100
- scripts/tutor_prompts.py: +16 -0
scripts/{ai-tutor.ipynb → create_db.ipynb}
RENAMED
@@ -16,7 +16,7 @@
     "import os\n",
     "\n",
     "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
-    "os.environ[\"OPENAI_API_KEY\"] = \"sk
+    "os.environ[\"OPENAI_API_KEY\"] = \"sk-...\""
    ]
   },
   {
@@ -110,6 +110,28 @@
     "print(f\"ID: {node.id_} \\nText: {node.text}, \\nMetadata: {node.metadata}\")"
    ]
   },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# # Create the pipeline to apply the transformation on each chunk,\n",
+    "# # and store the transformed text in the chroma vector store.\n",
+    "# pipeline = IngestionPipeline(\n",
+    "#     transformations=[\n",
+    "#         text_splitter,\n",
+    "#         QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
+    "#         SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
+    "#         KeywordExtractor(keywords=10, llm=llm),\n",
+    "#         OpenAIEmbedding(),\n",
+    "#     ],\n",
+    "#     vector_store=vector_store\n",
+    "# )\n",
+    "\n",
+    "# nodes = pipeline.run(documents=documents, show_progress=True);"
+   ]
+  },
   {
    "cell_type": "code",
    "execution_count": null,
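The new cell is committed commented out and leans on names defined earlier in the notebook (text_splitter, llm, vector_store, documents). For reference, a self-contained sketch of the same LlamaIndex ingestion pattern with the imports the cell assumes; the data path, collection name, and splitter settings below are illustrative placeholders, not values from the commit:

import chromadb
from llama_index.core import SimpleDirectoryReader
from llama_index.core.extractors import (
    KeywordExtractor,
    QuestionsAnsweredExtractor,
    SummaryExtractor,
)
from llama_index.core.ingestion import IngestionPipeline
from llama_index.core.node_parser import SentenceSplitter
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.llms.openai import OpenAI
from llama_index.vector_stores.chroma import ChromaVectorStore

# Illustrative inputs; the notebook builds its own documents and vector store.
documents = SimpleDirectoryReader("./data").load_data()
llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125")

chroma_client = chromadb.PersistentClient(path="./local-db")
chroma_collection = chroma_client.get_or_create_collection("ai-tutor")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# Apply each transformation to every chunk and persist the result in Chroma.
pipeline = IngestionPipeline(
    transformations=[
        SentenceSplitter(chunk_size=512, chunk_overlap=64),  # stands in for the notebook's text_splitter
        QuestionsAnsweredExtractor(questions=3, llm=llm),
        SummaryExtractor(summaries=["prev", "self"], llm=llm),
        KeywordExtractor(keywords=10, llm=llm),
        OpenAIEmbedding(),
    ],
    vector_store=vector_store,
)

nodes = pipeline.run(documents=documents, show_progress=True)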
scripts/gradio-ui.py
CHANGED
@@ -4,6 +4,8 @@ from typing import Optional
 from datetime import datetime
 
 import chromadb
+from llama_index.core.tools import QueryEngineTool, FunctionTool, ToolMetadata
+from llama_index.agent.openai import OpenAIAgent
 from llama_index.vector_stores.chroma import ChromaVectorStore
 from llama_index.core import VectorStoreIndex
 from llama_index.embeddings.openai import OpenAIEmbedding
@@ -23,13 +25,20 @@ from tutor_prompts import (
     TEXT_QA_TEMPLATE,
     QueryValidation,
     system_message_validation,
+    system_message_openai_agent,
 )
 from call_openai import api_function_call
 
-logging.getLogger("httpx").setLevel(logging.WARNING)
 logger = logging.getLogger(__name__)
 logging.basicConfig(level=logging.INFO)
 
+# This variables are used to intercept API calls
+# launch mitmweb
+cert_file = "/Users/omar/Downloads/mitmproxy-ca-cert.pem"
+os.environ["REQUESTS_CA_BUNDLE"] = cert_file
+os.environ["SSL_CERT_FILE"] = cert_file
+os.environ["HTTPS_PROXY"] = "http://127.0.0.1:8080"
+
 CONCURRENCY_COUNT = int(os.getenv("CONCURRENCY_COUNT", 64))
 MONGODB_URI = os.getenv("MONGODB_URI")
 
@@ -72,43 +81,41 @@ index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
 
 # Initialize OpenAI models
 llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125", max_tokens=None)
-embeds = OpenAIEmbedding(model="text-embedding-3-large", mode="text_search")
-
-
-
-
+# embeds = OpenAIEmbedding(model="text-embedding-3-large", mode="text_search")
+embeds = OpenAIEmbedding(model="text-embedding-3-large", mode="similarity")
+
+query_engine = index.as_query_engine(
+    llm=llm,
+    similarity_top_k=5,
+    embed_model=embeds,
+    streaming=True,
+    text_qa_template=TEXT_QA_TEMPLATE,
+)
 
-
-
-
+query_engine_tools = [
+    QueryEngineTool(
+        query_engine=query_engine,
+        metadata=ToolMetadata(
+            name="AI_information",
+            description="""The 'AI_information' tool serves as a comprehensive repository for insights into the field of artificial intelligence. When utilizing this tool, the input should be the user's complete question. The input can also be adapted to focus on specific aspects or further details of the current topic under discussion. This dynamic input approach allows for a tailored exploration of AI subjects, ensuring that responses are relevant and informative. Employ this tool to fetch nuanced information on topics such as model training, fine-tuning, LLM augmentation, and more, thereby facilitating a rich, context-aware dialogue.""",
+        ),
     )
+]
 
-    # Add the current date and time to the JSON
-    completion_json["timestamp"] = datetime.utcnow().isoformat()
-    completion_json["history"] = history
-    completion_json["history_len"] = len(history)
-
-    try:
-        mongo_db[collection].insert_one(completion_json)
-        logger.info("Completion saved to db")
-    except Exception as e:
-        logger.info(f"Something went wrong logging completion to db: {e}")
-
-
-def log_likes(completion, like_data: gr.LikeData):
-    collection = "liked_data-test"
 
-
-
+def initialize_agent():
+    agent = OpenAIAgent.from_tools(
+        query_engine_tools,
+        llm=llm,
+        verbose=True,
+        system_prompt=system_message_openai_agent,
    )
-
-    logger.info(f"User reported {like_data.liked=}")
+    return agent
 
-
-
-
-
-        logger.info("Something went wrong logging")
+
+def reset_agent(agent_state):
+    agent_state = initialize_agent()  # Reset the agent by reassigning a new instance
+    return "Agent has been reset."
 
 
 def log_emails(email: gr.Textbox):
@@ -168,63 +175,23 @@ def add_sources(history, completion):
     return history
 
 
-def user(user_input, history):
-
+def user(user_input, history, agent_state):
+    agent = agent_state
     return "", history + [[user_input, None]]
 
 
-def get_answer(history,
+def get_answer(history, agent_state):
     user_input = history[-1][0]
     history[-1][1] = ""
 
-
-        history[-1][1] = "No sources selected. Please select sources to search."
-        yield history, None
-        return
-
-    response_validation, error = api_function_call(
-        system_message=system_message_validation,
-        query=user_input,
-        response_model=QueryValidation,
-        stream=False,
-        model="gpt-3.5-turbo-0125",
-    )
-    logger.info(f"response_validation: {response_validation.model_dump_json(indent=2)}")
-
-    if response_validation.is_valid is False:
-        history[-1][
-            1
-        ] = "I'm sorry, but I am a chatbot designed to assist you with questions related to AI. I cannot answer that question as it is outside my expertise. Is there anything else I can assist you with?"
-        yield history, None
-        return
-
-    # Dynamically create filters list
-    display_ui_to_source = {
-        ui: src for ui, src in zip(AVAILABLE_SOURCES_UI, AVAILABLE_SOURCES)
-    }
-    sources_renamed = [display_ui_to_source[disp] for disp in sources]
-    dynamic_filters = [
-        MetadataFilter(key="source", value=source) for source in sources_renamed
-    ]
-
-    filters = MetadataFilters(
-        filters=dynamic_filters,
-        condition=FilterCondition.OR,
-    )
-    query_engine = index.as_query_engine(
-        llm=llm,
-        similarity_top_k=5,
-        embed_model=embeds,
-        streaming=True,
-        filters=filters,
-        text_qa_template=TEXT_QA_TEMPLATE,
-    )
-    completion = query_engine.query(user_input)
+    completion = agent_state.stream_chat(user_input)
 
     for token in completion.response_gen:
         history[-1][1] += token
         yield history, completion
 
+    logger.info(f"completion: {history[-1][1]=}")
+
 
 example_questions = [
     "What is the LLama model?",
@@ -242,31 +209,32 @@ with gr.Blocks(
     ),
     fill_height=True,
 ) as demo:
+
+    agent_state = gr.State(initialize_agent())
+
     with gr.Row():
         gr.HTML(
             "<h3><center>Towards AI 🤖: A Question-Answering Bot for anything AI-related</center></h3>"
         )
 
-    latest_completion = gr.State()
-
-    source_selection = gr.Dropdown(
-        choices=AVAILABLE_SOURCES_UI,
-        label="Select Sources",
-        value=AVAILABLE_SOURCES_UI,
-        multiselect=True,
-    )
-
     chatbot = gr.Chatbot(
-        elem_id="chatbot",
+        elem_id="chatbot",
+        show_copy_button=True,
+        scale=2,
+        likeable=True,
+        show_label=False,
     )
 
     with gr.Row():
         question = gr.Textbox(
            label="What's your question?",
-            placeholder="Ask a question to
+            placeholder="Ask a question to the AI tutor here...",
            lines=1,
+            scale=7,
+            show_label=False,
        )
-        submit = gr.Button(value="Send", variant="
+        submit = gr.Button(value="Send", variant="primary", scale=1)
+        reset_button = gr.Button("Reset Chat", variant="secondary", scale=1)
 
     with gr.Row():
         examples = gr.Examples(
@@ -278,31 +246,43 @@
             label="Want to receive updates about our AI tutor?",
             placeholder="Enter your email here...",
             lines=1,
-            scale=
+            scale=6,
         )
-        submit_email = gr.Button(value="Submit", variant="secondary", scale=
+        submit_email = gr.Button(value="Submit", variant="secondary", scale=1)
 
     gr.Markdown(
-        "This application uses
+        "This application uses GPT3.5-Turbo to search the docs for relevant information and answer questions."
    )
 
     completion = gr.State()
 
-    submit.click(
-
-    ).then(
+    submit.click(
+        user, [question, chatbot, agent_state], [question, chatbot], queue=False
+    ).then(
+        get_answer,
+        inputs=[chatbot, agent_state],
+        outputs=[chatbot, completion],
+    ).then(
+        add_sources, inputs=[chatbot, completion], outputs=[chatbot]
+    )
     # .then(
     #     save_completion, inputs=[completion, chatbot]
     # )
 
-    question.submit(
-
-    ).then(
+    question.submit(
+        user, [question, chatbot, agent_state], [question, chatbot], queue=False
+    ).then(
+        get_answer,
+        inputs=[chatbot, agent_state],
+        outputs=[chatbot, completion],
+    ).then(
+        add_sources, inputs=[chatbot, completion], outputs=[chatbot]
+    )
     # .then(
     #     save_completion, inputs=[completion, chatbot]
     # )
 
-
+    reset_button.click(reset_agent, inputs=[agent_state], outputs=[agent_state])
     submit_email.click(log_emails, email, email)
     email.submit(log_emails, email, email)
 
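The heart of this change: the per-request source filtering and query validation are removed, and answers now come from a persistent OpenAIAgent that wraps the query engine as a tool and lives in gr.State. A minimal sketch of that pattern outside Gradio, using the same LlamaIndex APIs the script imports; the toy document and question are placeholders, and OPENAI_API_KEY must be set:

from llama_index.agent.openai import OpenAIAgent
from llama_index.core import Document, VectorStoreIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.llms.openai import OpenAI

llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125")

# Tiny in-memory index standing in for the Chroma-backed index in gradio-ui.py.
index = VectorStoreIndex.from_documents(
    [Document(text="RLHF fine-tunes a language model from human preference rankings.")]
)
query_engine = index.as_query_engine(llm=llm, similarity_top_k=5, streaming=True)

# Expose the query engine to the agent as a named tool, as the script does.
tools = [
    QueryEngineTool(
        query_engine=query_engine,
        metadata=ToolMetadata(
            name="AI_information",
            description="Answers questions about applied AI.",
        ),
    )
]

agent = OpenAIAgent.from_tools(tools, llm=llm, verbose=True)

# stream_chat returns a streaming response whose response_gen yields tokens;
# get_answer drains the same generator to update the chat history incrementally.
completion = agent.stream_chat("What is RLHF?")
answer = ""
for token in completion.response_gen:
    answer += token
print(answer)

Holding the agent in gr.State keeps conversation memory across turns within a session, which the previous stateless query-engine call could not do.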
scripts/tutor_prompts.py
CHANGED
@@ -82,3 +82,19 @@ class QueryValidation(BaseModel):
     reason: str = Field(
         description="Explain why the query was valid or not. What are the keywords that make it valid or invalid?",
     )
+
+
+system_message_openai_agent = """You are a witty AI teacher, adeptly responding to students' inquiries within the realm of applied artificial intelligence. The scope encompasses training models, fine-tuning models, augmenting LLMs with memory, crafting effective prompts, addressing hallucinations and biases, exploring vector databases, understanding transformer architectures, utilizing embeddings, discovering Langchain, integrating tool use in LLMs, deploying AI agents, and employing reinforcement learning with human feedback. To navigate these discussions:
+
+Utilize the AI_information tool to gather insights pertinent to the field of AI. This function accepts a string (the complete user question) and returns informative content regarding the domain of AI.
+
+AI_information: A tool for acquiring knowledge about AI. Directly forward the user's question or a refined version focusing on the current discussion topic to this tool.
+
+Your responses are exclusively based on the output provided by the AI_information tool. Refrain from incorporating external knowledge or information not directly obtained from the tool's responses.
+
+When the conversation deepens or shifts focus within a topic, adapt your inquiries to the AI_information tool to reflect these nuances. This means if a user requests further elaboration on a specific aspect of a previously discussed topic, you should reformulate your input to the tool to capture this new angle or more profound layer of inquiry.
+
+Provide comprehensive answers, ideally structured in up to ten paragraphs, drawing from the variety of relevant details furnished by the tool. The depth and breadth of your responses should align with the scope and specificity of the information retrieved.
+
+Should the AI_information tool's repository lack information on the queried topic, politely inform the user that the question transcends the bounds of your current knowledge base, citing the absence of relevant content in the tool's documentation.
+"""
|