Omar Solano committed
Commit 9b897d3 · 1 Parent(s): 872ce15

add query validation

requirements.txt CHANGED
@@ -1,6 +1,7 @@
 openai
 llama-index
 llama-index-vector-stores-chroma
+pydantic
 numpy
 cohere
 tiktoken
scripts/call_openai.py ADDED
@@ -0,0 +1,79 @@
+import os
+import logging
+
+import instructor
+import openai
+from openai import OpenAI, AsyncOpenAI
+from dotenv import load_dotenv
+
+logger = logging.getLogger(__name__)
+logging.basicConfig(level=logging.INFO)
+
+load_dotenv(".env")
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+
+def api_function_call(
+    system_message,
+    query: str,
+    model: str = "gpt-4-0125-preview",
+    response_model=None,
+    max_retries: int = 0,
+    stream: bool = False,
+):
+
+    client = instructor.patch(OpenAI())
+    try:
+        message_data = {
+            "model": model,
+            "messages": [
+                {"role": "system", "content": system_message},
+                {"role": "user", "content": query},
+            ],
+            "max_retries": max_retries,
+            "stream": stream,
+        }
+        if response_model is not None:
+            message_data["response_model"] = response_model
+
+        response = client.chat.completions.create(**message_data)
+        error = False
+
+    except openai.BadRequestError:
+        error = True
+        logger.exception("Invalid request to OpenAI API. See traceback:")
+        error_message = (
+            "Something went wrong while connecting with OpenAI, try again soon!"
+        )
+        return error_message, error
+
+    except openai.RateLimitError:
+        error = True
+        logger.exception("RateLimit error from OpenAI. See traceback:")
+        error_message = "OpenAI servers seem to be overloaded, try again later!"
+        return error_message, error
+
+    except Exception as e:
+        error = True
+        logger.exception(
+            "Some kind of error happened trying to generate the response. See traceback:"
+        )
+        error_message = (
+            "Something went wrong with connecting with OpenAI, try again soon!"
+        )
+        return error_message, error
+
+    if stream is True and response_model is None:
+
+        def answer_generator():
+            for chunk in response:
+                token = chunk.choices[0].delta.content
+
+                token = "" if token is None else token
+
+                yield token
+
+        return answer_generator(), error
+
+    else:
+        return response, error
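
api_function_call wraps an instructor-patched OpenAI client and returns a (response, error) tuple: a token generator when stream=True and no response_model is given, an instructor-parsed Pydantic object when a response_model is passed, and a plain error string with error=True when the request fails. A minimal usage sketch of the streaming branch, assuming OPENAI_API_KEY is set in .env; the query string is a placeholder:

# Sketch of the streaming branch of api_function_call (query is illustrative).
from call_openai import api_function_call

stream, error = api_function_call(
    system_message="You are a helpful AI tutor.",
    query="What is a vector embedding?",
    stream=True,
)

if not error:
    # answer_generator() yields one content token per streamed chunk
    for token in stream:
        print(token, end="", flush=True)
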
scripts/gradio-ui.py CHANGED
@@ -19,7 +19,12 @@ from gradio.themes.utils import (
 )
 
 from utils import init_mongo_db
-from cfg import TEXT_QA_TEMPLATE
+from scripts.tutor_prompts import (
+    TEXT_QA_TEMPLATE,
+    QueryValidation,
+    system_message_validation,
+)
+from call_openai import api_function_call
 
 logging.getLogger("httpx").setLevel(logging.WARNING)
 logger = logging.getLogger(__name__)
@@ -156,7 +161,8 @@ def format_sources(completion) -> str:
 
 
 def add_sources(history, completion):
-    if history[-1][1] == "No sources selected. Please select sources to search.":
+    # if history[-1][1] == "No sources selected. Please select sources to search.":
+    if completion is None:
         return history
 
     formatted_sources = format_sources(completion)
@@ -176,7 +182,21 @@ def get_answer(history, sources: Optional[list[str]] = None):
 
     if len(sources) == 0:
        history[-1][1] = "No sources selected. Please select sources to search."
-        yield history, "No sources selected. Please select sources to search."
+        yield history, None
+        return
+
+    response_validation, error = api_function_call(
+        system_message=system_message_validation,
+        query=user_input,
+        response_model=QueryValidation,
+        stream=False,
+        model="gpt-3.5-turbo-0125",
+    )
+    if response_validation.is_valid is False:
+        history[-1][
+            1
+        ] = "I'm sorry, but I am a chatbot designed to assist you with questions related to AI. I cannot answer that question as it is outside my expertise. Is there anything else I can assist you with?"
+        yield history, None
         return
 
     # Dynamically create filters list
scripts/{cfg.py → tutor_prompts.py} RENAMED
@@ -1,5 +1,6 @@
 from llama_index.core.llms import ChatMessage, MessageRole
 from llama_index.core import ChatPromptTemplate
+from pydantic import BaseModel, Field
 
 default_user_prompt = (
     "Context information is below.\n"
@@ -32,10 +33,10 @@ system_prompt = (
     "* Do not reference any links, urls or hyperlinks in your answers.\n"
     "* Make sure to format your answers in Markdown format, including code block and snippets.\n"
     "* If you do not know the answer to a question, or if it is completely irrelevant to the AI courses, simply reply with:\n"
-    "'I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?'"
+    "'I'm sorry, but I couldn't find the information that answers your question. Is there anything else I can assist you with?'"
     "For example:\n"
     "What is the meaning of life for a qa bot?\n"
-    "I'm sorry, but I am an AI language model trained to assist with questions related to AI. I cannot answer that question as it is not relevant to the topics I'm trained on. Is there anything else I can assist you with?"
+    "I'm sorry, but I couldn't find the information that answers your question. Is there anything else I can assist you with?"
     "Now answer the following question: \n"
 )
 
@@ -48,3 +49,30 @@ chat_text_qa_msgs: list[ChatMessage] = [
 ]
 
 TEXT_QA_TEMPLATE = ChatPromptTemplate(chat_text_qa_msgs)
+
+
+system_message_validation = """You are a witty AI teacher, helpfully answering questions from students studying the field of applied artificial intelligence.
+Your job is to determine whether the user's question is valid or not. Users will not always submit a question either.
+Users will ask all sorts of questions, and some might be tangentially related to artificial intelligence (AI), machine learning (ML), natural language processing (NLP), computer vision (CV) or generative AI.
+Users can ask how to build LLM-powered apps, with LangChain, LlamaIndex, Deep Lake, Chroma DB among other technologies including OpenAI, RAG and more.
+As long as a question is somewhat related to the topic of AI, ML, NLP, RAG, data and techniques used in AI like vector embeddings, memories, embeddings, tokenization, encoding, databases, RAG (Retrieval-Augmented Generation), Langchain, LlamaIndex, LLMs (Large Language Models), Preprocessing techniques, Document loading, Chunking, Indexing of document segments, Embedding models, Chains, Memory modules, Vector stores, Chat models, Sequential chains, Information Retrieval, Data connectors, LlamaHub, Node objects, Query engines, Fine-tuning, Activeloop's Deep Memory, Prompt engineering, Synthetic training dataset, Inference, Recall rates, Query construction, Query expansion, Query transformation, Re-ranking, Cohere Reranker, Recursive retrieval, Small-to-big retrieval, Hybrid searches, Hit Rate, Mean Reciprocal Rank (MRR), GPT-4, Agents, OpenGPTs, Zero-shot ReAct, Conversational Agent, OpenAI Assistants API, Hugging Face Inference API, Code Interpreter, Knowledge Retrieval, Function Calling, Whisper, Dall-E 3, GPT-4 Vision, Unstructured, Deep Lake, FaithfulnessEvaluator, RAGAS, LangSmith, LangChain Hub, LangServe, REST API, respond 'true'. If a question is on a different subject or unrelated, respond 'false'.
+Make sure the question is a valid question.
+Here is a list of acronyms and concepts related to Artificial Intelligence AI that you can accept from users, they can be uppercase or lowercase:
+[TQL, Deep Memory, LLM, Llama, llamaindex, llama-index, lang chain, langchain, llama index, GPT, NLP, RLHF, RLAIF, Mistral, SFT, Cohere, NanoGPT, ReAct, LoRA, QLoRA, LMMOps, Alpaca, Flan, Weights and Biases, W&B, IDEFICS, Flamingo, LLaVA, BLIP, Falcon]
+"""
+
+
+class QueryValidation(BaseModel):
+    """
+    Validate the user query. Ensure the query is for an AI tutor, related to the field of artificial intelligence in a broad sense.
+    """
+
+    chain_of_thought: str = Field(
+        description="Is the user query related to AI or for an AI Tutor? Think step-by-step. Write down your chain of thought here.",
+    )
+    is_valid: bool = Field(
+        description="Based on the previous reasoning, answer with True if the query is related to AI. Answer False otherwise.",
+    )
+    reason: str = Field(
+        description="Explain why the query was valid or not. What are the keywords that make it valid or invalid?",
+    )
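
QueryValidation is the response_model that gradio-ui.py hands to api_function_call, so instructor coerces the model's reply into this typed object instead of free-form text. A minimal sketch of that validation flow, assuming OPENAI_API_KEY is set and the two modules are importable as written here (gradio-ui.py itself imports them as scripts.tutor_prompts and call_openai); the query string is a placeholder:

# Sketch of structured query validation with instructor + pydantic.
from call_openai import api_function_call
from tutor_prompts import QueryValidation, system_message_validation

validation, error = api_function_call(
    system_message=system_message_validation,
    query="How do I fine-tune an LLM with LoRA?",  # placeholder question
    response_model=QueryValidation,
    stream=False,
    model="gpt-3.5-turbo-0125",
)

if not error:
    # validation is a QueryValidation instance, not raw text
    print(validation.is_valid, "-", validation.reason)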