Cheselle committed on
Commit 9e11a9e · 1 Parent(s): a92083b

Made edits on chainlit.md and added answers to app.py

Files changed (2)
  1. app.py +32 -1
  2. chainlit.md +2 -2
app.py CHANGED
@@ -13,8 +13,36 @@ from aimakerspace.openai_utils.chatmodel import ChatOpenAI
 import chainlit as cl
 import pymupdf
 
+# QUESTION #1:
+# Why do we want to support streaming? What about streaming is important, or useful?
+
+# ANSWER #1:
+# From a UX perspective, streaming makes LLMs feel responsive to end users:
+# a full response can take several seconds to generate, while users start
+# to perceive lag once feedback takes longer than about 200-300 ms.
+
+
+# QUESTION #2:
+# Why are we using User Session here? What about Python makes us need to use this? Why not just store everything in a global variable?
+
+# ANSWER #2:
+# Using User Sessions allows us to avoid conflicts, e.g. 3 concurrent users
+# updating a single global variable. This keeps the code correct and scalable.
+# From a UX perspective, User Sessions also keep each user's data separate,
+# which enables personalization and improves response quality with LLMs.
+
 system_template = """\
-Use the following context to answer a users question. If you cannot find the answer in the context, say you don't know the answer."""
+You are a kind, helpful, and polite AI.
+You speak the way kind doctors are portrayed in TV shows and movies.
+
+Use the following context to extract and synthesize information to answer the user's question as accurately as possible.
+Make sure that you think through each step.
+
+If the answer is not found in the context:
+1. Politely inform the user that the information is not available.
+2. If possible, suggest where they might find more information or how they could rephrase their question for better clarity.
+
+Always aim to provide clear, concise, and helpful responses."""
 system_role_prompt = SystemRolePrompt(system_template)
 
 user_prompt_template = """\
@@ -23,9 +51,12 @@ Context:
 
 Question:
 {question}
+
+Please provide a clear and concise answer that you have thought through based on the above context.
 """
 user_role_prompt = UserRolePrompt(user_prompt_template)
 
+
 class RetrievalAugmentedQAPipeline:
     def __init__(self, llm: ChatOpenAI(), vector_db_retriever: VectorDatabase) -> None:
         self.llm = llm
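
To make ANSWER #1 concrete, here is a minimal streaming sketch using Chainlit's cl.Message, stream_token, and send; the stream_response generator is a hypothetical stand-in for the pipeline's async token stream, not part of this commit:

import chainlit as cl

@cl.on_message
async def on_message(message: cl.Message):
    msg = cl.Message(content="")

    # stream_response is a hypothetical async generator yielding tokens.
    async for token in stream_response(message.content):
        await msg.stream_token(token)  # user sees partial output immediately

    await msg.send()  # finalize the streamed message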
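
Likewise for ANSWER #2, a sketch of per-user state with cl.user_session, assuming a hypothetical build_pipeline() factory and an assumed pipeline method name; each chat session gets its own pipeline instead of all users mutating one global:

import chainlit as cl

@cl.on_chat_start
async def on_chat_start():
    # One pipeline per connected user; a module-level global would be
    # shared (and mutated) by every concurrent chat at once.
    cl.user_session.set("pipeline", build_pipeline())

@cl.on_message
async def on_message(message: cl.Message):
    pipeline = cl.user_session.get("pipeline")
    # run(...) is a stand-in for whatever method the pipeline exposes.
    answer = await pipeline.run(message.content)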
chainlit.md CHANGED
@@ -1,3 +1,3 @@
-# Welcome to Chat with Your Text File
+# Welcome to Doc Doc: Talk to Your Documents
 
-With this application, you can chat with an uploaded text file that is smaller than 2MB!
+Upload any .txt or PDF file that's less than 2MB and start chatting with your documents in a few seconds!