Commit aa445c4 (parent: 272ec4b)
gfhayworth committed

Update greg_funcs.py


Tweaking the prompt a little to add some empathy.

Files changed (1)
  greg_funcs.py (+13 -6)
greg_funcs.py CHANGED
@@ -6,11 +6,9 @@ from datasets import load_dataset
 
 from langchain.llms import OpenAI
 from langchain.docstore.document import Document
-
+from langchain.chains.question_answering import load_qa_chain
 from langchain.chains.qa_with_sources import load_qa_with_sources_chain
-
-
-
+from langchain.prompts import PromptTemplate
 
 
 """# import models"""
@@ -64,8 +62,17 @@ def get_text_fmt(qry, passages = mypassages, doc_embedding=mycorpus_embeddings):
         prediction_text.append(result)
     return prediction_text
 
-
-chain_qa = load_qa_with_sources_chain(OpenAI(temperature=0), chain_type="stuff")
+template = """You are a friendly AI assistant for the insurance company Humana. Given the following extracted parts of a long document and a question, create a succinct final answer.
+If you don't know the answer, just say that you don't know. Don't try to make up an answer.
+If the question is not about Humana, politely inform them that you are tuned to only answer questions about Humana.
+QUESTION: {question}
+=========
+{context}
+=========
+FINAL ANSWER:"""
+PROMPT = PromptTemplate(template=template, input_variables=["context", "question"])
+
+chain_qa = load_qa_chain(OpenAI(temperature=0), chain_type="stuff", prompt=PROMPT)
 
 
 def get_llm_response(message):
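For context on the change: the custom PROMPT has no sources slot, so the commit swaps load_qa_with_sources_chain for plain load_qa_chain and injects the Humana-specific instructions via the template. Below is a minimal sketch of how the new prompt-backed chain might be exercised, assuming the legacy LangChain 0.0.x calling convention for load_qa_chain and the get_text_fmt retriever already defined in greg_funcs.py; the wrapper name answer_with_prompted_chain and the sample question are illustrative, not part of this commit.

# Illustrative usage only -- assumes the legacy LangChain API used in this file
# and the get_text_fmt / chain_qa objects defined in greg_funcs.py.

def answer_with_prompted_chain(message):
    # Retrieve the passages most relevant to the question as langchain Documents.
    docs = get_text_fmt(message)
    # The "stuff" chain packs every retrieved document into the {context} slot
    # of PROMPT and asks OpenAI for a single, succinct final answer.
    result = chain_qa(
        {"input_documents": docs, "question": message},
        return_only_outputs=True,
    )
    return result["output_text"]

# Example (hypothetical query):
# print(answer_with_prompted_chain("What dental services does my Humana plan cover?"))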