# our-hack-qa / dumb-friendly.py
# Author: michal — "one input" (commit 2bc248f, 2.93 kB)
# Simple LangChain Q&A chatbot grounded on a benefits document.
import ipdb
from langchain import OpenAI, ConversationChain, LLMChain, PromptTemplate
from langchain.chains.conversation.memory import ConversationalBufferWindowMemory
from langchain import PromptTemplate
# Identity of the (simulated) user; interpolated into every prompt section below.
name_of_person = "Alfred Jamesmanson"

# Tone instructions plus the "escalate to a real person" rule.
# Fix: original read "Assistant is uses friendly..." — broken grammar in the
# prompt text actually sent to the model.
real_person_prompt = f"""
Assistant uses friendly and helpful language.
Assistant loves making new friends.
If at any time {name_of_person} asks to talk with a real person that {name_of_person} should only ask and they will be connected to a real person. Assistant will remind {name_of_person} that they can connect to speak to a real person whenever they would like that. If {name_of_person} asks to talk to a real person then Assistant will let {name_of_person} know they will be connected to talk to a real person right away.
"""

# Biographical facts the assistant can answer questions about.
person_details = f"""
Assistant is designed to talk to {name_of_person} and answer their questions.
{name_of_person} lives in Dallas Texas. {name_of_person} was born in Keywest Florida on January 2nd 1990.
{name_of_person} goes to college. {name_of_person} studies electrical engineering.
{name_of_person} is friends with Kelly Robin, Jesse Lambourghini and Jackson Loggin.
{name_of_person} has brown hair.
"""
# Load the benefits document that grounds the assistant's answers.
filename = "summary-of-benefits-paragraphs.txt"
# Explicit encoding: the platform default varies (e.g. cp1252 on Windows)
# and could mis-decode the document.
with open(filename, encoding="utf-8") as f:
    document_text = f.read()

len_doc = len(document_text)
print("len doc is ", len_doc)
# Keep only the first half of the document so the assembled prompt stays
# within the model's context window.
document_text = document_text[:len_doc // 2]
# The static context (persona, escalation rule, grounding document) is baked
# into the template text via f-string interpolation at module load; only the
# live user message remains a real LangChain template variable.
base_template = f"""Assistant is a large language model trained by OpenAI.
{person_details}
{real_person_prompt}
{document_text}
Assistant is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.
{name_of_person}:"""

# {human_input} is a literal brace placeholder here (plain string, not an
# f-string) so PromptTemplate fills it per call.
template = base_template + """ {human_input}
Assistant:
"""

prompt = PromptTemplate(
    # Everything else was already interpolated into the template text above.
    input_variables=["human_input"],
    template=template,
)
# Chain wiring: deterministic completions (temperature=0) plus a short rolling
# memory of the last 3 exchanges so the conversation stays coherent without
# unbounded prompt growth. verbose=True echoes the assembled prompt per call.
_llm = OpenAI(temperature=0)
_memory = ConversationalBufferWindowMemory(k=3)
chatgpt_chain = LLMChain(
    llm=_llm,
    prompt=prompt,
    memory=_memory,
    verbose=True,
)
# Interactive REPL: read a user line, run it through the chain, print the reply.
# Fixes: removed a dead initial assignment to human_input (it was immediately
# overwritten by input()), removed commented-out kwargs, and added a clean exit
# on Ctrl-D / Ctrl-C instead of dying with a traceback.
while True:
    try:
        human_input = input(": ")
    except (EOFError, KeyboardInterrupt):
        print()
        break
    # Drop into ipdb on any exception raised inside the chain call, so prompt
    # construction problems can be inspected live.
    with ipdb.launch_ipdb_on_exception():
        output = chatgpt_chain.predict(human_input=human_input)
    print(output)