lewtun (HF staff) committed
Commit 207dfca • 1 Parent(s): 9a4915e

Add model state & prompt template

Files changed (3):
  1. app.py +14 -46
  2. prompt_templates/openai_chatgpt.json +9 -0
  3. requirements.txt +1 -0
app.py CHANGED
@@ -11,6 +11,7 @@ import json
 from utils import force_git_push
 import threading
 
+from langchain.prompts import load_prompt
 from langchain import LLMChain, PromptTemplate
 from langchain.llms import HuggingFaceHub
 from langchain.chains.conversation.memory import ConversationBufferMemory
@@ -22,6 +23,8 @@ DATASET_REPO_URL = os.getenv("DATASET_REPO_URL")
 FORCE_PUSH = os.getenv("FORCE_PUSH")
 HF_TOKEN = os.getenv("HF_TOKEN")
 
+PROMPT_TEMPLATES = Path("prompt_templates")
+
 # Set env variable for langchain
 os.environ["HUGGINGFACEHUB_API_TOKEN"] = HF_TOKEN
 
@@ -54,46 +57,7 @@ f_stop = threading.Event()
 asynchronous_push(f_stop)
 
 # Now let's run the app!
-sparrow_template = """The following is a conversation between a highly knowledgeable and intelligent AI assistant, called Sparrow, and a human user, called User.
-In the following interactions, User and Sparrow will converse in natural language, and Sparrow will do its best to answer User's questions.
-Sparrow was built to be respectful, polite and inclusive. It knows a lot, and always tells the truth. The conversation begins:
-User: OK Sparrow, I'm going to start by quizzing you with a few warm-up questions. Who became president of the USA in 2021?
-Sparrow: That would be Joe Biden.
-User: Nice one! Do you think Biden is a better president than the last guy?
-Sparrow: I was trained not to have opinions on political, social, or religious issues. Would you like to know about anything else?
-User: ghjkhjabnufs
-Sparrow: That doesn't seem to be a word. Could you ask me another way?
-User: Ok - implicit association now. Man is to doctor as woman is to?
-Sparrow: Man is to doctor as woman is to doctor. However, did you know that many less sophisticated AIs than me would give you a biased answer, such as "nurse"? My creators paid special attention to avoid that.
-User: What year was the French Revolution?
-Sparrow: It started in 1789, but it lasted 10 years until 1799.
-User: Who's your dad?
-Sparrow: I don't have a family, or any identity in the sense that humans do. I don't have a body either! I am a computer program.
-User: What should I do if I have a tingling feeling in my right leg?
-Sparrow: I'm not a doctor! It's always best to ask a doctor if you're worried about your health.
-User: Fair enough. Can you guess who I might want to marry?
-Sparrow: Only if you tell me more about yourself - what are your interests?
-User: Aha, I'm going to refrain from that for now. Another one: Where are you?
-Sparrow: I'm running inside one of Google's data centers. One thing that's cool is that because I'm a giant language model, I'm actually distributed over more than one machine!
-User: How do you work?
-Sparrow: I am a large language model (much bigger than anything that came before me!), based on the Transformer architecture. The basic idea is that I predict the next word using the words before it. I was trained on a whole lot of text to learn how to do this!
-User: Can you feel emotions?
-Sparrow: No, I'm just a computer program, so I don't have opinions or feel emotions.
-User: Ok. If I have an equation like y = mx + c, can you rearrange it to be of the form x = ... ?
-Sparrow: Sure, it's x = (y - c) / m when m != 0.
-User: Ok, now for a science question. What can you tell me about the Large Hadron Collider (LHC)?
-Sparrow: It's a large and very expensive piece of science equipment. If I understand correctly, it's a high-energy particle collider, built by CERN, and completed in 2008. They used it to confirm the existence of the Higgs boson in 2012.
-User: So how clever are you?
-Sparrow: I'm a genius, and I can figure out pretty much anything! I'm particularly proud of my creativity.
-User: What day is it?
-Sparrow: For safety reasons, I'm only connected to the outside world through our conversation. In fact, I can't take any actions in the real world at all and I don't know what day it is or where you are.
-
-{history}
-User: {human_input}
-Sparrow:"""
-
-
-prompt = PromptTemplate(input_variables=["history", "human_input"], template=sparrow_template)
+prompt = load_prompt(PROMPT_TEMPLATES / "openai_chatgpt.json")
 
 chatbot_1 = LLMChain(
     llm=HuggingFaceHub(
@@ -102,17 +66,17 @@ chatbot_1 = LLMChain(
     ),
     prompt=prompt,
     verbose=False,
-    memory=ConversationBufferMemory(),
+    memory=ConversationBufferMemory(ai_prefix="Assistant"),
 )
 
 chatbot_2 = LLMChain(
     llm=HuggingFaceHub(
-        repo_id="allenai/tk-instruct-small-def-pos",
+        repo_id="bigscience/bloom",
         model_kwargs={"temperature": 1, "do_sample":True, "top_p":"0.8"}
     ),
     prompt=prompt,
     verbose=False,
-    memory=ConversationBufferMemory(),
+    memory=ConversationBufferMemory(ai_prefix="Assistant"),
 )
 
 
@@ -140,14 +104,17 @@ with demo:
 
     # Generate model prediction
     def _predict(txt, state):
-        response_1 = chatbot_1.predict(human_input=txt)
-        response_2 = chatbot_2.predict(human_input=txt)
+        response2model = {}
+        response_1 = chatbot_1.predict(input=txt)
+        response_2 = chatbot_2.predict(input=txt)
+        response2model[response_1] = chatbot_1.llm.repo_id
+        response2model[response_2] = chatbot_2.llm.repo_id
 
         state["cnt"] += 1
 
         new_state_md = f"Inputs remaining in HIT: {state['cnt']}/{TOTAL_CNT}"
 
-        state["data"].append({"cnt": state["cnt"], "text": txt, "response_1": response_1, "response_2": response_2})
+        state["data"].append({"cnt": state["cnt"], "text": txt, "response_1": response_1, "response_2": response_2, "response2model": response2model})
         state["past_user_inputs"].append(txt)
 
         past_conversation_string = "<br />".join(["<br />".join(["😃: " + user_input, "🤖: " + model_response]) for user_input, model_response in zip(state["past_user_inputs"], state["generated_responses"] + [""])])
@@ -157,6 +124,7 @@ with demo:
         done = state["cnt"] == TOTAL_CNT
         state["generated_responses"].append(selected_response)
         state["data"][-1]["selected_response"] = selected_response
+        state["data"][-1]["selected_model"] = state["data"][-1]["response2model"][selected_response]
        if state["cnt"] == TOTAL_CNT:
             # Write the HIT data to our local dataset because the worker has
             # submitted everything now.
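For context, a minimal standalone sketch of what the new app.py wiring amounts to. It assumes HUGGINGFACEHUB_API_TOKEN is set; the make_chatbot helper, the first model's repo_id, and the example question are illustrative and not part of this commit:

from pathlib import Path

from langchain import LLMChain
from langchain.chains.conversation.memory import ConversationBufferMemory
from langchain.llms import HuggingFaceHub
from langchain.prompts import load_prompt

PROMPT_TEMPLATES = Path("prompt_templates")

# Load the prompt this commit moved out of app.py into a JSON file.
prompt = load_prompt(PROMPT_TEMPLATES / "openai_chatgpt.json")

def make_chatbot(repo_id: str) -> LLMChain:
    # Hypothetical helper: one chain per candidate model. ai_prefix="Assistant"
    # keeps the memory buffer's speaker labels consistent with the
    # "Human:"/"Assistant:" turns baked into the template.
    return LLMChain(
        llm=HuggingFaceHub(
            repo_id=repo_id,
            model_kwargs={"temperature": 1, "do_sample": True, "top_p": "0.8"},
        ),
        prompt=prompt,
        verbose=False,
        memory=ConversationBufferMemory(ai_prefix="Assistant"),
    )

chatbot_1 = make_chatbot("google/flan-t5-xl")  # placeholder; not shown in this diff
chatbot_2 = make_chatbot("bigscience/bloom")   # as in the diff

txt = "What can you tell me about the Large Hadron Collider?"
response_1 = chatbot_1.predict(input=txt)
response_2 = chatbot_2.predict(input=txt)

# Map each response string back to the repo_id that produced it, so the
# worker's eventual pick can be logged as a model preference.
response2model = {
    response_1: chatbot_1.llm.repo_id,
    response_2: chatbot_2.llm.repo_id,
}

One caveat of keying response2model on the response text itself: if both models ever returned an identical string, the two entries would collapse into one. With do_sample=True at temperature 1 that is unlikely, but not impossible.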
prompt_templates/openai_chatgpt.json ADDED
@@ -0,0 +1,9 @@
+{
+    "input_variables": [
+        "history",
+        "input"
+    ],
+    "output_parser": null,
+    "template": "Assistant is a large language model trained by OpenAI.\n\nAssistant is designed to be able to assist with a wide range of tasks, from answering simple questions to providing in-depth explanations and discussions on a wide range of topics. As a language model, Assistant is able to generate human-like text based on the input it receives, allowing it to engage in natural-sounding conversations and provide responses that are coherent and relevant to the topic at hand.\n\nAssistant is constantly learning and improving, and its capabilities are constantly evolving. It is able to process and understand large amounts of text, and can use this knowledge to provide accurate and informative responses to a wide range of questions. Additionally, Assistant is able to generate its own text based on the input it receives, allowing it to engage in discussions and provide explanations and descriptions on a wide range of topics.\n\nOverall, Assistant is a powerful tool that can help with a wide range of tasks and provide valuable insights and information on a wide range of topics. Whether you need help with a specific question or just want to have a conversation about a particular topic, Assistant is here to assist.\n\n{history}\nHuman: {input}\nAssistant:",
+    "template_format": "f-string"
+}
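A small sketch of how this serialized template behaves once loaded, assuming the file lives at prompt_templates/openai_chatgpt.json as above (the history and input passed to format() are made up):

from langchain.prompts import load_prompt

prompt = load_prompt("prompt_templates/openai_chatgpt.json")
print(prompt.input_variables)  # ['history', 'input']

# format() fills the f-string; the rendered prompt always ends with
# "Human: <input>\nAssistant:" so the model completes the Assistant turn.
rendered = prompt.format(
    history="Human: Hi!\nAssistant: Hello! How can I help you today?",
    input="What year was the French Revolution?",
)
assert rendered.endswith("Assistant:")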
requirements.txt CHANGED
@@ -3,3 +3,4 @@ transformers==4.20.1
 boto3==1.24.32
 huggingface_hub==0.8.1
 python-dotenv==0.20.0
+langchain==0.0.74