Haofei Yu committed
Commit 33a0edf
1 Parent(s): 3ce130a

formalize accordion part of the code (#17)


* add the issue and PR template

* only show the generated conversation

* support multi-turn sotopia prompt (see the sketch after this list)

* reformalize the accordion part of the code
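Of these bullets, multi-turn prompt support is the behavioral change; the rest is refactoring. The multi-turn logic itself lives in utils.format_sotopia_prompt, which this commit imports but does not show, so what follows is only a minimal sketch of how Gradio-style (user, bot) history pairs can be folded into a single prompt. The name format_sotopia_prompt_sketch and the turn formatting are illustrative assumptions, not the repo's actual implementation.

from typing import List, Tuple

# Hypothetical sketch only: utils.format_sotopia_prompt is not shown on this
# commit page, so this illustrates the multi-turn idea rather than real code.
def format_sotopia_prompt_sketch(
    message: str,
    history: List[Tuple[str, str]],
    instructions: str,
    user_name: str,
    bot_name: str,
) -> str:
    lines = [instructions]
    for user_turn, bot_turn in history:  # one entry per past exchange
        lines.append(f"{user_name}: {user_turn}")
        lines.append(f"{bot_name}: {bot_turn}")
    lines.append(f"{user_name}: {message}")
    lines.append(f"{bot_name}:")  # leave the bot's turn open for generation
    return "\n".join(lines)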

Files changed (1)
  1. app.py +104 -51
app.py CHANGED
@@ -9,39 +9,41 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
 
 from utils import Agent, get_starter_prompt, format_sotopia_prompt
 
-
-HUMAN_AGENT = Agent(
-    name="Ethan Johnson",
-    background="Ethan Johnson is a 34-year-old male chef. He/him pronouns. Ethan Johnson is famous for cooking Italian food.",
-    goal="Uknown",
-    secrets="Uknown",
-    personality="Ethan Johnson, a creative yet somewhat reserved individual, values power and fairness. He likes to analyse situations before deciding.",)
-
-MACHINE_AGENT = Agent(
-    name="Benjamin Jackson",
-    background="Benjamin Jackson is a 24-year-old male environmental activist. He/him pronouns. Benjamin Jackson is well-known for his impassioned speeches.",
-    goal="Figure out why they estranged you recently, and maintain the existing friendship (Extra information: you notice that your friend has been intentionally avoiding you, you would like to figure out why. You value your friendship with the friend and don't want to lose it.)",
-    secrets="Descendant of a wealthy oil tycoon, rejects family fortune",
-    personality="Benjamin Jackson, expressive and imaginative, leans towards self-direction and liberty. His decisions aim for societal betterment.",)
-
-SCENARIO = "Conversation between two friends, where one is upset and crying"
-
-DEFUALT_INSTRUCTIONS = get_starter_prompt(
-    MACHINE_AGENT,
-    HUMAN_AGENT,
-    SCENARIO
-)
-
-DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"
-MODEL_NAME = "cmu-lti/sotopia-pi-mistral-7b-BC_SR"
-COMPUTE_DTYPE = torch.float16
-
-config_dict = PeftConfig.from_json_file("peft_config.json")
-config = PeftConfig.from_peft_type(**config_dict)
-tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
-model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1").to("cuda")
-model = PeftModel.from_pretrained(model, MODEL_NAME, config=config).to("cuda")
-according_visible = True
+DEPLOYED = os.getenv("DEPLOYED", "true").lower() == "true"
+
+
+def prepare_sotopia_info():
+    human_agent = Agent(
+        name="Ethan Johnson",
+        background="Ethan Johnson is a 34-year-old male chef. He/him pronouns. Ethan Johnson is famous for cooking Italian food.",
+        goal="Uknown",
+        secrets="Uknown",
+        personality="Ethan Johnson, a creative yet somewhat reserved individual, values power and fairness. He likes to analyse situations before deciding.",)
+
+    machine_agent = Agent(
+        name="Benjamin Jackson",
+        background="Benjamin Jackson is a 24-year-old male environmental activist. He/him pronouns. Benjamin Jackson is well-known for his impassioned speeches.",
+        goal="Figure out why they estranged you recently, and maintain the existing friendship (Extra information: you notice that your friend has been intentionally avoiding you, you would like to figure out why. You value your friendship with the friend and don't want to lose it.)",
+        secrets="Descendant of a wealthy oil tycoon, rejects family fortune",
+        personality="Benjamin Jackson, expressive and imaginative, leans towards self-direction and liberty. His decisions aim for societal betterment.",)
+
+    scenario = "Conversation between two friends, where one is upset and crying"
+    instructions = get_starter_prompt(machine_agent, human_agent, scenario)
+    return human_agent, machine_agent, scenario, instructions
+
+
+
+
+def prepare():
+    model_name = "cmu-lti/sotopia-pi-mistral-7b-BC_SR"
+    compute_type = torch.float16
+    config_dict = PeftConfig.from_json_file("peft_config.json")
+    config = PeftConfig.from_peft_type(**config_dict)
+    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1")
+    model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.1").to("cuda")
+    model = PeftModel.from_pretrained(model, model_name, config=config).to("cuda")
+    return model, tokenizer
+
 
 
 def introduction():
@@ -61,7 +63,9 @@ def introduction():
     )
 
 
-def chat_accordion():
+
+def param_accordion(according_visible=True):
+
     with gr.Accordion("Parameters", open=False, visible=according_visible):
         temperature = gr.Slider(
             minimum=0.1,
@@ -84,22 +88,17 @@ def chat_accordion():
             interactive=False,
             visible=False,
         )
-    with gr.Accordion("Instructions", open=False, visible=False):
-        instructions = gr.Textbox(
-            placeholder="The Instructions",
-            value=DEFUALT_INSTRUCTIONS,
-            lines=16,
-            interactive=True,
-            label="Instructions",
-            max_lines=16,
-            show_label=False,
-        )
+    return temperature, session_id, max_tokens
+
+
+def sotopia_info_accordion(human_agent, machine_agent, scenario, according_visible=True):
+    with gr.Accordion("Instructions", open=False, visible=according_visible):
         with gr.Row():
             with gr.Column():
                 user_name = gr.Textbox(
                     lines=1,
                     label="username",
-                    value=HUMAN_AGENT.name,
+                    value=human_agent,
                     interactive=True,
                     placeholder="Username: ",
                     show_label=False,
@@ -108,14 +107,38 @@
             with gr.Column():
                 bot_name = gr.Textbox(
                     lines=1,
-                    value=MACHINE_AGENT.name,
+                    value=machine_agent,
                     interactive=True,
                     placeholder="Bot Name",
                     show_label=False,
                     max_lines=1,
                     visible=False,
                 )
-    return temperature, instructions, user_name, bot_name, session_id, max_tokens
+            with gr.Column():
+                scenario = gr.Textbox(
+                    lines=4,
+                    value=scenario,
+                    interactive=False,
+                    placeholder="Scenario",
+                    show_label=False,
+                    max_lines=4,
+                    visible=False,
+                )
+    return user_name, bot_name, scenario
+
+
+def instructions_accordion(instructions, according_visible=False):
+    with gr.Accordion("Instructions", open=False, visible=according_visible):
+        instructions = gr.Textbox(
+            lines=10,
+            value=instructions,
+            interactive=False,
+            placeholder="Instructions",
+            show_label=False,
+            max_lines=10,
+            visible=False,
+        )
+    return instructions
 
 
 # history are input output pairs
@@ -152,17 +175,47 @@ def run_chat(
 
 
 def chat_tab():
+    model, tokenizer = prepare()
+    human_agent, machine_agent, scenario, instructions = prepare_sotopia_info()
+    # history are input output pairs
+    def run_chat(
+        message: str,
+        history,
+        instructions: str,
+        user_name: str,
+        bot_name: str,
+        temperature: float,
+        top_p: float,
+        max_tokens: int,
+    ):
+        prompt = format_sotopia_prompt(
+            message,
+            history,
+            instructions,
+            user_name,
+            bot_name
+        )
+        input_tokens = tokenizer(prompt, return_tensors="pt", padding="do_not_pad").input_ids.to("cuda")
+        input_length = input_tokens.shape[-1]
+        output_tokens = model.generate(
+            input_tokens,
+            temperature=temperature,
+            top_p=top_p,
+            max_length=max_tokens,
+            pad_token_id=tokenizer.eos_token_id,
+            num_return_sequences=1
+        )
+        output_tokens = output_tokens[:, input_length:]
+        text_output = tokenizer.decode(output_tokens[0], skip_special_tokens=True)
+        return text_output
+
+
     with gr.Column():
         with gr.Row():
-            (
-                temperature,
-                instructions,
-                user_name,
-                bot_name,
-                session_id,
-                max_tokens
-            ) = chat_accordion()
-
+            temperature, session_id, max_tokens = param_accordion()
+            user_name, bot_name, scenario = sotopia_info_accordion(human_agent, machine_agent, scenario)
+            instructions = instructions_accordion(instructions)
+
         with gr.Column():
             with gr.Blocks():
                 gr.ChatInterface(
@@ -189,7 +242,7 @@ def chat_tab():
                         bot_name,
                         temperature,
                         session_id,
-                        max_tokens
+                        max_tokens,
                     ],
                     submit_btn="Send",
                     stop_btn="Stop",
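
Taken together, the refactor turns chat_tab() into a small pipeline: load the PEFT-adapted model once via prepare(), build the agents and starter instructions via prepare_sotopia_info(), define run_chat as a closure over both, and hand the accordion components to gr.ChatInterface as additional_inputs so their live values reach every call. Below is a self-contained sketch of that wiring pattern; run_chat is stubbed out (the real closure formats a Sotopia prompt and calls model.generate on CUDA, as in the diff above), and the component values are placeholders.

import gradio as gr


def build_demo():
    # Stub standing in for the run_chat closure above; gr.ChatInterface calls
    # fn(message, history, *additional_inputs) in the order given below.
    def run_chat(message, history, instructions, user_name, bot_name,
                 temperature, session_id, max_tokens):
        return f"{bot_name}: (placeholder reply to {message!r})"

    with gr.Blocks() as demo:
        # Hidden components mirroring param_accordion()/sotopia_info_accordion().
        instructions = gr.Textbox(value="(starter prompt)", visible=False)
        user_name = gr.Textbox(value="Ethan Johnson", visible=False)
        bot_name = gr.Textbox(value="Benjamin Jackson", visible=False)
        temperature = gr.Slider(minimum=0.1, maximum=1.0, value=0.7, label="Temperature")
        session_id = gr.Textbox(visible=False)
        max_tokens = gr.Slider(minimum=64, maximum=1024, step=64, value=256, label="Max tokens")
        gr.ChatInterface(
            fn=run_chat,
            additional_inputs=[instructions, user_name, bot_name,
                               temperature, session_id, max_tokens],
            submit_btn="Send",
            stop_btn="Stop",
        )
    return demo


if __name__ == "__main__":
    build_demo().launch()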