rphrp1985 committed
Commit 2cb2161
1 Parent(s): e4daf96

Update app.py

Files changed (1)
  1. app.py +13 -6
app.py CHANGED
@@ -109,7 +109,14 @@ def respond(
     temperature,
     top_p,
 ):
-    messages = [{"role": "user", "content": "Hello, how are you?"}]
+    messages = []
+    for val in history:
+        if val[0]:
+            messages.append({"role": "user", "content": val[0]})
+        if val[1]:
+            messages.append({"role": "assistant", "content": val[1]})
+
+    messages.append({"role": "user", "content": message})
     input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to(accelerator.device) #.to('cuda')
     ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
     # with autocast():
@@ -125,11 +132,11 @@ def respond(
     yield gen_text


-    messages = [
-        {"role": "user", "content": "What is your favourite condiment?"},
-        {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
-        {"role": "user", "content": "Do you have mayonnaise recipes?"}
-    ]
+    # messages = [
+    # # {"role": "user", "content": "What is your favourite condiment?"},
+    # # {"role": "assistant", "content": "Well, I'm quite partial to a good squeeze of fresh lemon juice. It adds just the right amount of zesty flavour to whatever I'm cooking up in the kitchen!"},
+    # # {"role": "user", "content": "Do you have mayonnaise recipes?"}
+    # ]

     # inputs = tokenizer.apply_chat_template(messages, return_tensors="pt").to("cuda")
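
For context, below is a minimal, self-contained sketch of the pattern this commit introduces: the hardcoded test prompt is replaced by rebuilding the Gradio (user, assistant) history into a role/content messages list and appending the new user turn before calling apply_chat_template. Only the history loop and the apply_chat_template call mirror the diff above; the model id, the build_messages helper name, the generation settings, and the non-streaming return are illustrative assumptions (the actual app.py streams partial text with yield and moves tensors via accelerator.device).

    # Sketch only: illustrates the history -> messages -> chat-template flow from this commit.
    import torch
    from transformers import AutoModelForCausalLM, AutoTokenizer

    MODEL_ID = "CohereForAI/c4ai-command-r-v01"  # assumption: any model with a chat template works

    tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
    model = AutoModelForCausalLM.from_pretrained(
        MODEL_ID, torch_dtype=torch.float16, device_map="auto"
    )

    def build_messages(message, history):
        """Convert Gradio-style (user, assistant) tuples plus the new user turn
        into the role/content dicts expected by apply_chat_template."""
        messages = []
        for user_turn, assistant_turn in history:
            if user_turn:
                messages.append({"role": "user", "content": user_turn})
            if assistant_turn:
                messages.append({"role": "assistant", "content": assistant_turn})
        messages.append({"role": "user", "content": message})
        return messages

    def respond(message, history):
        messages = build_messages(message, history)
        # Tokenize the whole conversation with the model's chat template,
        # adding the generation prompt for the assistant's next turn.
        input_ids = tokenizer.apply_chat_template(
            messages, tokenize=True, add_generation_prompt=True, return_tensors="pt"
        ).to(model.device)
        output_ids = model.generate(input_ids, max_new_tokens=256, do_sample=True)
        # Decode only the newly generated tokens, skipping the prompt.
        return tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True)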