theostos committed
Commit 8a18adb · 1 Parent(s): 3b5eac6

fix prompting issue

Files changed (1)
  app.py  +2 -2
app.py CHANGED
@@ -27,7 +27,7 @@ def respond(
     max_tokens,
     temperature,
 ):
-    prompt = """<|start_header_id|>system<|end_header_id|>
+    prompt = f"""<|start_header_id|>system<|end_header_id|>
 
 You are a helpful assistant.<|eot_id|><|start_header_id|>user<|end_header_id|>
 {message}
@@ -35,7 +35,7 @@ def respond(
 """
     model_inputs = generate_custom_mask(tokenizer, [prompt], device)
 
-    outputs = model.generate(temperature=0.7, max_tokens=64, **model_inputs)
+    outputs = model.generate(temperature=0.7, max_tokens=32, **model_inputs)
     outputs = outputs[:, model_inputs['input_ids'].shape[1]:]
     result = tokenizer.batch_decode(outputs, skip_special_tokens=True)
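For readers of the diff: the substantive change is the `f` prefix on the triple-quoted string. Without it, Python never interpolates `{message}` (presumably a parameter of respond()), so the model was prompted with the literal placeholder text instead of the user's input. A minimal, self-contained sketch of that behavior, independent of the Space's model and tokenizer objects:

message = "What is 2 + 2?"

# Before the fix: a plain triple-quoted string keeps "{message}" as
# literal text, so the model is prompted with the placeholder itself.
broken = """<|start_header_id|>user<|end_header_id|>
{message}<|eot_id|>"""
print("{message}" in broken)  # True -- the user's input never made it in

# After the fix: the f-string interpolates the actual message when the
# prompt is built.
fixed = f"""<|start_header_id|>user<|end_header_id|>
{message}<|eot_id|>"""
print(message in fixed)       # True -- the prompt now carries the input

One side effect of the f-string form: any braces meant literally in the template must be doubled ({{ and }}), which matters if a prompt ever needs raw { } characters.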