umm-maybe committed on
Commit
d9e1efb
1 Parent(s): 82151fa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +4 -3
app.py CHANGED
@@ -57,7 +57,7 @@ def generate_text(prompt):
57
  top_k=40,
58
  repetition_penalty=1.1
59
  )
60
- results = tokenizer.decode(outputs[0], clean_up_tokenization_spaces=False) #.replace(prompt,"")
61
  return results
62
 
63
  def merlin_chat(message, history):
@@ -119,7 +119,8 @@ def merlin_chat(message, history):
119
  #result = generate_text(prompt, model_path, parameters, headers)
120
  #result = model(prompt,return_full_text=False, max_new_tokens=256, temperature=0.8, repetition_penalty=1.1)
121
  #response = result[0]['generated_text']
122
- response = generate_text(prompt)
 
123
  print(f"COMPLETION: {response}") # so we can see it in logs
124
  start = 0
125
  end = 0
@@ -131,7 +132,7 @@ def merlin_chat(message, history):
131
  if end<=0:
132
  continue
133
  cleanStr = cleanStr[:end]
134
- messageStr = cleanStr + ']'
135
  messages = json.loads(messageStr)
136
  message = messages[-1]
137
  if message['role'] != 'assistant':
 
57
  top_k=40,
58
  repetition_penalty=1.1
59
  )
60
+ results = tokenizer.decode(outputs[0], clean_up_tokenization_spaces=False)
61
  return results
62
 
63
  def merlin_chat(message, history):
 
119
  #result = generate_text(prompt, model_path, parameters, headers)
120
  #result = model(prompt,return_full_text=False, max_new_tokens=256, temperature=0.8, repetition_penalty=1.1)
121
  #response = result[0]['generated_text']
122
+ result = generate_text(prompt)
123
+ response = result.replace(prompt,"")
124
  print(f"COMPLETION: {response}") # so we can see it in logs
125
  start = 0
126
  end = 0
 
132
  if end<=0:
133
  continue
134
  cleanStr = cleanStr[:end]
135
+ messageStr = prompt + cleanStr + ']'
136
  messages = json.loads(messageStr)
137
  message = messages[-1]
138
  if message['role'] != 'assistant':